1 /* 2 * INET An implementation of the TCP/IP protocol suite for the LINUX 3 * operating system. INET is implemented using the BSD Socket 4 * interface as the means of communication with the user level. 5 * 6 * Routing netlink socket interface: protocol independent part. 7 * 8 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> 9 * 10 * This program is free software; you can redistribute it and/or 11 * modify it under the terms of the GNU General Public License 12 * as published by the Free Software Foundation; either version 13 * 2 of the License, or (at your option) any later version. 14 * 15 * Fixes: 16 * Vitaly E. Lavrov RTA_OK arithmetics was wrong. 17 */ 18 19 #include <linux/bitops.h> 20 #include <linux/errno.h> 21 #include <linux/module.h> 22 #include <linux/types.h> 23 #include <linux/socket.h> 24 #include <linux/kernel.h> 25 #include <linux/timer.h> 26 #include <linux/string.h> 27 #include <linux/sockios.h> 28 #include <linux/net.h> 29 #include <linux/fcntl.h> 30 #include <linux/mm.h> 31 #include <linux/slab.h> 32 #include <linux/interrupt.h> 33 #include <linux/capability.h> 34 #include <linux/skbuff.h> 35 #include <linux/init.h> 36 #include <linux/security.h> 37 #include <linux/mutex.h> 38 #include <linux/if_addr.h> 39 #include <linux/if_bridge.h> 40 #include <linux/if_vlan.h> 41 #include <linux/pci.h> 42 #include <linux/etherdevice.h> 43 #include <linux/bpf.h> 44 45 #include <linux/uaccess.h> 46 47 #include <linux/inet.h> 48 #include <linux/netdevice.h> 49 #include <net/switchdev.h> 50 #include <net/ip.h> 51 #include <net/protocol.h> 52 #include <net/arp.h> 53 #include <net/route.h> 54 #include <net/udp.h> 55 #include <net/tcp.h> 56 #include <net/sock.h> 57 #include <net/pkt_sched.h> 58 #include <net/fib_rules.h> 59 #include <net/rtnetlink.h> 60 #include <net/net_namespace.h> 61 62 #define RTNL_MAX_TYPE 50 63 #define RTNL_SLAVE_MAX_TYPE 36 64 65 struct rtnl_link { 66 rtnl_doit_func doit; 67 rtnl_dumpit_func dumpit; 68 struct module *owner; 69 unsigned int flags; 70 struct rcu_head rcu; 71 }; 72 73 static DEFINE_MUTEX(rtnl_mutex); 74 75 void rtnl_lock(void) 76 { 77 mutex_lock(&rtnl_mutex); 78 } 79 EXPORT_SYMBOL(rtnl_lock); 80 81 int rtnl_lock_killable(void) 82 { 83 return mutex_lock_killable(&rtnl_mutex); 84 } 85 EXPORT_SYMBOL(rtnl_lock_killable); 86 87 static struct sk_buff *defer_kfree_skb_list; 88 void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail) 89 { 90 if (head && tail) { 91 tail->next = defer_kfree_skb_list; 92 defer_kfree_skb_list = head; 93 } 94 } 95 EXPORT_SYMBOL(rtnl_kfree_skbs); 96 97 void __rtnl_unlock(void) 98 { 99 struct sk_buff *head = defer_kfree_skb_list; 100 101 defer_kfree_skb_list = NULL; 102 103 mutex_unlock(&rtnl_mutex); 104 105 while (head) { 106 struct sk_buff *next = head->next; 107 108 kfree_skb(head); 109 cond_resched(); 110 head = next; 111 } 112 } 113 114 void rtnl_unlock(void) 115 { 116 /* This fellow will unlock it for us. */ 117 netdev_run_todo(); 118 } 119 EXPORT_SYMBOL(rtnl_unlock); 120 121 int rtnl_trylock(void) 122 { 123 return mutex_trylock(&rtnl_mutex); 124 } 125 EXPORT_SYMBOL(rtnl_trylock); 126 127 int rtnl_is_locked(void) 128 { 129 return mutex_is_locked(&rtnl_mutex); 130 } 131 EXPORT_SYMBOL(rtnl_is_locked); 132 133 bool refcount_dec_and_rtnl_lock(refcount_t *r) 134 { 135 return refcount_dec_and_mutex_lock(r, &rtnl_mutex); 136 } 137 EXPORT_SYMBOL(refcount_dec_and_rtnl_lock); 138 139 #ifdef CONFIG_PROVE_LOCKING 140 bool lockdep_rtnl_is_held(void) 141 { 142 return lockdep_is_held(&rtnl_mutex); 143 } 144 EXPORT_SYMBOL(lockdep_rtnl_is_held); 145 #endif /* #ifdef CONFIG_PROVE_LOCKING */ 146 147 static struct rtnl_link *__rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1]; 148 149 static inline int rtm_msgindex(int msgtype) 150 { 151 int msgindex = msgtype - RTM_BASE; 152 153 /* 154 * msgindex < 0 implies someone tried to register a netlink 155 * control code. msgindex >= RTM_NR_MSGTYPES may indicate that 156 * the message type has not been added to linux/rtnetlink.h 157 */ 158 BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES); 159 160 return msgindex; 161 } 162 163 static struct rtnl_link *rtnl_get_link(int protocol, int msgtype) 164 { 165 struct rtnl_link **tab; 166 167 if (protocol >= ARRAY_SIZE(rtnl_msg_handlers)) 168 protocol = PF_UNSPEC; 169 170 tab = rcu_dereference_rtnl(rtnl_msg_handlers[protocol]); 171 if (!tab) 172 tab = rcu_dereference_rtnl(rtnl_msg_handlers[PF_UNSPEC]); 173 174 return tab[msgtype]; 175 } 176 177 static int rtnl_register_internal(struct module *owner, 178 int protocol, int msgtype, 179 rtnl_doit_func doit, rtnl_dumpit_func dumpit, 180 unsigned int flags) 181 { 182 struct rtnl_link *link, *old; 183 struct rtnl_link __rcu **tab; 184 int msgindex; 185 int ret = -ENOBUFS; 186 187 BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX); 188 msgindex = rtm_msgindex(msgtype); 189 190 rtnl_lock(); 191 tab = rtnl_msg_handlers[protocol]; 192 if (tab == NULL) { 193 tab = kcalloc(RTM_NR_MSGTYPES, sizeof(void *), GFP_KERNEL); 194 if (!tab) 195 goto unlock; 196 197 /* ensures we see the 0 stores */ 198 rcu_assign_pointer(rtnl_msg_handlers[protocol], tab); 199 } 200 201 old = rtnl_dereference(tab[msgindex]); 202 if (old) { 203 link = kmemdup(old, sizeof(*old), GFP_KERNEL); 204 if (!link) 205 goto unlock; 206 } else { 207 link = kzalloc(sizeof(*link), GFP_KERNEL); 208 if (!link) 209 goto unlock; 210 } 211 212 WARN_ON(link->owner && link->owner != owner); 213 link->owner = owner; 214 215 WARN_ON(doit && link->doit && link->doit != doit); 216 if (doit) 217 link->doit = doit; 218 WARN_ON(dumpit && link->dumpit && link->dumpit != dumpit); 219 if (dumpit) 220 link->dumpit = dumpit; 221 222 link->flags |= flags; 223 224 /* publish protocol:msgtype */ 225 rcu_assign_pointer(tab[msgindex], link); 226 ret = 0; 227 if (old) 228 kfree_rcu(old, rcu); 229 unlock: 230 rtnl_unlock(); 231 return ret; 232 } 233 234 /** 235 * rtnl_register_module - Register a rtnetlink message type 236 * 237 * @owner: module registering the hook (THIS_MODULE) 238 * @protocol: Protocol family or PF_UNSPEC 239 * @msgtype: rtnetlink message type 240 * @doit: Function pointer called for each request message 241 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message 242 * @flags: rtnl_link_flags to modifiy behaviour of doit/dumpit functions 243 * 244 * Like rtnl_register, but for use by removable modules. 245 */ 246 int rtnl_register_module(struct module *owner, 247 int protocol, int msgtype, 248 rtnl_doit_func doit, rtnl_dumpit_func dumpit, 249 unsigned int flags) 250 { 251 return rtnl_register_internal(owner, protocol, msgtype, 252 doit, dumpit, flags); 253 } 254 EXPORT_SYMBOL_GPL(rtnl_register_module); 255 256 /** 257 * rtnl_register - Register a rtnetlink message type 258 * @protocol: Protocol family or PF_UNSPEC 259 * @msgtype: rtnetlink message type 260 * @doit: Function pointer called for each request message 261 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message 262 * @flags: rtnl_link_flags to modifiy behaviour of doit/dumpit functions 263 * 264 * Registers the specified function pointers (at least one of them has 265 * to be non-NULL) to be called whenever a request message for the 266 * specified protocol family and message type is received. 267 * 268 * The special protocol family PF_UNSPEC may be used to define fallback 269 * function pointers for the case when no entry for the specific protocol 270 * family exists. 271 */ 272 void rtnl_register(int protocol, int msgtype, 273 rtnl_doit_func doit, rtnl_dumpit_func dumpit, 274 unsigned int flags) 275 { 276 int err; 277 278 err = rtnl_register_internal(NULL, protocol, msgtype, doit, dumpit, 279 flags); 280 if (err) 281 pr_err("Unable to register rtnetlink message handler, " 282 "protocol = %d, message type = %d\n", protocol, msgtype); 283 } 284 285 /** 286 * rtnl_unregister - Unregister a rtnetlink message type 287 * @protocol: Protocol family or PF_UNSPEC 288 * @msgtype: rtnetlink message type 289 * 290 * Returns 0 on success or a negative error code. 291 */ 292 int rtnl_unregister(int protocol, int msgtype) 293 { 294 struct rtnl_link **tab, *link; 295 int msgindex; 296 297 BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX); 298 msgindex = rtm_msgindex(msgtype); 299 300 rtnl_lock(); 301 tab = rtnl_dereference(rtnl_msg_handlers[protocol]); 302 if (!tab) { 303 rtnl_unlock(); 304 return -ENOENT; 305 } 306 307 link = tab[msgindex]; 308 rcu_assign_pointer(tab[msgindex], NULL); 309 rtnl_unlock(); 310 311 kfree_rcu(link, rcu); 312 313 return 0; 314 } 315 EXPORT_SYMBOL_GPL(rtnl_unregister); 316 317 /** 318 * rtnl_unregister_all - Unregister all rtnetlink message type of a protocol 319 * @protocol : Protocol family or PF_UNSPEC 320 * 321 * Identical to calling rtnl_unregster() for all registered message types 322 * of a certain protocol family. 323 */ 324 void rtnl_unregister_all(int protocol) 325 { 326 struct rtnl_link **tab, *link; 327 int msgindex; 328 329 BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX); 330 331 rtnl_lock(); 332 tab = rtnl_msg_handlers[protocol]; 333 if (!tab) { 334 rtnl_unlock(); 335 return; 336 } 337 RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL); 338 for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) { 339 link = tab[msgindex]; 340 if (!link) 341 continue; 342 343 rcu_assign_pointer(tab[msgindex], NULL); 344 kfree_rcu(link, rcu); 345 } 346 rtnl_unlock(); 347 348 synchronize_net(); 349 350 kfree(tab); 351 } 352 EXPORT_SYMBOL_GPL(rtnl_unregister_all); 353 354 static LIST_HEAD(link_ops); 355 356 static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind) 357 { 358 const struct rtnl_link_ops *ops; 359 360 list_for_each_entry(ops, &link_ops, list) { 361 if (!strcmp(ops->kind, kind)) 362 return ops; 363 } 364 return NULL; 365 } 366 367 /** 368 * __rtnl_link_register - Register rtnl_link_ops with rtnetlink. 369 * @ops: struct rtnl_link_ops * to register 370 * 371 * The caller must hold the rtnl_mutex. This function should be used 372 * by drivers that create devices during module initialization. It 373 * must be called before registering the devices. 374 * 375 * Returns 0 on success or a negative error code. 376 */ 377 int __rtnl_link_register(struct rtnl_link_ops *ops) 378 { 379 if (rtnl_link_ops_get(ops->kind)) 380 return -EEXIST; 381 382 /* The check for setup is here because if ops 383 * does not have that filled up, it is not possible 384 * to use the ops for creating device. So do not 385 * fill up dellink as well. That disables rtnl_dellink. 386 */ 387 if (ops->setup && !ops->dellink) 388 ops->dellink = unregister_netdevice_queue; 389 390 list_add_tail(&ops->list, &link_ops); 391 return 0; 392 } 393 EXPORT_SYMBOL_GPL(__rtnl_link_register); 394 395 /** 396 * rtnl_link_register - Register rtnl_link_ops with rtnetlink. 397 * @ops: struct rtnl_link_ops * to register 398 * 399 * Returns 0 on success or a negative error code. 400 */ 401 int rtnl_link_register(struct rtnl_link_ops *ops) 402 { 403 int err; 404 405 /* Sanity-check max sizes to avoid stack buffer overflow. */ 406 if (WARN_ON(ops->maxtype > RTNL_MAX_TYPE || 407 ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE)) 408 return -EINVAL; 409 410 rtnl_lock(); 411 err = __rtnl_link_register(ops); 412 rtnl_unlock(); 413 return err; 414 } 415 EXPORT_SYMBOL_GPL(rtnl_link_register); 416 417 static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops) 418 { 419 struct net_device *dev; 420 LIST_HEAD(list_kill); 421 422 for_each_netdev(net, dev) { 423 if (dev->rtnl_link_ops == ops) 424 ops->dellink(dev, &list_kill); 425 } 426 unregister_netdevice_many(&list_kill); 427 } 428 429 /** 430 * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink. 431 * @ops: struct rtnl_link_ops * to unregister 432 * 433 * The caller must hold the rtnl_mutex and guarantee net_namespace_list 434 * integrity (hold pernet_ops_rwsem for writing to close the race 435 * with setup_net() and cleanup_net()). 436 */ 437 void __rtnl_link_unregister(struct rtnl_link_ops *ops) 438 { 439 struct net *net; 440 441 for_each_net(net) { 442 __rtnl_kill_links(net, ops); 443 } 444 list_del(&ops->list); 445 } 446 EXPORT_SYMBOL_GPL(__rtnl_link_unregister); 447 448 /* Return with the rtnl_lock held when there are no network 449 * devices unregistering in any network namespace. 450 */ 451 static void rtnl_lock_unregistering_all(void) 452 { 453 struct net *net; 454 bool unregistering; 455 DEFINE_WAIT_FUNC(wait, woken_wake_function); 456 457 add_wait_queue(&netdev_unregistering_wq, &wait); 458 for (;;) { 459 unregistering = false; 460 rtnl_lock(); 461 /* We held write locked pernet_ops_rwsem, and parallel 462 * setup_net() and cleanup_net() are not possible. 463 */ 464 for_each_net(net) { 465 if (net->dev_unreg_count > 0) { 466 unregistering = true; 467 break; 468 } 469 } 470 if (!unregistering) 471 break; 472 __rtnl_unlock(); 473 474 wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); 475 } 476 remove_wait_queue(&netdev_unregistering_wq, &wait); 477 } 478 479 /** 480 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink. 481 * @ops: struct rtnl_link_ops * to unregister 482 */ 483 void rtnl_link_unregister(struct rtnl_link_ops *ops) 484 { 485 /* Close the race with setup_net() and cleanup_net() */ 486 down_write(&pernet_ops_rwsem); 487 rtnl_lock_unregistering_all(); 488 __rtnl_link_unregister(ops); 489 rtnl_unlock(); 490 up_write(&pernet_ops_rwsem); 491 } 492 EXPORT_SYMBOL_GPL(rtnl_link_unregister); 493 494 static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev) 495 { 496 struct net_device *master_dev; 497 const struct rtnl_link_ops *ops; 498 size_t size = 0; 499 500 rcu_read_lock(); 501 502 master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev); 503 if (!master_dev) 504 goto out; 505 506 ops = master_dev->rtnl_link_ops; 507 if (!ops || !ops->get_slave_size) 508 goto out; 509 /* IFLA_INFO_SLAVE_DATA + nested data */ 510 size = nla_total_size(sizeof(struct nlattr)) + 511 ops->get_slave_size(master_dev, dev); 512 513 out: 514 rcu_read_unlock(); 515 return size; 516 } 517 518 static size_t rtnl_link_get_size(const struct net_device *dev) 519 { 520 const struct rtnl_link_ops *ops = dev->rtnl_link_ops; 521 size_t size; 522 523 if (!ops) 524 return 0; 525 526 size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */ 527 nla_total_size(strlen(ops->kind) + 1); /* IFLA_INFO_KIND */ 528 529 if (ops->get_size) 530 /* IFLA_INFO_DATA + nested data */ 531 size += nla_total_size(sizeof(struct nlattr)) + 532 ops->get_size(dev); 533 534 if (ops->get_xstats_size) 535 /* IFLA_INFO_XSTATS */ 536 size += nla_total_size(ops->get_xstats_size(dev)); 537 538 size += rtnl_link_get_slave_info_data_size(dev); 539 540 return size; 541 } 542 543 static LIST_HEAD(rtnl_af_ops); 544 545 static const struct rtnl_af_ops *rtnl_af_lookup(const int family) 546 { 547 const struct rtnl_af_ops *ops; 548 549 list_for_each_entry_rcu(ops, &rtnl_af_ops, list) { 550 if (ops->family == family) 551 return ops; 552 } 553 554 return NULL; 555 } 556 557 /** 558 * rtnl_af_register - Register rtnl_af_ops with rtnetlink. 559 * @ops: struct rtnl_af_ops * to register 560 * 561 * Returns 0 on success or a negative error code. 562 */ 563 void rtnl_af_register(struct rtnl_af_ops *ops) 564 { 565 rtnl_lock(); 566 list_add_tail_rcu(&ops->list, &rtnl_af_ops); 567 rtnl_unlock(); 568 } 569 EXPORT_SYMBOL_GPL(rtnl_af_register); 570 571 /** 572 * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink. 573 * @ops: struct rtnl_af_ops * to unregister 574 */ 575 void rtnl_af_unregister(struct rtnl_af_ops *ops) 576 { 577 rtnl_lock(); 578 list_del_rcu(&ops->list); 579 rtnl_unlock(); 580 581 synchronize_rcu(); 582 } 583 EXPORT_SYMBOL_GPL(rtnl_af_unregister); 584 585 static size_t rtnl_link_get_af_size(const struct net_device *dev, 586 u32 ext_filter_mask) 587 { 588 struct rtnl_af_ops *af_ops; 589 size_t size; 590 591 /* IFLA_AF_SPEC */ 592 size = nla_total_size(sizeof(struct nlattr)); 593 594 rcu_read_lock(); 595 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) { 596 if (af_ops->get_link_af_size) { 597 /* AF_* + nested data */ 598 size += nla_total_size(sizeof(struct nlattr)) + 599 af_ops->get_link_af_size(dev, ext_filter_mask); 600 } 601 } 602 rcu_read_unlock(); 603 604 return size; 605 } 606 607 static bool rtnl_have_link_slave_info(const struct net_device *dev) 608 { 609 struct net_device *master_dev; 610 bool ret = false; 611 612 rcu_read_lock(); 613 614 master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev); 615 if (master_dev && master_dev->rtnl_link_ops) 616 ret = true; 617 rcu_read_unlock(); 618 return ret; 619 } 620 621 static int rtnl_link_slave_info_fill(struct sk_buff *skb, 622 const struct net_device *dev) 623 { 624 struct net_device *master_dev; 625 const struct rtnl_link_ops *ops; 626 struct nlattr *slave_data; 627 int err; 628 629 master_dev = netdev_master_upper_dev_get((struct net_device *) dev); 630 if (!master_dev) 631 return 0; 632 ops = master_dev->rtnl_link_ops; 633 if (!ops) 634 return 0; 635 if (nla_put_string(skb, IFLA_INFO_SLAVE_KIND, ops->kind) < 0) 636 return -EMSGSIZE; 637 if (ops->fill_slave_info) { 638 slave_data = nla_nest_start(skb, IFLA_INFO_SLAVE_DATA); 639 if (!slave_data) 640 return -EMSGSIZE; 641 err = ops->fill_slave_info(skb, master_dev, dev); 642 if (err < 0) 643 goto err_cancel_slave_data; 644 nla_nest_end(skb, slave_data); 645 } 646 return 0; 647 648 err_cancel_slave_data: 649 nla_nest_cancel(skb, slave_data); 650 return err; 651 } 652 653 static int rtnl_link_info_fill(struct sk_buff *skb, 654 const struct net_device *dev) 655 { 656 const struct rtnl_link_ops *ops = dev->rtnl_link_ops; 657 struct nlattr *data; 658 int err; 659 660 if (!ops) 661 return 0; 662 if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0) 663 return -EMSGSIZE; 664 if (ops->fill_xstats) { 665 err = ops->fill_xstats(skb, dev); 666 if (err < 0) 667 return err; 668 } 669 if (ops->fill_info) { 670 data = nla_nest_start(skb, IFLA_INFO_DATA); 671 if (data == NULL) 672 return -EMSGSIZE; 673 err = ops->fill_info(skb, dev); 674 if (err < 0) 675 goto err_cancel_data; 676 nla_nest_end(skb, data); 677 } 678 return 0; 679 680 err_cancel_data: 681 nla_nest_cancel(skb, data); 682 return err; 683 } 684 685 static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev) 686 { 687 struct nlattr *linkinfo; 688 int err = -EMSGSIZE; 689 690 linkinfo = nla_nest_start(skb, IFLA_LINKINFO); 691 if (linkinfo == NULL) 692 goto out; 693 694 err = rtnl_link_info_fill(skb, dev); 695 if (err < 0) 696 goto err_cancel_link; 697 698 err = rtnl_link_slave_info_fill(skb, dev); 699 if (err < 0) 700 goto err_cancel_link; 701 702 nla_nest_end(skb, linkinfo); 703 return 0; 704 705 err_cancel_link: 706 nla_nest_cancel(skb, linkinfo); 707 out: 708 return err; 709 } 710 711 int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo) 712 { 713 struct sock *rtnl = net->rtnl; 714 int err = 0; 715 716 NETLINK_CB(skb).dst_group = group; 717 if (echo) 718 refcount_inc(&skb->users); 719 netlink_broadcast(rtnl, skb, pid, group, GFP_KERNEL); 720 if (echo) 721 err = netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT); 722 return err; 723 } 724 725 int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid) 726 { 727 struct sock *rtnl = net->rtnl; 728 729 return nlmsg_unicast(rtnl, skb, pid); 730 } 731 EXPORT_SYMBOL(rtnl_unicast); 732 733 void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group, 734 struct nlmsghdr *nlh, gfp_t flags) 735 { 736 struct sock *rtnl = net->rtnl; 737 int report = 0; 738 739 if (nlh) 740 report = nlmsg_report(nlh); 741 742 nlmsg_notify(rtnl, skb, pid, group, report, flags); 743 } 744 EXPORT_SYMBOL(rtnl_notify); 745 746 void rtnl_set_sk_err(struct net *net, u32 group, int error) 747 { 748 struct sock *rtnl = net->rtnl; 749 750 netlink_set_err(rtnl, 0, group, error); 751 } 752 EXPORT_SYMBOL(rtnl_set_sk_err); 753 754 int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics) 755 { 756 struct nlattr *mx; 757 int i, valid = 0; 758 759 mx = nla_nest_start(skb, RTA_METRICS); 760 if (mx == NULL) 761 return -ENOBUFS; 762 763 for (i = 0; i < RTAX_MAX; i++) { 764 if (metrics[i]) { 765 if (i == RTAX_CC_ALGO - 1) { 766 char tmp[TCP_CA_NAME_MAX], *name; 767 768 name = tcp_ca_get_name_by_key(metrics[i], tmp); 769 if (!name) 770 continue; 771 if (nla_put_string(skb, i + 1, name)) 772 goto nla_put_failure; 773 } else if (i == RTAX_FEATURES - 1) { 774 u32 user_features = metrics[i] & RTAX_FEATURE_MASK; 775 776 if (!user_features) 777 continue; 778 BUILD_BUG_ON(RTAX_FEATURE_MASK & DST_FEATURE_MASK); 779 if (nla_put_u32(skb, i + 1, user_features)) 780 goto nla_put_failure; 781 } else { 782 if (nla_put_u32(skb, i + 1, metrics[i])) 783 goto nla_put_failure; 784 } 785 valid++; 786 } 787 } 788 789 if (!valid) { 790 nla_nest_cancel(skb, mx); 791 return 0; 792 } 793 794 return nla_nest_end(skb, mx); 795 796 nla_put_failure: 797 nla_nest_cancel(skb, mx); 798 return -EMSGSIZE; 799 } 800 EXPORT_SYMBOL(rtnetlink_put_metrics); 801 802 int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id, 803 long expires, u32 error) 804 { 805 struct rta_cacheinfo ci = { 806 .rta_error = error, 807 .rta_id = id, 808 }; 809 810 if (dst) { 811 ci.rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse); 812 ci.rta_used = dst->__use; 813 ci.rta_clntref = atomic_read(&dst->__refcnt); 814 } 815 if (expires) { 816 unsigned long clock; 817 818 clock = jiffies_to_clock_t(abs(expires)); 819 clock = min_t(unsigned long, clock, INT_MAX); 820 ci.rta_expires = (expires > 0) ? clock : -clock; 821 } 822 return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci); 823 } 824 EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo); 825 826 static void set_operstate(struct net_device *dev, unsigned char transition) 827 { 828 unsigned char operstate = dev->operstate; 829 830 switch (transition) { 831 case IF_OPER_UP: 832 if ((operstate == IF_OPER_DORMANT || 833 operstate == IF_OPER_UNKNOWN) && 834 !netif_dormant(dev)) 835 operstate = IF_OPER_UP; 836 break; 837 838 case IF_OPER_DORMANT: 839 if (operstate == IF_OPER_UP || 840 operstate == IF_OPER_UNKNOWN) 841 operstate = IF_OPER_DORMANT; 842 break; 843 } 844 845 if (dev->operstate != operstate) { 846 write_lock_bh(&dev_base_lock); 847 dev->operstate = operstate; 848 write_unlock_bh(&dev_base_lock); 849 netdev_state_change(dev); 850 } 851 } 852 853 static unsigned int rtnl_dev_get_flags(const struct net_device *dev) 854 { 855 return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) | 856 (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI)); 857 } 858 859 static unsigned int rtnl_dev_combine_flags(const struct net_device *dev, 860 const struct ifinfomsg *ifm) 861 { 862 unsigned int flags = ifm->ifi_flags; 863 864 /* bugwards compatibility: ifi_change == 0 is treated as ~0 */ 865 if (ifm->ifi_change) 866 flags = (flags & ifm->ifi_change) | 867 (rtnl_dev_get_flags(dev) & ~ifm->ifi_change); 868 869 return flags; 870 } 871 872 static void copy_rtnl_link_stats(struct rtnl_link_stats *a, 873 const struct rtnl_link_stats64 *b) 874 { 875 a->rx_packets = b->rx_packets; 876 a->tx_packets = b->tx_packets; 877 a->rx_bytes = b->rx_bytes; 878 a->tx_bytes = b->tx_bytes; 879 a->rx_errors = b->rx_errors; 880 a->tx_errors = b->tx_errors; 881 a->rx_dropped = b->rx_dropped; 882 a->tx_dropped = b->tx_dropped; 883 884 a->multicast = b->multicast; 885 a->collisions = b->collisions; 886 887 a->rx_length_errors = b->rx_length_errors; 888 a->rx_over_errors = b->rx_over_errors; 889 a->rx_crc_errors = b->rx_crc_errors; 890 a->rx_frame_errors = b->rx_frame_errors; 891 a->rx_fifo_errors = b->rx_fifo_errors; 892 a->rx_missed_errors = b->rx_missed_errors; 893 894 a->tx_aborted_errors = b->tx_aborted_errors; 895 a->tx_carrier_errors = b->tx_carrier_errors; 896 a->tx_fifo_errors = b->tx_fifo_errors; 897 a->tx_heartbeat_errors = b->tx_heartbeat_errors; 898 a->tx_window_errors = b->tx_window_errors; 899 900 a->rx_compressed = b->rx_compressed; 901 a->tx_compressed = b->tx_compressed; 902 903 a->rx_nohandler = b->rx_nohandler; 904 } 905 906 /* All VF info */ 907 static inline int rtnl_vfinfo_size(const struct net_device *dev, 908 u32 ext_filter_mask) 909 { 910 if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) { 911 int num_vfs = dev_num_vf(dev->dev.parent); 912 size_t size = nla_total_size(0); 913 size += num_vfs * 914 (nla_total_size(0) + 915 nla_total_size(sizeof(struct ifla_vf_mac)) + 916 nla_total_size(sizeof(struct ifla_vf_vlan)) + 917 nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */ 918 nla_total_size(MAX_VLAN_LIST_LEN * 919 sizeof(struct ifla_vf_vlan_info)) + 920 nla_total_size(sizeof(struct ifla_vf_spoofchk)) + 921 nla_total_size(sizeof(struct ifla_vf_tx_rate)) + 922 nla_total_size(sizeof(struct ifla_vf_rate)) + 923 nla_total_size(sizeof(struct ifla_vf_link_state)) + 924 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) + 925 nla_total_size(0) + /* nest IFLA_VF_STATS */ 926 /* IFLA_VF_STATS_RX_PACKETS */ 927 nla_total_size_64bit(sizeof(__u64)) + 928 /* IFLA_VF_STATS_TX_PACKETS */ 929 nla_total_size_64bit(sizeof(__u64)) + 930 /* IFLA_VF_STATS_RX_BYTES */ 931 nla_total_size_64bit(sizeof(__u64)) + 932 /* IFLA_VF_STATS_TX_BYTES */ 933 nla_total_size_64bit(sizeof(__u64)) + 934 /* IFLA_VF_STATS_BROADCAST */ 935 nla_total_size_64bit(sizeof(__u64)) + 936 /* IFLA_VF_STATS_MULTICAST */ 937 nla_total_size_64bit(sizeof(__u64)) + 938 /* IFLA_VF_STATS_RX_DROPPED */ 939 nla_total_size_64bit(sizeof(__u64)) + 940 /* IFLA_VF_STATS_TX_DROPPED */ 941 nla_total_size_64bit(sizeof(__u64)) + 942 nla_total_size(sizeof(struct ifla_vf_trust))); 943 return size; 944 } else 945 return 0; 946 } 947 948 static size_t rtnl_port_size(const struct net_device *dev, 949 u32 ext_filter_mask) 950 { 951 size_t port_size = nla_total_size(4) /* PORT_VF */ 952 + nla_total_size(PORT_PROFILE_MAX) /* PORT_PROFILE */ 953 + nla_total_size(PORT_UUID_MAX) /* PORT_INSTANCE_UUID */ 954 + nla_total_size(PORT_UUID_MAX) /* PORT_HOST_UUID */ 955 + nla_total_size(1) /* PROT_VDP_REQUEST */ 956 + nla_total_size(2); /* PORT_VDP_RESPONSE */ 957 size_t vf_ports_size = nla_total_size(sizeof(struct nlattr)); 958 size_t vf_port_size = nla_total_size(sizeof(struct nlattr)) 959 + port_size; 960 size_t port_self_size = nla_total_size(sizeof(struct nlattr)) 961 + port_size; 962 963 if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent || 964 !(ext_filter_mask & RTEXT_FILTER_VF)) 965 return 0; 966 if (dev_num_vf(dev->dev.parent)) 967 return port_self_size + vf_ports_size + 968 vf_port_size * dev_num_vf(dev->dev.parent); 969 else 970 return port_self_size; 971 } 972 973 static size_t rtnl_xdp_size(void) 974 { 975 size_t xdp_size = nla_total_size(0) + /* nest IFLA_XDP */ 976 nla_total_size(1) + /* XDP_ATTACHED */ 977 nla_total_size(4) + /* XDP_PROG_ID (or 1st mode) */ 978 nla_total_size(4); /* XDP_<mode>_PROG_ID */ 979 980 return xdp_size; 981 } 982 983 static noinline size_t if_nlmsg_size(const struct net_device *dev, 984 u32 ext_filter_mask) 985 { 986 return NLMSG_ALIGN(sizeof(struct ifinfomsg)) 987 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */ 988 + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */ 989 + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */ 990 + nla_total_size_64bit(sizeof(struct rtnl_link_ifmap)) 991 + nla_total_size(sizeof(struct rtnl_link_stats)) 992 + nla_total_size_64bit(sizeof(struct rtnl_link_stats64)) 993 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */ 994 + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */ 995 + nla_total_size(4) /* IFLA_TXQLEN */ 996 + nla_total_size(4) /* IFLA_WEIGHT */ 997 + nla_total_size(4) /* IFLA_MTU */ 998 + nla_total_size(4) /* IFLA_LINK */ 999 + nla_total_size(4) /* IFLA_MASTER */ 1000 + nla_total_size(1) /* IFLA_CARRIER */ 1001 + nla_total_size(4) /* IFLA_PROMISCUITY */ 1002 + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */ 1003 + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */ 1004 + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */ 1005 + nla_total_size(4) /* IFLA_GSO_MAX_SIZE */ 1006 + nla_total_size(1) /* IFLA_OPERSTATE */ 1007 + nla_total_size(1) /* IFLA_LINKMODE */ 1008 + nla_total_size(4) /* IFLA_CARRIER_CHANGES */ 1009 + nla_total_size(4) /* IFLA_LINK_NETNSID */ 1010 + nla_total_size(4) /* IFLA_GROUP */ 1011 + nla_total_size(ext_filter_mask 1012 & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */ 1013 + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */ 1014 + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */ 1015 + rtnl_link_get_size(dev) /* IFLA_LINKINFO */ 1016 + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */ 1017 + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */ 1018 + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */ 1019 + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */ 1020 + rtnl_xdp_size() /* IFLA_XDP */ 1021 + nla_total_size(4) /* IFLA_EVENT */ 1022 + nla_total_size(4) /* IFLA_NEW_NETNSID */ 1023 + nla_total_size(4) /* IFLA_NEW_IFINDEX */ 1024 + nla_total_size(1) /* IFLA_PROTO_DOWN */ 1025 + nla_total_size(4) /* IFLA_TARGET_NETNSID */ 1026 + nla_total_size(4) /* IFLA_CARRIER_UP_COUNT */ 1027 + nla_total_size(4) /* IFLA_CARRIER_DOWN_COUNT */ 1028 + nla_total_size(4) /* IFLA_MIN_MTU */ 1029 + nla_total_size(4) /* IFLA_MAX_MTU */ 1030 + 0; 1031 } 1032 1033 static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev) 1034 { 1035 struct nlattr *vf_ports; 1036 struct nlattr *vf_port; 1037 int vf; 1038 int err; 1039 1040 vf_ports = nla_nest_start(skb, IFLA_VF_PORTS); 1041 if (!vf_ports) 1042 return -EMSGSIZE; 1043 1044 for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) { 1045 vf_port = nla_nest_start(skb, IFLA_VF_PORT); 1046 if (!vf_port) 1047 goto nla_put_failure; 1048 if (nla_put_u32(skb, IFLA_PORT_VF, vf)) 1049 goto nla_put_failure; 1050 err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb); 1051 if (err == -EMSGSIZE) 1052 goto nla_put_failure; 1053 if (err) { 1054 nla_nest_cancel(skb, vf_port); 1055 continue; 1056 } 1057 nla_nest_end(skb, vf_port); 1058 } 1059 1060 nla_nest_end(skb, vf_ports); 1061 1062 return 0; 1063 1064 nla_put_failure: 1065 nla_nest_cancel(skb, vf_ports); 1066 return -EMSGSIZE; 1067 } 1068 1069 static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev) 1070 { 1071 struct nlattr *port_self; 1072 int err; 1073 1074 port_self = nla_nest_start(skb, IFLA_PORT_SELF); 1075 if (!port_self) 1076 return -EMSGSIZE; 1077 1078 err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb); 1079 if (err) { 1080 nla_nest_cancel(skb, port_self); 1081 return (err == -EMSGSIZE) ? err : 0; 1082 } 1083 1084 nla_nest_end(skb, port_self); 1085 1086 return 0; 1087 } 1088 1089 static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev, 1090 u32 ext_filter_mask) 1091 { 1092 int err; 1093 1094 if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent || 1095 !(ext_filter_mask & RTEXT_FILTER_VF)) 1096 return 0; 1097 1098 err = rtnl_port_self_fill(skb, dev); 1099 if (err) 1100 return err; 1101 1102 if (dev_num_vf(dev->dev.parent)) { 1103 err = rtnl_vf_ports_fill(skb, dev); 1104 if (err) 1105 return err; 1106 } 1107 1108 return 0; 1109 } 1110 1111 static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev) 1112 { 1113 int err; 1114 struct netdev_phys_item_id ppid; 1115 1116 err = dev_get_phys_port_id(dev, &ppid); 1117 if (err) { 1118 if (err == -EOPNOTSUPP) 1119 return 0; 1120 return err; 1121 } 1122 1123 if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id)) 1124 return -EMSGSIZE; 1125 1126 return 0; 1127 } 1128 1129 static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev) 1130 { 1131 char name[IFNAMSIZ]; 1132 int err; 1133 1134 err = dev_get_phys_port_name(dev, name, sizeof(name)); 1135 if (err) { 1136 if (err == -EOPNOTSUPP) 1137 return 0; 1138 return err; 1139 } 1140 1141 if (nla_put_string(skb, IFLA_PHYS_PORT_NAME, name)) 1142 return -EMSGSIZE; 1143 1144 return 0; 1145 } 1146 1147 static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev) 1148 { 1149 int err; 1150 struct switchdev_attr attr = { 1151 .orig_dev = dev, 1152 .id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID, 1153 .flags = SWITCHDEV_F_NO_RECURSE, 1154 }; 1155 1156 err = switchdev_port_attr_get(dev, &attr); 1157 if (err) { 1158 if (err == -EOPNOTSUPP) 1159 return 0; 1160 return err; 1161 } 1162 1163 if (nla_put(skb, IFLA_PHYS_SWITCH_ID, attr.u.ppid.id_len, 1164 attr.u.ppid.id)) 1165 return -EMSGSIZE; 1166 1167 return 0; 1168 } 1169 1170 static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb, 1171 struct net_device *dev) 1172 { 1173 struct rtnl_link_stats64 *sp; 1174 struct nlattr *attr; 1175 1176 attr = nla_reserve_64bit(skb, IFLA_STATS64, 1177 sizeof(struct rtnl_link_stats64), IFLA_PAD); 1178 if (!attr) 1179 return -EMSGSIZE; 1180 1181 sp = nla_data(attr); 1182 dev_get_stats(dev, sp); 1183 1184 attr = nla_reserve(skb, IFLA_STATS, 1185 sizeof(struct rtnl_link_stats)); 1186 if (!attr) 1187 return -EMSGSIZE; 1188 1189 copy_rtnl_link_stats(nla_data(attr), sp); 1190 1191 return 0; 1192 } 1193 1194 static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb, 1195 struct net_device *dev, 1196 int vfs_num, 1197 struct nlattr *vfinfo) 1198 { 1199 struct ifla_vf_rss_query_en vf_rss_query_en; 1200 struct nlattr *vf, *vfstats, *vfvlanlist; 1201 struct ifla_vf_link_state vf_linkstate; 1202 struct ifla_vf_vlan_info vf_vlan_info; 1203 struct ifla_vf_spoofchk vf_spoofchk; 1204 struct ifla_vf_tx_rate vf_tx_rate; 1205 struct ifla_vf_stats vf_stats; 1206 struct ifla_vf_trust vf_trust; 1207 struct ifla_vf_vlan vf_vlan; 1208 struct ifla_vf_rate vf_rate; 1209 struct ifla_vf_mac vf_mac; 1210 struct ifla_vf_info ivi; 1211 1212 memset(&ivi, 0, sizeof(ivi)); 1213 1214 /* Not all SR-IOV capable drivers support the 1215 * spoofcheck and "RSS query enable" query. Preset to 1216 * -1 so the user space tool can detect that the driver 1217 * didn't report anything. 1218 */ 1219 ivi.spoofchk = -1; 1220 ivi.rss_query_en = -1; 1221 ivi.trusted = -1; 1222 /* The default value for VF link state is "auto" 1223 * IFLA_VF_LINK_STATE_AUTO which equals zero 1224 */ 1225 ivi.linkstate = 0; 1226 /* VLAN Protocol by default is 802.1Q */ 1227 ivi.vlan_proto = htons(ETH_P_8021Q); 1228 if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi)) 1229 return 0; 1230 1231 memset(&vf_vlan_info, 0, sizeof(vf_vlan_info)); 1232 1233 vf_mac.vf = 1234 vf_vlan.vf = 1235 vf_vlan_info.vf = 1236 vf_rate.vf = 1237 vf_tx_rate.vf = 1238 vf_spoofchk.vf = 1239 vf_linkstate.vf = 1240 vf_rss_query_en.vf = 1241 vf_trust.vf = ivi.vf; 1242 1243 memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac)); 1244 vf_vlan.vlan = ivi.vlan; 1245 vf_vlan.qos = ivi.qos; 1246 vf_vlan_info.vlan = ivi.vlan; 1247 vf_vlan_info.qos = ivi.qos; 1248 vf_vlan_info.vlan_proto = ivi.vlan_proto; 1249 vf_tx_rate.rate = ivi.max_tx_rate; 1250 vf_rate.min_tx_rate = ivi.min_tx_rate; 1251 vf_rate.max_tx_rate = ivi.max_tx_rate; 1252 vf_spoofchk.setting = ivi.spoofchk; 1253 vf_linkstate.link_state = ivi.linkstate; 1254 vf_rss_query_en.setting = ivi.rss_query_en; 1255 vf_trust.setting = ivi.trusted; 1256 vf = nla_nest_start(skb, IFLA_VF_INFO); 1257 if (!vf) 1258 goto nla_put_vfinfo_failure; 1259 if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) || 1260 nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) || 1261 nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate), 1262 &vf_rate) || 1263 nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate), 1264 &vf_tx_rate) || 1265 nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk), 1266 &vf_spoofchk) || 1267 nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate), 1268 &vf_linkstate) || 1269 nla_put(skb, IFLA_VF_RSS_QUERY_EN, 1270 sizeof(vf_rss_query_en), 1271 &vf_rss_query_en) || 1272 nla_put(skb, IFLA_VF_TRUST, 1273 sizeof(vf_trust), &vf_trust)) 1274 goto nla_put_vf_failure; 1275 vfvlanlist = nla_nest_start(skb, IFLA_VF_VLAN_LIST); 1276 if (!vfvlanlist) 1277 goto nla_put_vf_failure; 1278 if (nla_put(skb, IFLA_VF_VLAN_INFO, sizeof(vf_vlan_info), 1279 &vf_vlan_info)) { 1280 nla_nest_cancel(skb, vfvlanlist); 1281 goto nla_put_vf_failure; 1282 } 1283 nla_nest_end(skb, vfvlanlist); 1284 memset(&vf_stats, 0, sizeof(vf_stats)); 1285 if (dev->netdev_ops->ndo_get_vf_stats) 1286 dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num, 1287 &vf_stats); 1288 vfstats = nla_nest_start(skb, IFLA_VF_STATS); 1289 if (!vfstats) 1290 goto nla_put_vf_failure; 1291 if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS, 1292 vf_stats.rx_packets, IFLA_VF_STATS_PAD) || 1293 nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS, 1294 vf_stats.tx_packets, IFLA_VF_STATS_PAD) || 1295 nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES, 1296 vf_stats.rx_bytes, IFLA_VF_STATS_PAD) || 1297 nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES, 1298 vf_stats.tx_bytes, IFLA_VF_STATS_PAD) || 1299 nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST, 1300 vf_stats.broadcast, IFLA_VF_STATS_PAD) || 1301 nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST, 1302 vf_stats.multicast, IFLA_VF_STATS_PAD) || 1303 nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED, 1304 vf_stats.rx_dropped, IFLA_VF_STATS_PAD) || 1305 nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED, 1306 vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) { 1307 nla_nest_cancel(skb, vfstats); 1308 goto nla_put_vf_failure; 1309 } 1310 nla_nest_end(skb, vfstats); 1311 nla_nest_end(skb, vf); 1312 return 0; 1313 1314 nla_put_vf_failure: 1315 nla_nest_cancel(skb, vf); 1316 nla_put_vfinfo_failure: 1317 nla_nest_cancel(skb, vfinfo); 1318 return -EMSGSIZE; 1319 } 1320 1321 static noinline_for_stack int rtnl_fill_vf(struct sk_buff *skb, 1322 struct net_device *dev, 1323 u32 ext_filter_mask) 1324 { 1325 struct nlattr *vfinfo; 1326 int i, num_vfs; 1327 1328 if (!dev->dev.parent || ((ext_filter_mask & RTEXT_FILTER_VF) == 0)) 1329 return 0; 1330 1331 num_vfs = dev_num_vf(dev->dev.parent); 1332 if (nla_put_u32(skb, IFLA_NUM_VF, num_vfs)) 1333 return -EMSGSIZE; 1334 1335 if (!dev->netdev_ops->ndo_get_vf_config) 1336 return 0; 1337 1338 vfinfo = nla_nest_start(skb, IFLA_VFINFO_LIST); 1339 if (!vfinfo) 1340 return -EMSGSIZE; 1341 1342 for (i = 0; i < num_vfs; i++) { 1343 if (rtnl_fill_vfinfo(skb, dev, i, vfinfo)) 1344 return -EMSGSIZE; 1345 } 1346 1347 nla_nest_end(skb, vfinfo); 1348 return 0; 1349 } 1350 1351 static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev) 1352 { 1353 struct rtnl_link_ifmap map; 1354 1355 memset(&map, 0, sizeof(map)); 1356 map.mem_start = dev->mem_start; 1357 map.mem_end = dev->mem_end; 1358 map.base_addr = dev->base_addr; 1359 map.irq = dev->irq; 1360 map.dma = dev->dma; 1361 map.port = dev->if_port; 1362 1363 if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD)) 1364 return -EMSGSIZE; 1365 1366 return 0; 1367 } 1368 1369 static u32 rtnl_xdp_prog_skb(struct net_device *dev) 1370 { 1371 const struct bpf_prog *generic_xdp_prog; 1372 1373 ASSERT_RTNL(); 1374 1375 generic_xdp_prog = rtnl_dereference(dev->xdp_prog); 1376 if (!generic_xdp_prog) 1377 return 0; 1378 return generic_xdp_prog->aux->id; 1379 } 1380 1381 static u32 rtnl_xdp_prog_drv(struct net_device *dev) 1382 { 1383 return __dev_xdp_query(dev, dev->netdev_ops->ndo_bpf, XDP_QUERY_PROG); 1384 } 1385 1386 static u32 rtnl_xdp_prog_hw(struct net_device *dev) 1387 { 1388 return __dev_xdp_query(dev, dev->netdev_ops->ndo_bpf, 1389 XDP_QUERY_PROG_HW); 1390 } 1391 1392 static int rtnl_xdp_report_one(struct sk_buff *skb, struct net_device *dev, 1393 u32 *prog_id, u8 *mode, u8 tgt_mode, u32 attr, 1394 u32 (*get_prog_id)(struct net_device *dev)) 1395 { 1396 u32 curr_id; 1397 int err; 1398 1399 curr_id = get_prog_id(dev); 1400 if (!curr_id) 1401 return 0; 1402 1403 *prog_id = curr_id; 1404 err = nla_put_u32(skb, attr, curr_id); 1405 if (err) 1406 return err; 1407 1408 if (*mode != XDP_ATTACHED_NONE) 1409 *mode = XDP_ATTACHED_MULTI; 1410 else 1411 *mode = tgt_mode; 1412 1413 return 0; 1414 } 1415 1416 static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev) 1417 { 1418 struct nlattr *xdp; 1419 u32 prog_id; 1420 int err; 1421 u8 mode; 1422 1423 xdp = nla_nest_start(skb, IFLA_XDP); 1424 if (!xdp) 1425 return -EMSGSIZE; 1426 1427 prog_id = 0; 1428 mode = XDP_ATTACHED_NONE; 1429 err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_SKB, 1430 IFLA_XDP_SKB_PROG_ID, rtnl_xdp_prog_skb); 1431 if (err) 1432 goto err_cancel; 1433 err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_DRV, 1434 IFLA_XDP_DRV_PROG_ID, rtnl_xdp_prog_drv); 1435 if (err) 1436 goto err_cancel; 1437 err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_HW, 1438 IFLA_XDP_HW_PROG_ID, rtnl_xdp_prog_hw); 1439 if (err) 1440 goto err_cancel; 1441 1442 err = nla_put_u8(skb, IFLA_XDP_ATTACHED, mode); 1443 if (err) 1444 goto err_cancel; 1445 1446 if (prog_id && mode != XDP_ATTACHED_MULTI) { 1447 err = nla_put_u32(skb, IFLA_XDP_PROG_ID, prog_id); 1448 if (err) 1449 goto err_cancel; 1450 } 1451 1452 nla_nest_end(skb, xdp); 1453 return 0; 1454 1455 err_cancel: 1456 nla_nest_cancel(skb, xdp); 1457 return err; 1458 } 1459 1460 static u32 rtnl_get_event(unsigned long event) 1461 { 1462 u32 rtnl_event_type = IFLA_EVENT_NONE; 1463 1464 switch (event) { 1465 case NETDEV_REBOOT: 1466 rtnl_event_type = IFLA_EVENT_REBOOT; 1467 break; 1468 case NETDEV_FEAT_CHANGE: 1469 rtnl_event_type = IFLA_EVENT_FEATURES; 1470 break; 1471 case NETDEV_BONDING_FAILOVER: 1472 rtnl_event_type = IFLA_EVENT_BONDING_FAILOVER; 1473 break; 1474 case NETDEV_NOTIFY_PEERS: 1475 rtnl_event_type = IFLA_EVENT_NOTIFY_PEERS; 1476 break; 1477 case NETDEV_RESEND_IGMP: 1478 rtnl_event_type = IFLA_EVENT_IGMP_RESEND; 1479 break; 1480 case NETDEV_CHANGEINFODATA: 1481 rtnl_event_type = IFLA_EVENT_BONDING_OPTIONS; 1482 break; 1483 default: 1484 break; 1485 } 1486 1487 return rtnl_event_type; 1488 } 1489 1490 static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev) 1491 { 1492 const struct net_device *upper_dev; 1493 int ret = 0; 1494 1495 rcu_read_lock(); 1496 1497 upper_dev = netdev_master_upper_dev_get_rcu(dev); 1498 if (upper_dev) 1499 ret = nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex); 1500 1501 rcu_read_unlock(); 1502 return ret; 1503 } 1504 1505 static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev) 1506 { 1507 int ifindex = dev_get_iflink(dev); 1508 1509 if (dev->ifindex == ifindex) 1510 return 0; 1511 1512 return nla_put_u32(skb, IFLA_LINK, ifindex); 1513 } 1514 1515 static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb, 1516 struct net_device *dev) 1517 { 1518 char buf[IFALIASZ]; 1519 int ret; 1520 1521 ret = dev_get_alias(dev, buf, sizeof(buf)); 1522 return ret > 0 ? nla_put_string(skb, IFLA_IFALIAS, buf) : 0; 1523 } 1524 1525 static int rtnl_fill_link_netnsid(struct sk_buff *skb, 1526 const struct net_device *dev, 1527 struct net *src_net) 1528 { 1529 if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net) { 1530 struct net *link_net = dev->rtnl_link_ops->get_link_net(dev); 1531 1532 if (!net_eq(dev_net(dev), link_net)) { 1533 int id = peernet2id_alloc(src_net, link_net); 1534 1535 if (nla_put_s32(skb, IFLA_LINK_NETNSID, id)) 1536 return -EMSGSIZE; 1537 } 1538 } 1539 1540 return 0; 1541 } 1542 1543 static int rtnl_fill_link_af(struct sk_buff *skb, 1544 const struct net_device *dev, 1545 u32 ext_filter_mask) 1546 { 1547 const struct rtnl_af_ops *af_ops; 1548 struct nlattr *af_spec; 1549 1550 af_spec = nla_nest_start(skb, IFLA_AF_SPEC); 1551 if (!af_spec) 1552 return -EMSGSIZE; 1553 1554 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) { 1555 struct nlattr *af; 1556 int err; 1557 1558 if (!af_ops->fill_link_af) 1559 continue; 1560 1561 af = nla_nest_start(skb, af_ops->family); 1562 if (!af) 1563 return -EMSGSIZE; 1564 1565 err = af_ops->fill_link_af(skb, dev, ext_filter_mask); 1566 /* 1567 * Caller may return ENODATA to indicate that there 1568 * was no data to be dumped. This is not an error, it 1569 * means we should trim the attribute header and 1570 * continue. 1571 */ 1572 if (err == -ENODATA) 1573 nla_nest_cancel(skb, af); 1574 else if (err < 0) 1575 return -EMSGSIZE; 1576 1577 nla_nest_end(skb, af); 1578 } 1579 1580 nla_nest_end(skb, af_spec); 1581 return 0; 1582 } 1583 1584 static int rtnl_fill_ifinfo(struct sk_buff *skb, 1585 struct net_device *dev, struct net *src_net, 1586 int type, u32 pid, u32 seq, u32 change, 1587 unsigned int flags, u32 ext_filter_mask, 1588 u32 event, int *new_nsid, int new_ifindex, 1589 int tgt_netnsid) 1590 { 1591 struct ifinfomsg *ifm; 1592 struct nlmsghdr *nlh; 1593 1594 ASSERT_RTNL(); 1595 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags); 1596 if (nlh == NULL) 1597 return -EMSGSIZE; 1598 1599 ifm = nlmsg_data(nlh); 1600 ifm->ifi_family = AF_UNSPEC; 1601 ifm->__ifi_pad = 0; 1602 ifm->ifi_type = dev->type; 1603 ifm->ifi_index = dev->ifindex; 1604 ifm->ifi_flags = dev_get_flags(dev); 1605 ifm->ifi_change = change; 1606 1607 if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_TARGET_NETNSID, tgt_netnsid)) 1608 goto nla_put_failure; 1609 1610 if (nla_put_string(skb, IFLA_IFNAME, dev->name) || 1611 nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) || 1612 nla_put_u8(skb, IFLA_OPERSTATE, 1613 netif_running(dev) ? dev->operstate : IF_OPER_DOWN) || 1614 nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) || 1615 nla_put_u32(skb, IFLA_MTU, dev->mtu) || 1616 nla_put_u32(skb, IFLA_MIN_MTU, dev->min_mtu) || 1617 nla_put_u32(skb, IFLA_MAX_MTU, dev->max_mtu) || 1618 nla_put_u32(skb, IFLA_GROUP, dev->group) || 1619 nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) || 1620 nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) || 1621 nla_put_u32(skb, IFLA_GSO_MAX_SEGS, dev->gso_max_segs) || 1622 nla_put_u32(skb, IFLA_GSO_MAX_SIZE, dev->gso_max_size) || 1623 #ifdef CONFIG_RPS 1624 nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) || 1625 #endif 1626 nla_put_iflink(skb, dev) || 1627 put_master_ifindex(skb, dev) || 1628 nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) || 1629 (dev->qdisc && 1630 nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) || 1631 nla_put_ifalias(skb, dev) || 1632 nla_put_u32(skb, IFLA_CARRIER_CHANGES, 1633 atomic_read(&dev->carrier_up_count) + 1634 atomic_read(&dev->carrier_down_count)) || 1635 nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down) || 1636 nla_put_u32(skb, IFLA_CARRIER_UP_COUNT, 1637 atomic_read(&dev->carrier_up_count)) || 1638 nla_put_u32(skb, IFLA_CARRIER_DOWN_COUNT, 1639 atomic_read(&dev->carrier_down_count))) 1640 goto nla_put_failure; 1641 1642 if (event != IFLA_EVENT_NONE) { 1643 if (nla_put_u32(skb, IFLA_EVENT, event)) 1644 goto nla_put_failure; 1645 } 1646 1647 if (rtnl_fill_link_ifmap(skb, dev)) 1648 goto nla_put_failure; 1649 1650 if (dev->addr_len) { 1651 if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) || 1652 nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast)) 1653 goto nla_put_failure; 1654 } 1655 1656 if (rtnl_phys_port_id_fill(skb, dev)) 1657 goto nla_put_failure; 1658 1659 if (rtnl_phys_port_name_fill(skb, dev)) 1660 goto nla_put_failure; 1661 1662 if (rtnl_phys_switch_id_fill(skb, dev)) 1663 goto nla_put_failure; 1664 1665 if (rtnl_fill_stats(skb, dev)) 1666 goto nla_put_failure; 1667 1668 if (rtnl_fill_vf(skb, dev, ext_filter_mask)) 1669 goto nla_put_failure; 1670 1671 if (rtnl_port_fill(skb, dev, ext_filter_mask)) 1672 goto nla_put_failure; 1673 1674 if (rtnl_xdp_fill(skb, dev)) 1675 goto nla_put_failure; 1676 1677 if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) { 1678 if (rtnl_link_fill(skb, dev) < 0) 1679 goto nla_put_failure; 1680 } 1681 1682 if (rtnl_fill_link_netnsid(skb, dev, src_net)) 1683 goto nla_put_failure; 1684 1685 if (new_nsid && 1686 nla_put_s32(skb, IFLA_NEW_NETNSID, *new_nsid) < 0) 1687 goto nla_put_failure; 1688 if (new_ifindex && 1689 nla_put_s32(skb, IFLA_NEW_IFINDEX, new_ifindex) < 0) 1690 goto nla_put_failure; 1691 1692 1693 rcu_read_lock(); 1694 if (rtnl_fill_link_af(skb, dev, ext_filter_mask)) 1695 goto nla_put_failure_rcu; 1696 rcu_read_unlock(); 1697 1698 nlmsg_end(skb, nlh); 1699 return 0; 1700 1701 nla_put_failure_rcu: 1702 rcu_read_unlock(); 1703 nla_put_failure: 1704 nlmsg_cancel(skb, nlh); 1705 return -EMSGSIZE; 1706 } 1707 1708 static const struct nla_policy ifla_policy[IFLA_MAX+1] = { 1709 [IFLA_IFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ-1 }, 1710 [IFLA_ADDRESS] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN }, 1711 [IFLA_BROADCAST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN }, 1712 [IFLA_MAP] = { .len = sizeof(struct rtnl_link_ifmap) }, 1713 [IFLA_MTU] = { .type = NLA_U32 }, 1714 [IFLA_LINK] = { .type = NLA_U32 }, 1715 [IFLA_MASTER] = { .type = NLA_U32 }, 1716 [IFLA_CARRIER] = { .type = NLA_U8 }, 1717 [IFLA_TXQLEN] = { .type = NLA_U32 }, 1718 [IFLA_WEIGHT] = { .type = NLA_U32 }, 1719 [IFLA_OPERSTATE] = { .type = NLA_U8 }, 1720 [IFLA_LINKMODE] = { .type = NLA_U8 }, 1721 [IFLA_LINKINFO] = { .type = NLA_NESTED }, 1722 [IFLA_NET_NS_PID] = { .type = NLA_U32 }, 1723 [IFLA_NET_NS_FD] = { .type = NLA_U32 }, 1724 /* IFLA_IFALIAS is a string, but policy is set to NLA_BINARY to 1725 * allow 0-length string (needed to remove an alias). 1726 */ 1727 [IFLA_IFALIAS] = { .type = NLA_BINARY, .len = IFALIASZ - 1 }, 1728 [IFLA_VFINFO_LIST] = {. type = NLA_NESTED }, 1729 [IFLA_VF_PORTS] = { .type = NLA_NESTED }, 1730 [IFLA_PORT_SELF] = { .type = NLA_NESTED }, 1731 [IFLA_AF_SPEC] = { .type = NLA_NESTED }, 1732 [IFLA_EXT_MASK] = { .type = NLA_U32 }, 1733 [IFLA_PROMISCUITY] = { .type = NLA_U32 }, 1734 [IFLA_NUM_TX_QUEUES] = { .type = NLA_U32 }, 1735 [IFLA_NUM_RX_QUEUES] = { .type = NLA_U32 }, 1736 [IFLA_GSO_MAX_SEGS] = { .type = NLA_U32 }, 1737 [IFLA_GSO_MAX_SIZE] = { .type = NLA_U32 }, 1738 [IFLA_PHYS_PORT_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN }, 1739 [IFLA_CARRIER_CHANGES] = { .type = NLA_U32 }, /* ignored */ 1740 [IFLA_PHYS_SWITCH_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN }, 1741 [IFLA_LINK_NETNSID] = { .type = NLA_S32 }, 1742 [IFLA_PROTO_DOWN] = { .type = NLA_U8 }, 1743 [IFLA_XDP] = { .type = NLA_NESTED }, 1744 [IFLA_EVENT] = { .type = NLA_U32 }, 1745 [IFLA_GROUP] = { .type = NLA_U32 }, 1746 [IFLA_TARGET_NETNSID] = { .type = NLA_S32 }, 1747 [IFLA_CARRIER_UP_COUNT] = { .type = NLA_U32 }, 1748 [IFLA_CARRIER_DOWN_COUNT] = { .type = NLA_U32 }, 1749 [IFLA_MIN_MTU] = { .type = NLA_U32 }, 1750 [IFLA_MAX_MTU] = { .type = NLA_U32 }, 1751 }; 1752 1753 static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { 1754 [IFLA_INFO_KIND] = { .type = NLA_STRING }, 1755 [IFLA_INFO_DATA] = { .type = NLA_NESTED }, 1756 [IFLA_INFO_SLAVE_KIND] = { .type = NLA_STRING }, 1757 [IFLA_INFO_SLAVE_DATA] = { .type = NLA_NESTED }, 1758 }; 1759 1760 static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = { 1761 [IFLA_VF_MAC] = { .len = sizeof(struct ifla_vf_mac) }, 1762 [IFLA_VF_VLAN] = { .len = sizeof(struct ifla_vf_vlan) }, 1763 [IFLA_VF_VLAN_LIST] = { .type = NLA_NESTED }, 1764 [IFLA_VF_TX_RATE] = { .len = sizeof(struct ifla_vf_tx_rate) }, 1765 [IFLA_VF_SPOOFCHK] = { .len = sizeof(struct ifla_vf_spoofchk) }, 1766 [IFLA_VF_RATE] = { .len = sizeof(struct ifla_vf_rate) }, 1767 [IFLA_VF_LINK_STATE] = { .len = sizeof(struct ifla_vf_link_state) }, 1768 [IFLA_VF_RSS_QUERY_EN] = { .len = sizeof(struct ifla_vf_rss_query_en) }, 1769 [IFLA_VF_STATS] = { .type = NLA_NESTED }, 1770 [IFLA_VF_TRUST] = { .len = sizeof(struct ifla_vf_trust) }, 1771 [IFLA_VF_IB_NODE_GUID] = { .len = sizeof(struct ifla_vf_guid) }, 1772 [IFLA_VF_IB_PORT_GUID] = { .len = sizeof(struct ifla_vf_guid) }, 1773 }; 1774 1775 static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = { 1776 [IFLA_PORT_VF] = { .type = NLA_U32 }, 1777 [IFLA_PORT_PROFILE] = { .type = NLA_STRING, 1778 .len = PORT_PROFILE_MAX }, 1779 [IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY, 1780 .len = PORT_UUID_MAX }, 1781 [IFLA_PORT_HOST_UUID] = { .type = NLA_STRING, 1782 .len = PORT_UUID_MAX }, 1783 [IFLA_PORT_REQUEST] = { .type = NLA_U8, }, 1784 [IFLA_PORT_RESPONSE] = { .type = NLA_U16, }, 1785 1786 /* Unused, but we need to keep it here since user space could 1787 * fill it. It's also broken with regard to NLA_BINARY use in 1788 * combination with structs. 1789 */ 1790 [IFLA_PORT_VSI_TYPE] = { .type = NLA_BINARY, 1791 .len = sizeof(struct ifla_port_vsi) }, 1792 }; 1793 1794 static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = { 1795 [IFLA_XDP_FD] = { .type = NLA_S32 }, 1796 [IFLA_XDP_ATTACHED] = { .type = NLA_U8 }, 1797 [IFLA_XDP_FLAGS] = { .type = NLA_U32 }, 1798 [IFLA_XDP_PROG_ID] = { .type = NLA_U32 }, 1799 }; 1800 1801 static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla) 1802 { 1803 const struct rtnl_link_ops *ops = NULL; 1804 struct nlattr *linfo[IFLA_INFO_MAX + 1]; 1805 1806 if (nla_parse_nested(linfo, IFLA_INFO_MAX, nla, 1807 ifla_info_policy, NULL) < 0) 1808 return NULL; 1809 1810 if (linfo[IFLA_INFO_KIND]) { 1811 char kind[MODULE_NAME_LEN]; 1812 1813 nla_strlcpy(kind, linfo[IFLA_INFO_KIND], sizeof(kind)); 1814 ops = rtnl_link_ops_get(kind); 1815 } 1816 1817 return ops; 1818 } 1819 1820 static bool link_master_filtered(struct net_device *dev, int master_idx) 1821 { 1822 struct net_device *master; 1823 1824 if (!master_idx) 1825 return false; 1826 1827 master = netdev_master_upper_dev_get(dev); 1828 if (!master || master->ifindex != master_idx) 1829 return true; 1830 1831 return false; 1832 } 1833 1834 static bool link_kind_filtered(const struct net_device *dev, 1835 const struct rtnl_link_ops *kind_ops) 1836 { 1837 if (kind_ops && dev->rtnl_link_ops != kind_ops) 1838 return true; 1839 1840 return false; 1841 } 1842 1843 static bool link_dump_filtered(struct net_device *dev, 1844 int master_idx, 1845 const struct rtnl_link_ops *kind_ops) 1846 { 1847 if (link_master_filtered(dev, master_idx) || 1848 link_kind_filtered(dev, kind_ops)) 1849 return true; 1850 1851 return false; 1852 } 1853 1854 /** 1855 * rtnl_get_net_ns_capable - Get netns if sufficiently privileged. 1856 * @sk: netlink socket 1857 * @netnsid: network namespace identifier 1858 * 1859 * Returns the network namespace identified by netnsid on success or an error 1860 * pointer on failure. 1861 */ 1862 struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid) 1863 { 1864 struct net *net; 1865 1866 net = get_net_ns_by_id(sock_net(sk), netnsid); 1867 if (!net) 1868 return ERR_PTR(-EINVAL); 1869 1870 /* For now, the caller is required to have CAP_NET_ADMIN in 1871 * the user namespace owning the target net ns. 1872 */ 1873 if (!sk_ns_capable(sk, net->user_ns, CAP_NET_ADMIN)) { 1874 put_net(net); 1875 return ERR_PTR(-EACCES); 1876 } 1877 return net; 1878 } 1879 EXPORT_SYMBOL_GPL(rtnl_get_net_ns_capable); 1880 1881 static int rtnl_valid_dump_ifinfo_req(const struct nlmsghdr *nlh, 1882 bool strict_check, struct nlattr **tb, 1883 struct netlink_ext_ack *extack) 1884 { 1885 int hdrlen; 1886 1887 if (strict_check) { 1888 struct ifinfomsg *ifm; 1889 1890 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) { 1891 NL_SET_ERR_MSG(extack, "Invalid header for link dump"); 1892 return -EINVAL; 1893 } 1894 1895 ifm = nlmsg_data(nlh); 1896 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags || 1897 ifm->ifi_change) { 1898 NL_SET_ERR_MSG(extack, "Invalid values in header for link dump request"); 1899 return -EINVAL; 1900 } 1901 if (ifm->ifi_index) { 1902 NL_SET_ERR_MSG(extack, "Filter by device index not supported for link dumps"); 1903 return -EINVAL; 1904 } 1905 1906 return nlmsg_parse_strict(nlh, sizeof(*ifm), tb, IFLA_MAX, 1907 ifla_policy, extack); 1908 } 1909 1910 /* A hack to preserve kernel<->userspace interface. 1911 * The correct header is ifinfomsg. It is consistent with rtnl_getlink. 1912 * However, before Linux v3.9 the code here assumed rtgenmsg and that's 1913 * what iproute2 < v3.9.0 used. 1914 * We can detect the old iproute2. Even including the IFLA_EXT_MASK 1915 * attribute, its netlink message is shorter than struct ifinfomsg. 1916 */ 1917 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ? 1918 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg); 1919 1920 return nlmsg_parse(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, extack); 1921 } 1922 1923 static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) 1924 { 1925 struct netlink_ext_ack *extack = cb->extack; 1926 const struct nlmsghdr *nlh = cb->nlh; 1927 struct net *net = sock_net(skb->sk); 1928 struct net *tgt_net = net; 1929 int h, s_h; 1930 int idx = 0, s_idx; 1931 struct net_device *dev; 1932 struct hlist_head *head; 1933 struct nlattr *tb[IFLA_MAX+1]; 1934 u32 ext_filter_mask = 0; 1935 const struct rtnl_link_ops *kind_ops = NULL; 1936 unsigned int flags = NLM_F_MULTI; 1937 int master_idx = 0; 1938 int netnsid = -1; 1939 int err, i; 1940 1941 s_h = cb->args[0]; 1942 s_idx = cb->args[1]; 1943 1944 err = rtnl_valid_dump_ifinfo_req(nlh, cb->strict_check, tb, extack); 1945 if (err < 0) { 1946 if (cb->strict_check) 1947 return err; 1948 1949 goto walk_entries; 1950 } 1951 1952 for (i = 0; i <= IFLA_MAX; ++i) { 1953 if (!tb[i]) 1954 continue; 1955 1956 /* new attributes should only be added with strict checking */ 1957 switch (i) { 1958 case IFLA_TARGET_NETNSID: 1959 netnsid = nla_get_s32(tb[i]); 1960 tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid); 1961 if (IS_ERR(tgt_net)) { 1962 NL_SET_ERR_MSG(extack, "Invalid target network namespace id"); 1963 return PTR_ERR(tgt_net); 1964 } 1965 break; 1966 case IFLA_EXT_MASK: 1967 ext_filter_mask = nla_get_u32(tb[i]); 1968 break; 1969 case IFLA_MASTER: 1970 master_idx = nla_get_u32(tb[i]); 1971 break; 1972 case IFLA_LINKINFO: 1973 kind_ops = linkinfo_to_kind_ops(tb[i]); 1974 break; 1975 default: 1976 if (cb->strict_check) { 1977 NL_SET_ERR_MSG(extack, "Unsupported attribute in link dump request"); 1978 return -EINVAL; 1979 } 1980 } 1981 } 1982 1983 if (master_idx || kind_ops) 1984 flags |= NLM_F_DUMP_FILTERED; 1985 1986 walk_entries: 1987 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { 1988 idx = 0; 1989 head = &tgt_net->dev_index_head[h]; 1990 hlist_for_each_entry(dev, head, index_hlist) { 1991 if (link_dump_filtered(dev, master_idx, kind_ops)) 1992 goto cont; 1993 if (idx < s_idx) 1994 goto cont; 1995 err = rtnl_fill_ifinfo(skb, dev, net, 1996 RTM_NEWLINK, 1997 NETLINK_CB(cb->skb).portid, 1998 nlh->nlmsg_seq, 0, flags, 1999 ext_filter_mask, 0, NULL, 0, 2000 netnsid); 2001 2002 if (err < 0) { 2003 if (likely(skb->len)) 2004 goto out; 2005 2006 goto out_err; 2007 } 2008 cont: 2009 idx++; 2010 } 2011 } 2012 out: 2013 err = skb->len; 2014 out_err: 2015 cb->args[1] = idx; 2016 cb->args[0] = h; 2017 cb->seq = net->dev_base_seq; 2018 nl_dump_check_consistent(cb, nlmsg_hdr(skb)); 2019 if (netnsid >= 0) 2020 put_net(tgt_net); 2021 2022 return err; 2023 } 2024 2025 int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len, 2026 struct netlink_ext_ack *exterr) 2027 { 2028 return nla_parse(tb, IFLA_MAX, head, len, ifla_policy, exterr); 2029 } 2030 EXPORT_SYMBOL(rtnl_nla_parse_ifla); 2031 2032 struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]) 2033 { 2034 struct net *net; 2035 /* Examine the link attributes and figure out which 2036 * network namespace we are talking about. 2037 */ 2038 if (tb[IFLA_NET_NS_PID]) 2039 net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID])); 2040 else if (tb[IFLA_NET_NS_FD]) 2041 net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD])); 2042 else 2043 net = get_net(src_net); 2044 return net; 2045 } 2046 EXPORT_SYMBOL(rtnl_link_get_net); 2047 2048 /* Figure out which network namespace we are talking about by 2049 * examining the link attributes in the following order: 2050 * 2051 * 1. IFLA_NET_NS_PID 2052 * 2. IFLA_NET_NS_FD 2053 * 3. IFLA_TARGET_NETNSID 2054 */ 2055 static struct net *rtnl_link_get_net_by_nlattr(struct net *src_net, 2056 struct nlattr *tb[]) 2057 { 2058 struct net *net; 2059 2060 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]) 2061 return rtnl_link_get_net(src_net, tb); 2062 2063 if (!tb[IFLA_TARGET_NETNSID]) 2064 return get_net(src_net); 2065 2066 net = get_net_ns_by_id(src_net, nla_get_u32(tb[IFLA_TARGET_NETNSID])); 2067 if (!net) 2068 return ERR_PTR(-EINVAL); 2069 2070 return net; 2071 } 2072 2073 static struct net *rtnl_link_get_net_capable(const struct sk_buff *skb, 2074 struct net *src_net, 2075 struct nlattr *tb[], int cap) 2076 { 2077 struct net *net; 2078 2079 net = rtnl_link_get_net_by_nlattr(src_net, tb); 2080 if (IS_ERR(net)) 2081 return net; 2082 2083 if (!netlink_ns_capable(skb, net->user_ns, cap)) { 2084 put_net(net); 2085 return ERR_PTR(-EPERM); 2086 } 2087 2088 return net; 2089 } 2090 2091 /* Verify that rtnetlink requests do not pass additional properties 2092 * potentially referring to different network namespaces. 2093 */ 2094 static int rtnl_ensure_unique_netns(struct nlattr *tb[], 2095 struct netlink_ext_ack *extack, 2096 bool netns_id_only) 2097 { 2098 2099 if (netns_id_only) { 2100 if (!tb[IFLA_NET_NS_PID] && !tb[IFLA_NET_NS_FD]) 2101 return 0; 2102 2103 NL_SET_ERR_MSG(extack, "specified netns attribute not supported"); 2104 return -EOPNOTSUPP; 2105 } 2106 2107 if (tb[IFLA_TARGET_NETNSID] && (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD])) 2108 goto invalid_attr; 2109 2110 if (tb[IFLA_NET_NS_PID] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_FD])) 2111 goto invalid_attr; 2112 2113 if (tb[IFLA_NET_NS_FD] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_PID])) 2114 goto invalid_attr; 2115 2116 return 0; 2117 2118 invalid_attr: 2119 NL_SET_ERR_MSG(extack, "multiple netns identifying attributes specified"); 2120 return -EINVAL; 2121 } 2122 2123 static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[]) 2124 { 2125 if (dev) { 2126 if (tb[IFLA_ADDRESS] && 2127 nla_len(tb[IFLA_ADDRESS]) < dev->addr_len) 2128 return -EINVAL; 2129 2130 if (tb[IFLA_BROADCAST] && 2131 nla_len(tb[IFLA_BROADCAST]) < dev->addr_len) 2132 return -EINVAL; 2133 } 2134 2135 if (tb[IFLA_AF_SPEC]) { 2136 struct nlattr *af; 2137 int rem, err; 2138 2139 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) { 2140 const struct rtnl_af_ops *af_ops; 2141 2142 rcu_read_lock(); 2143 af_ops = rtnl_af_lookup(nla_type(af)); 2144 if (!af_ops) { 2145 rcu_read_unlock(); 2146 return -EAFNOSUPPORT; 2147 } 2148 2149 if (!af_ops->set_link_af) { 2150 rcu_read_unlock(); 2151 return -EOPNOTSUPP; 2152 } 2153 2154 if (af_ops->validate_link_af) { 2155 err = af_ops->validate_link_af(dev, af); 2156 if (err < 0) { 2157 rcu_read_unlock(); 2158 return err; 2159 } 2160 } 2161 2162 rcu_read_unlock(); 2163 } 2164 } 2165 2166 return 0; 2167 } 2168 2169 static int handle_infiniband_guid(struct net_device *dev, struct ifla_vf_guid *ivt, 2170 int guid_type) 2171 { 2172 const struct net_device_ops *ops = dev->netdev_ops; 2173 2174 return ops->ndo_set_vf_guid(dev, ivt->vf, ivt->guid, guid_type); 2175 } 2176 2177 static int handle_vf_guid(struct net_device *dev, struct ifla_vf_guid *ivt, int guid_type) 2178 { 2179 if (dev->type != ARPHRD_INFINIBAND) 2180 return -EOPNOTSUPP; 2181 2182 return handle_infiniband_guid(dev, ivt, guid_type); 2183 } 2184 2185 static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) 2186 { 2187 const struct net_device_ops *ops = dev->netdev_ops; 2188 int err = -EINVAL; 2189 2190 if (tb[IFLA_VF_MAC]) { 2191 struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]); 2192 2193 err = -EOPNOTSUPP; 2194 if (ops->ndo_set_vf_mac) 2195 err = ops->ndo_set_vf_mac(dev, ivm->vf, 2196 ivm->mac); 2197 if (err < 0) 2198 return err; 2199 } 2200 2201 if (tb[IFLA_VF_VLAN]) { 2202 struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]); 2203 2204 err = -EOPNOTSUPP; 2205 if (ops->ndo_set_vf_vlan) 2206 err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan, 2207 ivv->qos, 2208 htons(ETH_P_8021Q)); 2209 if (err < 0) 2210 return err; 2211 } 2212 2213 if (tb[IFLA_VF_VLAN_LIST]) { 2214 struct ifla_vf_vlan_info *ivvl[MAX_VLAN_LIST_LEN]; 2215 struct nlattr *attr; 2216 int rem, len = 0; 2217 2218 err = -EOPNOTSUPP; 2219 if (!ops->ndo_set_vf_vlan) 2220 return err; 2221 2222 nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) { 2223 if (nla_type(attr) != IFLA_VF_VLAN_INFO || 2224 nla_len(attr) < NLA_HDRLEN) { 2225 return -EINVAL; 2226 } 2227 if (len >= MAX_VLAN_LIST_LEN) 2228 return -EOPNOTSUPP; 2229 ivvl[len] = nla_data(attr); 2230 2231 len++; 2232 } 2233 if (len == 0) 2234 return -EINVAL; 2235 2236 err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan, 2237 ivvl[0]->qos, ivvl[0]->vlan_proto); 2238 if (err < 0) 2239 return err; 2240 } 2241 2242 if (tb[IFLA_VF_TX_RATE]) { 2243 struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]); 2244 struct ifla_vf_info ivf; 2245 2246 err = -EOPNOTSUPP; 2247 if (ops->ndo_get_vf_config) 2248 err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf); 2249 if (err < 0) 2250 return err; 2251 2252 err = -EOPNOTSUPP; 2253 if (ops->ndo_set_vf_rate) 2254 err = ops->ndo_set_vf_rate(dev, ivt->vf, 2255 ivf.min_tx_rate, 2256 ivt->rate); 2257 if (err < 0) 2258 return err; 2259 } 2260 2261 if (tb[IFLA_VF_RATE]) { 2262 struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]); 2263 2264 err = -EOPNOTSUPP; 2265 if (ops->ndo_set_vf_rate) 2266 err = ops->ndo_set_vf_rate(dev, ivt->vf, 2267 ivt->min_tx_rate, 2268 ivt->max_tx_rate); 2269 if (err < 0) 2270 return err; 2271 } 2272 2273 if (tb[IFLA_VF_SPOOFCHK]) { 2274 struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]); 2275 2276 err = -EOPNOTSUPP; 2277 if (ops->ndo_set_vf_spoofchk) 2278 err = ops->ndo_set_vf_spoofchk(dev, ivs->vf, 2279 ivs->setting); 2280 if (err < 0) 2281 return err; 2282 } 2283 2284 if (tb[IFLA_VF_LINK_STATE]) { 2285 struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]); 2286 2287 err = -EOPNOTSUPP; 2288 if (ops->ndo_set_vf_link_state) 2289 err = ops->ndo_set_vf_link_state(dev, ivl->vf, 2290 ivl->link_state); 2291 if (err < 0) 2292 return err; 2293 } 2294 2295 if (tb[IFLA_VF_RSS_QUERY_EN]) { 2296 struct ifla_vf_rss_query_en *ivrssq_en; 2297 2298 err = -EOPNOTSUPP; 2299 ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]); 2300 if (ops->ndo_set_vf_rss_query_en) 2301 err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf, 2302 ivrssq_en->setting); 2303 if (err < 0) 2304 return err; 2305 } 2306 2307 if (tb[IFLA_VF_TRUST]) { 2308 struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]); 2309 2310 err = -EOPNOTSUPP; 2311 if (ops->ndo_set_vf_trust) 2312 err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting); 2313 if (err < 0) 2314 return err; 2315 } 2316 2317 if (tb[IFLA_VF_IB_NODE_GUID]) { 2318 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]); 2319 2320 if (!ops->ndo_set_vf_guid) 2321 return -EOPNOTSUPP; 2322 2323 return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID); 2324 } 2325 2326 if (tb[IFLA_VF_IB_PORT_GUID]) { 2327 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]); 2328 2329 if (!ops->ndo_set_vf_guid) 2330 return -EOPNOTSUPP; 2331 2332 return handle_vf_guid(dev, ivt, IFLA_VF_IB_PORT_GUID); 2333 } 2334 2335 return err; 2336 } 2337 2338 static int do_set_master(struct net_device *dev, int ifindex, 2339 struct netlink_ext_ack *extack) 2340 { 2341 struct net_device *upper_dev = netdev_master_upper_dev_get(dev); 2342 const struct net_device_ops *ops; 2343 int err; 2344 2345 if (upper_dev) { 2346 if (upper_dev->ifindex == ifindex) 2347 return 0; 2348 ops = upper_dev->netdev_ops; 2349 if (ops->ndo_del_slave) { 2350 err = ops->ndo_del_slave(upper_dev, dev); 2351 if (err) 2352 return err; 2353 } else { 2354 return -EOPNOTSUPP; 2355 } 2356 } 2357 2358 if (ifindex) { 2359 upper_dev = __dev_get_by_index(dev_net(dev), ifindex); 2360 if (!upper_dev) 2361 return -EINVAL; 2362 ops = upper_dev->netdev_ops; 2363 if (ops->ndo_add_slave) { 2364 err = ops->ndo_add_slave(upper_dev, dev, extack); 2365 if (err) 2366 return err; 2367 } else { 2368 return -EOPNOTSUPP; 2369 } 2370 } 2371 return 0; 2372 } 2373 2374 #define DO_SETLINK_MODIFIED 0x01 2375 /* notify flag means notify + modified. */ 2376 #define DO_SETLINK_NOTIFY 0x03 2377 static int do_setlink(const struct sk_buff *skb, 2378 struct net_device *dev, struct ifinfomsg *ifm, 2379 struct netlink_ext_ack *extack, 2380 struct nlattr **tb, char *ifname, int status) 2381 { 2382 const struct net_device_ops *ops = dev->netdev_ops; 2383 int err; 2384 2385 err = validate_linkmsg(dev, tb); 2386 if (err < 0) 2387 return err; 2388 2389 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_TARGET_NETNSID]) { 2390 struct net *net = rtnl_link_get_net_capable(skb, dev_net(dev), 2391 tb, CAP_NET_ADMIN); 2392 if (IS_ERR(net)) { 2393 err = PTR_ERR(net); 2394 goto errout; 2395 } 2396 2397 err = dev_change_net_namespace(dev, net, ifname); 2398 put_net(net); 2399 if (err) 2400 goto errout; 2401 status |= DO_SETLINK_MODIFIED; 2402 } 2403 2404 if (tb[IFLA_MAP]) { 2405 struct rtnl_link_ifmap *u_map; 2406 struct ifmap k_map; 2407 2408 if (!ops->ndo_set_config) { 2409 err = -EOPNOTSUPP; 2410 goto errout; 2411 } 2412 2413 if (!netif_device_present(dev)) { 2414 err = -ENODEV; 2415 goto errout; 2416 } 2417 2418 u_map = nla_data(tb[IFLA_MAP]); 2419 k_map.mem_start = (unsigned long) u_map->mem_start; 2420 k_map.mem_end = (unsigned long) u_map->mem_end; 2421 k_map.base_addr = (unsigned short) u_map->base_addr; 2422 k_map.irq = (unsigned char) u_map->irq; 2423 k_map.dma = (unsigned char) u_map->dma; 2424 k_map.port = (unsigned char) u_map->port; 2425 2426 err = ops->ndo_set_config(dev, &k_map); 2427 if (err < 0) 2428 goto errout; 2429 2430 status |= DO_SETLINK_NOTIFY; 2431 } 2432 2433 if (tb[IFLA_ADDRESS]) { 2434 struct sockaddr *sa; 2435 int len; 2436 2437 len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len, 2438 sizeof(*sa)); 2439 sa = kmalloc(len, GFP_KERNEL); 2440 if (!sa) { 2441 err = -ENOMEM; 2442 goto errout; 2443 } 2444 sa->sa_family = dev->type; 2445 memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]), 2446 dev->addr_len); 2447 err = dev_set_mac_address(dev, sa, extack); 2448 kfree(sa); 2449 if (err) 2450 goto errout; 2451 status |= DO_SETLINK_MODIFIED; 2452 } 2453 2454 if (tb[IFLA_MTU]) { 2455 err = dev_set_mtu_ext(dev, nla_get_u32(tb[IFLA_MTU]), extack); 2456 if (err < 0) 2457 goto errout; 2458 status |= DO_SETLINK_MODIFIED; 2459 } 2460 2461 if (tb[IFLA_GROUP]) { 2462 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP])); 2463 status |= DO_SETLINK_NOTIFY; 2464 } 2465 2466 /* 2467 * Interface selected by interface index but interface 2468 * name provided implies that a name change has been 2469 * requested. 2470 */ 2471 if (ifm->ifi_index > 0 && ifname[0]) { 2472 err = dev_change_name(dev, ifname); 2473 if (err < 0) 2474 goto errout; 2475 status |= DO_SETLINK_MODIFIED; 2476 } 2477 2478 if (tb[IFLA_IFALIAS]) { 2479 err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]), 2480 nla_len(tb[IFLA_IFALIAS])); 2481 if (err < 0) 2482 goto errout; 2483 status |= DO_SETLINK_NOTIFY; 2484 } 2485 2486 if (tb[IFLA_BROADCAST]) { 2487 nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len); 2488 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); 2489 } 2490 2491 if (ifm->ifi_flags || ifm->ifi_change) { 2492 err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm), 2493 extack); 2494 if (err < 0) 2495 goto errout; 2496 } 2497 2498 if (tb[IFLA_MASTER]) { 2499 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack); 2500 if (err) 2501 goto errout; 2502 status |= DO_SETLINK_MODIFIED; 2503 } 2504 2505 if (tb[IFLA_CARRIER]) { 2506 err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER])); 2507 if (err) 2508 goto errout; 2509 status |= DO_SETLINK_MODIFIED; 2510 } 2511 2512 if (tb[IFLA_TXQLEN]) { 2513 unsigned int value = nla_get_u32(tb[IFLA_TXQLEN]); 2514 2515 err = dev_change_tx_queue_len(dev, value); 2516 if (err) 2517 goto errout; 2518 status |= DO_SETLINK_MODIFIED; 2519 } 2520 2521 if (tb[IFLA_GSO_MAX_SIZE]) { 2522 u32 max_size = nla_get_u32(tb[IFLA_GSO_MAX_SIZE]); 2523 2524 if (max_size > GSO_MAX_SIZE) { 2525 err = -EINVAL; 2526 goto errout; 2527 } 2528 2529 if (dev->gso_max_size ^ max_size) { 2530 netif_set_gso_max_size(dev, max_size); 2531 status |= DO_SETLINK_MODIFIED; 2532 } 2533 } 2534 2535 if (tb[IFLA_GSO_MAX_SEGS]) { 2536 u32 max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]); 2537 2538 if (max_segs > GSO_MAX_SEGS) { 2539 err = -EINVAL; 2540 goto errout; 2541 } 2542 2543 if (dev->gso_max_segs ^ max_segs) { 2544 dev->gso_max_segs = max_segs; 2545 status |= DO_SETLINK_MODIFIED; 2546 } 2547 } 2548 2549 if (tb[IFLA_OPERSTATE]) 2550 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE])); 2551 2552 if (tb[IFLA_LINKMODE]) { 2553 unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]); 2554 2555 write_lock_bh(&dev_base_lock); 2556 if (dev->link_mode ^ value) 2557 status |= DO_SETLINK_NOTIFY; 2558 dev->link_mode = value; 2559 write_unlock_bh(&dev_base_lock); 2560 } 2561 2562 if (tb[IFLA_VFINFO_LIST]) { 2563 struct nlattr *vfinfo[IFLA_VF_MAX + 1]; 2564 struct nlattr *attr; 2565 int rem; 2566 2567 nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) { 2568 if (nla_type(attr) != IFLA_VF_INFO || 2569 nla_len(attr) < NLA_HDRLEN) { 2570 err = -EINVAL; 2571 goto errout; 2572 } 2573 err = nla_parse_nested(vfinfo, IFLA_VF_MAX, attr, 2574 ifla_vf_policy, NULL); 2575 if (err < 0) 2576 goto errout; 2577 err = do_setvfinfo(dev, vfinfo); 2578 if (err < 0) 2579 goto errout; 2580 status |= DO_SETLINK_NOTIFY; 2581 } 2582 } 2583 err = 0; 2584 2585 if (tb[IFLA_VF_PORTS]) { 2586 struct nlattr *port[IFLA_PORT_MAX+1]; 2587 struct nlattr *attr; 2588 int vf; 2589 int rem; 2590 2591 err = -EOPNOTSUPP; 2592 if (!ops->ndo_set_vf_port) 2593 goto errout; 2594 2595 nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) { 2596 if (nla_type(attr) != IFLA_VF_PORT || 2597 nla_len(attr) < NLA_HDRLEN) { 2598 err = -EINVAL; 2599 goto errout; 2600 } 2601 err = nla_parse_nested(port, IFLA_PORT_MAX, attr, 2602 ifla_port_policy, NULL); 2603 if (err < 0) 2604 goto errout; 2605 if (!port[IFLA_PORT_VF]) { 2606 err = -EOPNOTSUPP; 2607 goto errout; 2608 } 2609 vf = nla_get_u32(port[IFLA_PORT_VF]); 2610 err = ops->ndo_set_vf_port(dev, vf, port); 2611 if (err < 0) 2612 goto errout; 2613 status |= DO_SETLINK_NOTIFY; 2614 } 2615 } 2616 err = 0; 2617 2618 if (tb[IFLA_PORT_SELF]) { 2619 struct nlattr *port[IFLA_PORT_MAX+1]; 2620 2621 err = nla_parse_nested(port, IFLA_PORT_MAX, 2622 tb[IFLA_PORT_SELF], ifla_port_policy, 2623 NULL); 2624 if (err < 0) 2625 goto errout; 2626 2627 err = -EOPNOTSUPP; 2628 if (ops->ndo_set_vf_port) 2629 err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port); 2630 if (err < 0) 2631 goto errout; 2632 status |= DO_SETLINK_NOTIFY; 2633 } 2634 2635 if (tb[IFLA_AF_SPEC]) { 2636 struct nlattr *af; 2637 int rem; 2638 2639 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) { 2640 const struct rtnl_af_ops *af_ops; 2641 2642 rcu_read_lock(); 2643 2644 BUG_ON(!(af_ops = rtnl_af_lookup(nla_type(af)))); 2645 2646 err = af_ops->set_link_af(dev, af); 2647 if (err < 0) { 2648 rcu_read_unlock(); 2649 goto errout; 2650 } 2651 2652 rcu_read_unlock(); 2653 status |= DO_SETLINK_NOTIFY; 2654 } 2655 } 2656 err = 0; 2657 2658 if (tb[IFLA_PROTO_DOWN]) { 2659 err = dev_change_proto_down(dev, 2660 nla_get_u8(tb[IFLA_PROTO_DOWN])); 2661 if (err) 2662 goto errout; 2663 status |= DO_SETLINK_NOTIFY; 2664 } 2665 2666 if (tb[IFLA_XDP]) { 2667 struct nlattr *xdp[IFLA_XDP_MAX + 1]; 2668 u32 xdp_flags = 0; 2669 2670 err = nla_parse_nested(xdp, IFLA_XDP_MAX, tb[IFLA_XDP], 2671 ifla_xdp_policy, NULL); 2672 if (err < 0) 2673 goto errout; 2674 2675 if (xdp[IFLA_XDP_ATTACHED] || xdp[IFLA_XDP_PROG_ID]) { 2676 err = -EINVAL; 2677 goto errout; 2678 } 2679 2680 if (xdp[IFLA_XDP_FLAGS]) { 2681 xdp_flags = nla_get_u32(xdp[IFLA_XDP_FLAGS]); 2682 if (xdp_flags & ~XDP_FLAGS_MASK) { 2683 err = -EINVAL; 2684 goto errout; 2685 } 2686 if (hweight32(xdp_flags & XDP_FLAGS_MODES) > 1) { 2687 err = -EINVAL; 2688 goto errout; 2689 } 2690 } 2691 2692 if (xdp[IFLA_XDP_FD]) { 2693 err = dev_change_xdp_fd(dev, extack, 2694 nla_get_s32(xdp[IFLA_XDP_FD]), 2695 xdp_flags); 2696 if (err) 2697 goto errout; 2698 status |= DO_SETLINK_NOTIFY; 2699 } 2700 } 2701 2702 errout: 2703 if (status & DO_SETLINK_MODIFIED) { 2704 if ((status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY) 2705 netdev_state_change(dev); 2706 2707 if (err < 0) 2708 net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n", 2709 dev->name); 2710 } 2711 2712 return err; 2713 } 2714 2715 static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, 2716 struct netlink_ext_ack *extack) 2717 { 2718 struct net *net = sock_net(skb->sk); 2719 struct ifinfomsg *ifm; 2720 struct net_device *dev; 2721 int err; 2722 struct nlattr *tb[IFLA_MAX+1]; 2723 char ifname[IFNAMSIZ]; 2724 2725 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, 2726 extack); 2727 if (err < 0) 2728 goto errout; 2729 2730 err = rtnl_ensure_unique_netns(tb, extack, false); 2731 if (err < 0) 2732 goto errout; 2733 2734 if (tb[IFLA_IFNAME]) 2735 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ); 2736 else 2737 ifname[0] = '\0'; 2738 2739 err = -EINVAL; 2740 ifm = nlmsg_data(nlh); 2741 if (ifm->ifi_index > 0) 2742 dev = __dev_get_by_index(net, ifm->ifi_index); 2743 else if (tb[IFLA_IFNAME]) 2744 dev = __dev_get_by_name(net, ifname); 2745 else 2746 goto errout; 2747 2748 if (dev == NULL) { 2749 err = -ENODEV; 2750 goto errout; 2751 } 2752 2753 err = do_setlink(skb, dev, ifm, extack, tb, ifname, 0); 2754 errout: 2755 return err; 2756 } 2757 2758 static int rtnl_group_dellink(const struct net *net, int group) 2759 { 2760 struct net_device *dev, *aux; 2761 LIST_HEAD(list_kill); 2762 bool found = false; 2763 2764 if (!group) 2765 return -EPERM; 2766 2767 for_each_netdev(net, dev) { 2768 if (dev->group == group) { 2769 const struct rtnl_link_ops *ops; 2770 2771 found = true; 2772 ops = dev->rtnl_link_ops; 2773 if (!ops || !ops->dellink) 2774 return -EOPNOTSUPP; 2775 } 2776 } 2777 2778 if (!found) 2779 return -ENODEV; 2780 2781 for_each_netdev_safe(net, dev, aux) { 2782 if (dev->group == group) { 2783 const struct rtnl_link_ops *ops; 2784 2785 ops = dev->rtnl_link_ops; 2786 ops->dellink(dev, &list_kill); 2787 } 2788 } 2789 unregister_netdevice_many(&list_kill); 2790 2791 return 0; 2792 } 2793 2794 int rtnl_delete_link(struct net_device *dev) 2795 { 2796 const struct rtnl_link_ops *ops; 2797 LIST_HEAD(list_kill); 2798 2799 ops = dev->rtnl_link_ops; 2800 if (!ops || !ops->dellink) 2801 return -EOPNOTSUPP; 2802 2803 ops->dellink(dev, &list_kill); 2804 unregister_netdevice_many(&list_kill); 2805 2806 return 0; 2807 } 2808 EXPORT_SYMBOL_GPL(rtnl_delete_link); 2809 2810 static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, 2811 struct netlink_ext_ack *extack) 2812 { 2813 struct net *net = sock_net(skb->sk); 2814 struct net *tgt_net = net; 2815 struct net_device *dev = NULL; 2816 struct ifinfomsg *ifm; 2817 char ifname[IFNAMSIZ]; 2818 struct nlattr *tb[IFLA_MAX+1]; 2819 int err; 2820 int netnsid = -1; 2821 2822 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack); 2823 if (err < 0) 2824 return err; 2825 2826 err = rtnl_ensure_unique_netns(tb, extack, true); 2827 if (err < 0) 2828 return err; 2829 2830 if (tb[IFLA_IFNAME]) 2831 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ); 2832 2833 if (tb[IFLA_TARGET_NETNSID]) { 2834 netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]); 2835 tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid); 2836 if (IS_ERR(tgt_net)) 2837 return PTR_ERR(tgt_net); 2838 } 2839 2840 err = -EINVAL; 2841 ifm = nlmsg_data(nlh); 2842 if (ifm->ifi_index > 0) 2843 dev = __dev_get_by_index(tgt_net, ifm->ifi_index); 2844 else if (tb[IFLA_IFNAME]) 2845 dev = __dev_get_by_name(tgt_net, ifname); 2846 else if (tb[IFLA_GROUP]) 2847 err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP])); 2848 else 2849 goto out; 2850 2851 if (!dev) { 2852 if (tb[IFLA_IFNAME] || ifm->ifi_index > 0) 2853 err = -ENODEV; 2854 2855 goto out; 2856 } 2857 2858 err = rtnl_delete_link(dev); 2859 2860 out: 2861 if (netnsid >= 0) 2862 put_net(tgt_net); 2863 2864 return err; 2865 } 2866 2867 int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm) 2868 { 2869 unsigned int old_flags; 2870 int err; 2871 2872 old_flags = dev->flags; 2873 if (ifm && (ifm->ifi_flags || ifm->ifi_change)) { 2874 err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm), 2875 NULL); 2876 if (err < 0) 2877 return err; 2878 } 2879 2880 if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) { 2881 __dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags)); 2882 } else { 2883 dev->rtnl_link_state = RTNL_LINK_INITIALIZED; 2884 __dev_notify_flags(dev, old_flags, ~0U); 2885 } 2886 return 0; 2887 } 2888 EXPORT_SYMBOL(rtnl_configure_link); 2889 2890 struct net_device *rtnl_create_link(struct net *net, const char *ifname, 2891 unsigned char name_assign_type, 2892 const struct rtnl_link_ops *ops, 2893 struct nlattr *tb[], 2894 struct netlink_ext_ack *extack) 2895 { 2896 struct net_device *dev; 2897 unsigned int num_tx_queues = 1; 2898 unsigned int num_rx_queues = 1; 2899 2900 if (tb[IFLA_NUM_TX_QUEUES]) 2901 num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]); 2902 else if (ops->get_num_tx_queues) 2903 num_tx_queues = ops->get_num_tx_queues(); 2904 2905 if (tb[IFLA_NUM_RX_QUEUES]) 2906 num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]); 2907 else if (ops->get_num_rx_queues) 2908 num_rx_queues = ops->get_num_rx_queues(); 2909 2910 if (num_tx_queues < 1 || num_tx_queues > 4096) { 2911 NL_SET_ERR_MSG(extack, "Invalid number of transmit queues"); 2912 return ERR_PTR(-EINVAL); 2913 } 2914 2915 if (num_rx_queues < 1 || num_rx_queues > 4096) { 2916 NL_SET_ERR_MSG(extack, "Invalid number of receive queues"); 2917 return ERR_PTR(-EINVAL); 2918 } 2919 2920 dev = alloc_netdev_mqs(ops->priv_size, ifname, name_assign_type, 2921 ops->setup, num_tx_queues, num_rx_queues); 2922 if (!dev) 2923 return ERR_PTR(-ENOMEM); 2924 2925 dev_net_set(dev, net); 2926 dev->rtnl_link_ops = ops; 2927 dev->rtnl_link_state = RTNL_LINK_INITIALIZING; 2928 2929 if (tb[IFLA_MTU]) 2930 dev->mtu = nla_get_u32(tb[IFLA_MTU]); 2931 if (tb[IFLA_ADDRESS]) { 2932 memcpy(dev->dev_addr, nla_data(tb[IFLA_ADDRESS]), 2933 nla_len(tb[IFLA_ADDRESS])); 2934 dev->addr_assign_type = NET_ADDR_SET; 2935 } 2936 if (tb[IFLA_BROADCAST]) 2937 memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]), 2938 nla_len(tb[IFLA_BROADCAST])); 2939 if (tb[IFLA_TXQLEN]) 2940 dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]); 2941 if (tb[IFLA_OPERSTATE]) 2942 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE])); 2943 if (tb[IFLA_LINKMODE]) 2944 dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]); 2945 if (tb[IFLA_GROUP]) 2946 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP])); 2947 if (tb[IFLA_GSO_MAX_SIZE]) 2948 netif_set_gso_max_size(dev, nla_get_u32(tb[IFLA_GSO_MAX_SIZE])); 2949 if (tb[IFLA_GSO_MAX_SEGS]) 2950 dev->gso_max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]); 2951 2952 return dev; 2953 } 2954 EXPORT_SYMBOL(rtnl_create_link); 2955 2956 static int rtnl_group_changelink(const struct sk_buff *skb, 2957 struct net *net, int group, 2958 struct ifinfomsg *ifm, 2959 struct netlink_ext_ack *extack, 2960 struct nlattr **tb) 2961 { 2962 struct net_device *dev, *aux; 2963 int err; 2964 2965 for_each_netdev_safe(net, dev, aux) { 2966 if (dev->group == group) { 2967 err = do_setlink(skb, dev, ifm, extack, tb, NULL, 0); 2968 if (err < 0) 2969 return err; 2970 } 2971 } 2972 2973 return 0; 2974 } 2975 2976 static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, 2977 struct nlattr **attr, struct netlink_ext_ack *extack) 2978 { 2979 struct nlattr *slave_attr[RTNL_SLAVE_MAX_TYPE + 1]; 2980 unsigned char name_assign_type = NET_NAME_USER; 2981 struct nlattr *linkinfo[IFLA_INFO_MAX + 1]; 2982 const struct rtnl_link_ops *m_ops = NULL; 2983 struct net_device *master_dev = NULL; 2984 struct net *net = sock_net(skb->sk); 2985 const struct rtnl_link_ops *ops; 2986 struct nlattr *tb[IFLA_MAX + 1]; 2987 struct net *dest_net, *link_net; 2988 struct nlattr **slave_data; 2989 char kind[MODULE_NAME_LEN]; 2990 struct net_device *dev; 2991 struct ifinfomsg *ifm; 2992 char ifname[IFNAMSIZ]; 2993 struct nlattr **data; 2994 int err; 2995 2996 #ifdef CONFIG_MODULES 2997 replay: 2998 #endif 2999 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack); 3000 if (err < 0) 3001 return err; 3002 3003 err = rtnl_ensure_unique_netns(tb, extack, false); 3004 if (err < 0) 3005 return err; 3006 3007 if (tb[IFLA_IFNAME]) 3008 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ); 3009 else 3010 ifname[0] = '\0'; 3011 3012 ifm = nlmsg_data(nlh); 3013 if (ifm->ifi_index > 0) 3014 dev = __dev_get_by_index(net, ifm->ifi_index); 3015 else { 3016 if (ifname[0]) 3017 dev = __dev_get_by_name(net, ifname); 3018 else 3019 dev = NULL; 3020 } 3021 3022 if (dev) { 3023 master_dev = netdev_master_upper_dev_get(dev); 3024 if (master_dev) 3025 m_ops = master_dev->rtnl_link_ops; 3026 } 3027 3028 err = validate_linkmsg(dev, tb); 3029 if (err < 0) 3030 return err; 3031 3032 if (tb[IFLA_LINKINFO]) { 3033 err = nla_parse_nested(linkinfo, IFLA_INFO_MAX, 3034 tb[IFLA_LINKINFO], ifla_info_policy, 3035 NULL); 3036 if (err < 0) 3037 return err; 3038 } else 3039 memset(linkinfo, 0, sizeof(linkinfo)); 3040 3041 if (linkinfo[IFLA_INFO_KIND]) { 3042 nla_strlcpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind)); 3043 ops = rtnl_link_ops_get(kind); 3044 } else { 3045 kind[0] = '\0'; 3046 ops = NULL; 3047 } 3048 3049 data = NULL; 3050 if (ops) { 3051 if (ops->maxtype > RTNL_MAX_TYPE) 3052 return -EINVAL; 3053 3054 if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) { 3055 err = nla_parse_nested(attr, ops->maxtype, 3056 linkinfo[IFLA_INFO_DATA], 3057 ops->policy, extack); 3058 if (err < 0) 3059 return err; 3060 data = attr; 3061 } 3062 if (ops->validate) { 3063 err = ops->validate(tb, data, extack); 3064 if (err < 0) 3065 return err; 3066 } 3067 } 3068 3069 slave_data = NULL; 3070 if (m_ops) { 3071 if (m_ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE) 3072 return -EINVAL; 3073 3074 if (m_ops->slave_maxtype && 3075 linkinfo[IFLA_INFO_SLAVE_DATA]) { 3076 err = nla_parse_nested(slave_attr, m_ops->slave_maxtype, 3077 linkinfo[IFLA_INFO_SLAVE_DATA], 3078 m_ops->slave_policy, extack); 3079 if (err < 0) 3080 return err; 3081 slave_data = slave_attr; 3082 } 3083 } 3084 3085 if (dev) { 3086 int status = 0; 3087 3088 if (nlh->nlmsg_flags & NLM_F_EXCL) 3089 return -EEXIST; 3090 if (nlh->nlmsg_flags & NLM_F_REPLACE) 3091 return -EOPNOTSUPP; 3092 3093 if (linkinfo[IFLA_INFO_DATA]) { 3094 if (!ops || ops != dev->rtnl_link_ops || 3095 !ops->changelink) 3096 return -EOPNOTSUPP; 3097 3098 err = ops->changelink(dev, tb, data, extack); 3099 if (err < 0) 3100 return err; 3101 status |= DO_SETLINK_NOTIFY; 3102 } 3103 3104 if (linkinfo[IFLA_INFO_SLAVE_DATA]) { 3105 if (!m_ops || !m_ops->slave_changelink) 3106 return -EOPNOTSUPP; 3107 3108 err = m_ops->slave_changelink(master_dev, dev, tb, 3109 slave_data, extack); 3110 if (err < 0) 3111 return err; 3112 status |= DO_SETLINK_NOTIFY; 3113 } 3114 3115 return do_setlink(skb, dev, ifm, extack, tb, ifname, status); 3116 } 3117 3118 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) { 3119 if (ifm->ifi_index == 0 && tb[IFLA_GROUP]) 3120 return rtnl_group_changelink(skb, net, 3121 nla_get_u32(tb[IFLA_GROUP]), 3122 ifm, extack, tb); 3123 return -ENODEV; 3124 } 3125 3126 if (tb[IFLA_MAP] || tb[IFLA_PROTINFO]) 3127 return -EOPNOTSUPP; 3128 3129 if (!ops) { 3130 #ifdef CONFIG_MODULES 3131 if (kind[0]) { 3132 __rtnl_unlock(); 3133 request_module("rtnl-link-%s", kind); 3134 rtnl_lock(); 3135 ops = rtnl_link_ops_get(kind); 3136 if (ops) 3137 goto replay; 3138 } 3139 #endif 3140 NL_SET_ERR_MSG(extack, "Unknown device type"); 3141 return -EOPNOTSUPP; 3142 } 3143 3144 if (!ops->setup) 3145 return -EOPNOTSUPP; 3146 3147 if (!ifname[0]) { 3148 snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind); 3149 name_assign_type = NET_NAME_ENUM; 3150 } 3151 3152 dest_net = rtnl_link_get_net_capable(skb, net, tb, CAP_NET_ADMIN); 3153 if (IS_ERR(dest_net)) 3154 return PTR_ERR(dest_net); 3155 3156 if (tb[IFLA_LINK_NETNSID]) { 3157 int id = nla_get_s32(tb[IFLA_LINK_NETNSID]); 3158 3159 link_net = get_net_ns_by_id(dest_net, id); 3160 if (!link_net) { 3161 NL_SET_ERR_MSG(extack, "Unknown network namespace id"); 3162 err = -EINVAL; 3163 goto out; 3164 } 3165 err = -EPERM; 3166 if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN)) 3167 goto out; 3168 } else { 3169 link_net = NULL; 3170 } 3171 3172 dev = rtnl_create_link(link_net ? : dest_net, ifname, 3173 name_assign_type, ops, tb, extack); 3174 if (IS_ERR(dev)) { 3175 err = PTR_ERR(dev); 3176 goto out; 3177 } 3178 3179 dev->ifindex = ifm->ifi_index; 3180 3181 if (ops->newlink) { 3182 err = ops->newlink(link_net ? : net, dev, tb, data, extack); 3183 /* Drivers should call free_netdev() in ->destructor 3184 * and unregister it on failure after registration 3185 * so that device could be finally freed in rtnl_unlock. 3186 */ 3187 if (err < 0) { 3188 /* If device is not registered at all, free it now */ 3189 if (dev->reg_state == NETREG_UNINITIALIZED) 3190 free_netdev(dev); 3191 goto out; 3192 } 3193 } else { 3194 err = register_netdevice(dev); 3195 if (err < 0) { 3196 free_netdev(dev); 3197 goto out; 3198 } 3199 } 3200 err = rtnl_configure_link(dev, ifm); 3201 if (err < 0) 3202 goto out_unregister; 3203 if (link_net) { 3204 err = dev_change_net_namespace(dev, dest_net, ifname); 3205 if (err < 0) 3206 goto out_unregister; 3207 } 3208 if (tb[IFLA_MASTER]) { 3209 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack); 3210 if (err) 3211 goto out_unregister; 3212 } 3213 out: 3214 if (link_net) 3215 put_net(link_net); 3216 put_net(dest_net); 3217 return err; 3218 out_unregister: 3219 if (ops->newlink) { 3220 LIST_HEAD(list_kill); 3221 3222 ops->dellink(dev, &list_kill); 3223 unregister_netdevice_many(&list_kill); 3224 } else { 3225 unregister_netdevice(dev); 3226 } 3227 goto out; 3228 } 3229 3230 static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, 3231 struct netlink_ext_ack *extack) 3232 { 3233 struct nlattr **attr; 3234 int ret; 3235 3236 attr = kmalloc_array(RTNL_MAX_TYPE + 1, sizeof(*attr), GFP_KERNEL); 3237 if (!attr) 3238 return -ENOMEM; 3239 3240 ret = __rtnl_newlink(skb, nlh, attr, extack); 3241 kfree(attr); 3242 return ret; 3243 } 3244 3245 static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh, 3246 struct netlink_ext_ack *extack) 3247 { 3248 struct net *net = sock_net(skb->sk); 3249 struct net *tgt_net = net; 3250 struct ifinfomsg *ifm; 3251 char ifname[IFNAMSIZ]; 3252 struct nlattr *tb[IFLA_MAX+1]; 3253 struct net_device *dev = NULL; 3254 struct sk_buff *nskb; 3255 int netnsid = -1; 3256 int err; 3257 u32 ext_filter_mask = 0; 3258 3259 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack); 3260 if (err < 0) 3261 return err; 3262 3263 err = rtnl_ensure_unique_netns(tb, extack, true); 3264 if (err < 0) 3265 return err; 3266 3267 if (tb[IFLA_TARGET_NETNSID]) { 3268 netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]); 3269 tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid); 3270 if (IS_ERR(tgt_net)) 3271 return PTR_ERR(tgt_net); 3272 } 3273 3274 if (tb[IFLA_IFNAME]) 3275 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ); 3276 3277 if (tb[IFLA_EXT_MASK]) 3278 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); 3279 3280 err = -EINVAL; 3281 ifm = nlmsg_data(nlh); 3282 if (ifm->ifi_index > 0) 3283 dev = __dev_get_by_index(tgt_net, ifm->ifi_index); 3284 else if (tb[IFLA_IFNAME]) 3285 dev = __dev_get_by_name(tgt_net, ifname); 3286 else 3287 goto out; 3288 3289 err = -ENODEV; 3290 if (dev == NULL) 3291 goto out; 3292 3293 err = -ENOBUFS; 3294 nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL); 3295 if (nskb == NULL) 3296 goto out; 3297 3298 err = rtnl_fill_ifinfo(nskb, dev, net, 3299 RTM_NEWLINK, NETLINK_CB(skb).portid, 3300 nlh->nlmsg_seq, 0, 0, ext_filter_mask, 3301 0, NULL, 0, netnsid); 3302 if (err < 0) { 3303 /* -EMSGSIZE implies BUG in if_nlmsg_size */ 3304 WARN_ON(err == -EMSGSIZE); 3305 kfree_skb(nskb); 3306 } else 3307 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid); 3308 out: 3309 if (netnsid >= 0) 3310 put_net(tgt_net); 3311 3312 return err; 3313 } 3314 3315 static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh) 3316 { 3317 struct net *net = sock_net(skb->sk); 3318 struct net_device *dev; 3319 struct nlattr *tb[IFLA_MAX+1]; 3320 u32 ext_filter_mask = 0; 3321 u16 min_ifinfo_dump_size = 0; 3322 int hdrlen; 3323 3324 /* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */ 3325 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ? 3326 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg); 3327 3328 if (nlmsg_parse(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, NULL) >= 0) { 3329 if (tb[IFLA_EXT_MASK]) 3330 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); 3331 } 3332 3333 if (!ext_filter_mask) 3334 return NLMSG_GOODSIZE; 3335 /* 3336 * traverse the list of net devices and compute the minimum 3337 * buffer size based upon the filter mask. 3338 */ 3339 rcu_read_lock(); 3340 for_each_netdev_rcu(net, dev) { 3341 min_ifinfo_dump_size = max_t(u16, min_ifinfo_dump_size, 3342 if_nlmsg_size(dev, 3343 ext_filter_mask)); 3344 } 3345 rcu_read_unlock(); 3346 3347 return nlmsg_total_size(min_ifinfo_dump_size); 3348 } 3349 3350 static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb) 3351 { 3352 int idx; 3353 int s_idx = cb->family; 3354 int type = cb->nlh->nlmsg_type - RTM_BASE; 3355 int ret = 0; 3356 3357 if (s_idx == 0) 3358 s_idx = 1; 3359 3360 for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) { 3361 struct rtnl_link **tab; 3362 struct rtnl_link *link; 3363 rtnl_dumpit_func dumpit; 3364 3365 if (idx < s_idx || idx == PF_PACKET) 3366 continue; 3367 3368 if (type < 0 || type >= RTM_NR_MSGTYPES) 3369 continue; 3370 3371 tab = rcu_dereference_rtnl(rtnl_msg_handlers[idx]); 3372 if (!tab) 3373 continue; 3374 3375 link = tab[type]; 3376 if (!link) 3377 continue; 3378 3379 dumpit = link->dumpit; 3380 if (!dumpit) 3381 continue; 3382 3383 if (idx > s_idx) { 3384 memset(&cb->args[0], 0, sizeof(cb->args)); 3385 cb->prev_seq = 0; 3386 cb->seq = 0; 3387 } 3388 ret = dumpit(skb, cb); 3389 if (ret) 3390 break; 3391 } 3392 cb->family = idx; 3393 3394 return skb->len ? : ret; 3395 } 3396 3397 struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev, 3398 unsigned int change, 3399 u32 event, gfp_t flags, int *new_nsid, 3400 int new_ifindex) 3401 { 3402 struct net *net = dev_net(dev); 3403 struct sk_buff *skb; 3404 int err = -ENOBUFS; 3405 size_t if_info_size; 3406 3407 skb = nlmsg_new((if_info_size = if_nlmsg_size(dev, 0)), flags); 3408 if (skb == NULL) 3409 goto errout; 3410 3411 err = rtnl_fill_ifinfo(skb, dev, dev_net(dev), 3412 type, 0, 0, change, 0, 0, event, 3413 new_nsid, new_ifindex, -1); 3414 if (err < 0) { 3415 /* -EMSGSIZE implies BUG in if_nlmsg_size() */ 3416 WARN_ON(err == -EMSGSIZE); 3417 kfree_skb(skb); 3418 goto errout; 3419 } 3420 return skb; 3421 errout: 3422 if (err < 0) 3423 rtnl_set_sk_err(net, RTNLGRP_LINK, err); 3424 return NULL; 3425 } 3426 3427 void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags) 3428 { 3429 struct net *net = dev_net(dev); 3430 3431 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, flags); 3432 } 3433 3434 static void rtmsg_ifinfo_event(int type, struct net_device *dev, 3435 unsigned int change, u32 event, 3436 gfp_t flags, int *new_nsid, int new_ifindex) 3437 { 3438 struct sk_buff *skb; 3439 3440 if (dev->reg_state != NETREG_REGISTERED) 3441 return; 3442 3443 skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid, 3444 new_ifindex); 3445 if (skb) 3446 rtmsg_ifinfo_send(skb, dev, flags); 3447 } 3448 3449 void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change, 3450 gfp_t flags) 3451 { 3452 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags, 3453 NULL, 0); 3454 } 3455 3456 void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change, 3457 gfp_t flags, int *new_nsid, int new_ifindex) 3458 { 3459 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags, 3460 new_nsid, new_ifindex); 3461 } 3462 3463 static int nlmsg_populate_fdb_fill(struct sk_buff *skb, 3464 struct net_device *dev, 3465 u8 *addr, u16 vid, u32 pid, u32 seq, 3466 int type, unsigned int flags, 3467 int nlflags, u16 ndm_state) 3468 { 3469 struct nlmsghdr *nlh; 3470 struct ndmsg *ndm; 3471 3472 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags); 3473 if (!nlh) 3474 return -EMSGSIZE; 3475 3476 ndm = nlmsg_data(nlh); 3477 ndm->ndm_family = AF_BRIDGE; 3478 ndm->ndm_pad1 = 0; 3479 ndm->ndm_pad2 = 0; 3480 ndm->ndm_flags = flags; 3481 ndm->ndm_type = 0; 3482 ndm->ndm_ifindex = dev->ifindex; 3483 ndm->ndm_state = ndm_state; 3484 3485 if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr)) 3486 goto nla_put_failure; 3487 if (vid) 3488 if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid)) 3489 goto nla_put_failure; 3490 3491 nlmsg_end(skb, nlh); 3492 return 0; 3493 3494 nla_put_failure: 3495 nlmsg_cancel(skb, nlh); 3496 return -EMSGSIZE; 3497 } 3498 3499 static inline size_t rtnl_fdb_nlmsg_size(void) 3500 { 3501 return NLMSG_ALIGN(sizeof(struct ndmsg)) + 3502 nla_total_size(ETH_ALEN) + /* NDA_LLADDR */ 3503 nla_total_size(sizeof(u16)) + /* NDA_VLAN */ 3504 0; 3505 } 3506 3507 static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type, 3508 u16 ndm_state) 3509 { 3510 struct net *net = dev_net(dev); 3511 struct sk_buff *skb; 3512 int err = -ENOBUFS; 3513 3514 skb = nlmsg_new(rtnl_fdb_nlmsg_size(), GFP_ATOMIC); 3515 if (!skb) 3516 goto errout; 3517 3518 err = nlmsg_populate_fdb_fill(skb, dev, addr, vid, 3519 0, 0, type, NTF_SELF, 0, ndm_state); 3520 if (err < 0) { 3521 kfree_skb(skb); 3522 goto errout; 3523 } 3524 3525 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC); 3526 return; 3527 errout: 3528 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err); 3529 } 3530 3531 /** 3532 * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry 3533 */ 3534 int ndo_dflt_fdb_add(struct ndmsg *ndm, 3535 struct nlattr *tb[], 3536 struct net_device *dev, 3537 const unsigned char *addr, u16 vid, 3538 u16 flags) 3539 { 3540 int err = -EINVAL; 3541 3542 /* If aging addresses are supported device will need to 3543 * implement its own handler for this. 3544 */ 3545 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { 3546 pr_info("%s: FDB only supports static addresses\n", dev->name); 3547 return err; 3548 } 3549 3550 if (vid) { 3551 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name); 3552 return err; 3553 } 3554 3555 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) 3556 err = dev_uc_add_excl(dev, addr); 3557 else if (is_multicast_ether_addr(addr)) 3558 err = dev_mc_add_excl(dev, addr); 3559 3560 /* Only return duplicate errors if NLM_F_EXCL is set */ 3561 if (err == -EEXIST && !(flags & NLM_F_EXCL)) 3562 err = 0; 3563 3564 return err; 3565 } 3566 EXPORT_SYMBOL(ndo_dflt_fdb_add); 3567 3568 static int fdb_vid_parse(struct nlattr *vlan_attr, u16 *p_vid, 3569 struct netlink_ext_ack *extack) 3570 { 3571 u16 vid = 0; 3572 3573 if (vlan_attr) { 3574 if (nla_len(vlan_attr) != sizeof(u16)) { 3575 NL_SET_ERR_MSG(extack, "invalid vlan attribute size"); 3576 return -EINVAL; 3577 } 3578 3579 vid = nla_get_u16(vlan_attr); 3580 3581 if (!vid || vid >= VLAN_VID_MASK) { 3582 NL_SET_ERR_MSG(extack, "invalid vlan id"); 3583 return -EINVAL; 3584 } 3585 } 3586 *p_vid = vid; 3587 return 0; 3588 } 3589 3590 static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, 3591 struct netlink_ext_ack *extack) 3592 { 3593 struct net *net = sock_net(skb->sk); 3594 struct ndmsg *ndm; 3595 struct nlattr *tb[NDA_MAX+1]; 3596 struct net_device *dev; 3597 u8 *addr; 3598 u16 vid; 3599 int err; 3600 3601 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack); 3602 if (err < 0) 3603 return err; 3604 3605 ndm = nlmsg_data(nlh); 3606 if (ndm->ndm_ifindex == 0) { 3607 NL_SET_ERR_MSG(extack, "invalid ifindex"); 3608 return -EINVAL; 3609 } 3610 3611 dev = __dev_get_by_index(net, ndm->ndm_ifindex); 3612 if (dev == NULL) { 3613 NL_SET_ERR_MSG(extack, "unknown ifindex"); 3614 return -ENODEV; 3615 } 3616 3617 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) { 3618 NL_SET_ERR_MSG(extack, "invalid address"); 3619 return -EINVAL; 3620 } 3621 3622 if (dev->type != ARPHRD_ETHER) { 3623 NL_SET_ERR_MSG(extack, "FDB add only supported for Ethernet devices"); 3624 return -EINVAL; 3625 } 3626 3627 addr = nla_data(tb[NDA_LLADDR]); 3628 3629 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack); 3630 if (err) 3631 return err; 3632 3633 err = -EOPNOTSUPP; 3634 3635 /* Support fdb on master device the net/bridge default case */ 3636 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) && 3637 (dev->priv_flags & IFF_BRIDGE_PORT)) { 3638 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 3639 const struct net_device_ops *ops = br_dev->netdev_ops; 3640 3641 err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid, 3642 nlh->nlmsg_flags); 3643 if (err) 3644 goto out; 3645 else 3646 ndm->ndm_flags &= ~NTF_MASTER; 3647 } 3648 3649 /* Embedded bridge, macvlan, and any other device support */ 3650 if ((ndm->ndm_flags & NTF_SELF)) { 3651 if (dev->netdev_ops->ndo_fdb_add) 3652 err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr, 3653 vid, 3654 nlh->nlmsg_flags); 3655 else 3656 err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, 3657 nlh->nlmsg_flags); 3658 3659 if (!err) { 3660 rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH, 3661 ndm->ndm_state); 3662 ndm->ndm_flags &= ~NTF_SELF; 3663 } 3664 } 3665 out: 3666 return err; 3667 } 3668 3669 /** 3670 * ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry 3671 */ 3672 int ndo_dflt_fdb_del(struct ndmsg *ndm, 3673 struct nlattr *tb[], 3674 struct net_device *dev, 3675 const unsigned char *addr, u16 vid) 3676 { 3677 int err = -EINVAL; 3678 3679 /* If aging addresses are supported device will need to 3680 * implement its own handler for this. 3681 */ 3682 if (!(ndm->ndm_state & NUD_PERMANENT)) { 3683 pr_info("%s: FDB only supports static addresses\n", dev->name); 3684 return err; 3685 } 3686 3687 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) 3688 err = dev_uc_del(dev, addr); 3689 else if (is_multicast_ether_addr(addr)) 3690 err = dev_mc_del(dev, addr); 3691 3692 return err; 3693 } 3694 EXPORT_SYMBOL(ndo_dflt_fdb_del); 3695 3696 static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, 3697 struct netlink_ext_ack *extack) 3698 { 3699 struct net *net = sock_net(skb->sk); 3700 struct ndmsg *ndm; 3701 struct nlattr *tb[NDA_MAX+1]; 3702 struct net_device *dev; 3703 int err = -EINVAL; 3704 __u8 *addr; 3705 u16 vid; 3706 3707 if (!netlink_capable(skb, CAP_NET_ADMIN)) 3708 return -EPERM; 3709 3710 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack); 3711 if (err < 0) 3712 return err; 3713 3714 ndm = nlmsg_data(nlh); 3715 if (ndm->ndm_ifindex == 0) { 3716 NL_SET_ERR_MSG(extack, "invalid ifindex"); 3717 return -EINVAL; 3718 } 3719 3720 dev = __dev_get_by_index(net, ndm->ndm_ifindex); 3721 if (dev == NULL) { 3722 NL_SET_ERR_MSG(extack, "unknown ifindex"); 3723 return -ENODEV; 3724 } 3725 3726 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) { 3727 NL_SET_ERR_MSG(extack, "invalid address"); 3728 return -EINVAL; 3729 } 3730 3731 if (dev->type != ARPHRD_ETHER) { 3732 NL_SET_ERR_MSG(extack, "FDB delete only supported for Ethernet devices"); 3733 return -EINVAL; 3734 } 3735 3736 addr = nla_data(tb[NDA_LLADDR]); 3737 3738 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack); 3739 if (err) 3740 return err; 3741 3742 err = -EOPNOTSUPP; 3743 3744 /* Support fdb on master device the net/bridge default case */ 3745 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) && 3746 (dev->priv_flags & IFF_BRIDGE_PORT)) { 3747 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 3748 const struct net_device_ops *ops = br_dev->netdev_ops; 3749 3750 if (ops->ndo_fdb_del) 3751 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid); 3752 3753 if (err) 3754 goto out; 3755 else 3756 ndm->ndm_flags &= ~NTF_MASTER; 3757 } 3758 3759 /* Embedded bridge, macvlan, and any other device support */ 3760 if (ndm->ndm_flags & NTF_SELF) { 3761 if (dev->netdev_ops->ndo_fdb_del) 3762 err = dev->netdev_ops->ndo_fdb_del(ndm, tb, dev, addr, 3763 vid); 3764 else 3765 err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid); 3766 3767 if (!err) { 3768 rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH, 3769 ndm->ndm_state); 3770 ndm->ndm_flags &= ~NTF_SELF; 3771 } 3772 } 3773 out: 3774 return err; 3775 } 3776 3777 static int nlmsg_populate_fdb(struct sk_buff *skb, 3778 struct netlink_callback *cb, 3779 struct net_device *dev, 3780 int *idx, 3781 struct netdev_hw_addr_list *list) 3782 { 3783 struct netdev_hw_addr *ha; 3784 int err; 3785 u32 portid, seq; 3786 3787 portid = NETLINK_CB(cb->skb).portid; 3788 seq = cb->nlh->nlmsg_seq; 3789 3790 list_for_each_entry(ha, &list->list, list) { 3791 if (*idx < cb->args[2]) 3792 goto skip; 3793 3794 err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0, 3795 portid, seq, 3796 RTM_NEWNEIGH, NTF_SELF, 3797 NLM_F_MULTI, NUD_PERMANENT); 3798 if (err < 0) 3799 return err; 3800 skip: 3801 *idx += 1; 3802 } 3803 return 0; 3804 } 3805 3806 /** 3807 * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table. 3808 * @nlh: netlink message header 3809 * @dev: netdevice 3810 * 3811 * Default netdevice operation to dump the existing unicast address list. 3812 * Returns number of addresses from list put in skb. 3813 */ 3814 int ndo_dflt_fdb_dump(struct sk_buff *skb, 3815 struct netlink_callback *cb, 3816 struct net_device *dev, 3817 struct net_device *filter_dev, 3818 int *idx) 3819 { 3820 int err; 3821 3822 if (dev->type != ARPHRD_ETHER) 3823 return -EINVAL; 3824 3825 netif_addr_lock_bh(dev); 3826 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc); 3827 if (err) 3828 goto out; 3829 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->mc); 3830 out: 3831 netif_addr_unlock_bh(dev); 3832 return err; 3833 } 3834 EXPORT_SYMBOL(ndo_dflt_fdb_dump); 3835 3836 static int valid_fdb_dump_strict(const struct nlmsghdr *nlh, 3837 int *br_idx, int *brport_idx, 3838 struct netlink_ext_ack *extack) 3839 { 3840 struct nlattr *tb[NDA_MAX + 1]; 3841 struct ndmsg *ndm; 3842 int err, i; 3843 3844 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) { 3845 NL_SET_ERR_MSG(extack, "Invalid header for fdb dump request"); 3846 return -EINVAL; 3847 } 3848 3849 ndm = nlmsg_data(nlh); 3850 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state || 3851 ndm->ndm_flags || ndm->ndm_type) { 3852 NL_SET_ERR_MSG(extack, "Invalid values in header for fbd dump request"); 3853 return -EINVAL; 3854 } 3855 3856 err = nlmsg_parse_strict(nlh, sizeof(struct ndmsg), tb, NDA_MAX, 3857 NULL, extack); 3858 if (err < 0) 3859 return err; 3860 3861 *brport_idx = ndm->ndm_ifindex; 3862 for (i = 0; i <= NDA_MAX; ++i) { 3863 if (!tb[i]) 3864 continue; 3865 3866 switch (i) { 3867 case NDA_IFINDEX: 3868 if (nla_len(tb[i]) != sizeof(u32)) { 3869 NL_SET_ERR_MSG(extack, "Invalid IFINDEX attribute in fdb dump request"); 3870 return -EINVAL; 3871 } 3872 *brport_idx = nla_get_u32(tb[NDA_IFINDEX]); 3873 break; 3874 case NDA_MASTER: 3875 if (nla_len(tb[i]) != sizeof(u32)) { 3876 NL_SET_ERR_MSG(extack, "Invalid MASTER attribute in fdb dump request"); 3877 return -EINVAL; 3878 } 3879 *br_idx = nla_get_u32(tb[NDA_MASTER]); 3880 break; 3881 default: 3882 NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb dump request"); 3883 return -EINVAL; 3884 } 3885 } 3886 3887 return 0; 3888 } 3889 3890 static int valid_fdb_dump_legacy(const struct nlmsghdr *nlh, 3891 int *br_idx, int *brport_idx, 3892 struct netlink_ext_ack *extack) 3893 { 3894 struct nlattr *tb[IFLA_MAX+1]; 3895 int err; 3896 3897 /* A hack to preserve kernel<->userspace interface. 3898 * Before Linux v4.12 this code accepted ndmsg since iproute2 v3.3.0. 3899 * However, ndmsg is shorter than ifinfomsg thus nlmsg_parse() bails. 3900 * So, check for ndmsg with an optional u32 attribute (not used here). 3901 * Fortunately these sizes don't conflict with the size of ifinfomsg 3902 * with an optional attribute. 3903 */ 3904 if (nlmsg_len(nlh) != sizeof(struct ndmsg) && 3905 (nlmsg_len(nlh) != sizeof(struct ndmsg) + 3906 nla_attr_size(sizeof(u32)))) { 3907 struct ifinfomsg *ifm; 3908 3909 err = nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX, 3910 ifla_policy, extack); 3911 if (err < 0) { 3912 return -EINVAL; 3913 } else if (err == 0) { 3914 if (tb[IFLA_MASTER]) 3915 *br_idx = nla_get_u32(tb[IFLA_MASTER]); 3916 } 3917 3918 ifm = nlmsg_data(nlh); 3919 *brport_idx = ifm->ifi_index; 3920 } 3921 return 0; 3922 } 3923 3924 static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb) 3925 { 3926 struct net_device *dev; 3927 struct net_device *br_dev = NULL; 3928 const struct net_device_ops *ops = NULL; 3929 const struct net_device_ops *cops = NULL; 3930 struct net *net = sock_net(skb->sk); 3931 struct hlist_head *head; 3932 int brport_idx = 0; 3933 int br_idx = 0; 3934 int h, s_h; 3935 int idx = 0, s_idx; 3936 int err = 0; 3937 int fidx = 0; 3938 3939 if (cb->strict_check) 3940 err = valid_fdb_dump_strict(cb->nlh, &br_idx, &brport_idx, 3941 cb->extack); 3942 else 3943 err = valid_fdb_dump_legacy(cb->nlh, &br_idx, &brport_idx, 3944 cb->extack); 3945 if (err < 0) 3946 return err; 3947 3948 if (br_idx) { 3949 br_dev = __dev_get_by_index(net, br_idx); 3950 if (!br_dev) 3951 return -ENODEV; 3952 3953 ops = br_dev->netdev_ops; 3954 } 3955 3956 s_h = cb->args[0]; 3957 s_idx = cb->args[1]; 3958 3959 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { 3960 idx = 0; 3961 head = &net->dev_index_head[h]; 3962 hlist_for_each_entry(dev, head, index_hlist) { 3963 3964 if (brport_idx && (dev->ifindex != brport_idx)) 3965 continue; 3966 3967 if (!br_idx) { /* user did not specify a specific bridge */ 3968 if (dev->priv_flags & IFF_BRIDGE_PORT) { 3969 br_dev = netdev_master_upper_dev_get(dev); 3970 cops = br_dev->netdev_ops; 3971 } 3972 } else { 3973 if (dev != br_dev && 3974 !(dev->priv_flags & IFF_BRIDGE_PORT)) 3975 continue; 3976 3977 if (br_dev != netdev_master_upper_dev_get(dev) && 3978 !(dev->priv_flags & IFF_EBRIDGE)) 3979 continue; 3980 cops = ops; 3981 } 3982 3983 if (idx < s_idx) 3984 goto cont; 3985 3986 if (dev->priv_flags & IFF_BRIDGE_PORT) { 3987 if (cops && cops->ndo_fdb_dump) { 3988 err = cops->ndo_fdb_dump(skb, cb, 3989 br_dev, dev, 3990 &fidx); 3991 if (err == -EMSGSIZE) 3992 goto out; 3993 } 3994 } 3995 3996 if (dev->netdev_ops->ndo_fdb_dump) 3997 err = dev->netdev_ops->ndo_fdb_dump(skb, cb, 3998 dev, NULL, 3999 &fidx); 4000 else 4001 err = ndo_dflt_fdb_dump(skb, cb, dev, NULL, 4002 &fidx); 4003 if (err == -EMSGSIZE) 4004 goto out; 4005 4006 cops = NULL; 4007 4008 /* reset fdb offset to 0 for rest of the interfaces */ 4009 cb->args[2] = 0; 4010 fidx = 0; 4011 cont: 4012 idx++; 4013 } 4014 } 4015 4016 out: 4017 cb->args[0] = h; 4018 cb->args[1] = idx; 4019 cb->args[2] = fidx; 4020 4021 return skb->len; 4022 } 4023 4024 static int valid_fdb_get_strict(const struct nlmsghdr *nlh, 4025 struct nlattr **tb, u8 *ndm_flags, 4026 int *br_idx, int *brport_idx, u8 **addr, 4027 u16 *vid, struct netlink_ext_ack *extack) 4028 { 4029 struct ndmsg *ndm; 4030 int err, i; 4031 4032 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) { 4033 NL_SET_ERR_MSG(extack, "Invalid header for fdb get request"); 4034 return -EINVAL; 4035 } 4036 4037 ndm = nlmsg_data(nlh); 4038 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state || 4039 ndm->ndm_type) { 4040 NL_SET_ERR_MSG(extack, "Invalid values in header for fdb get request"); 4041 return -EINVAL; 4042 } 4043 4044 if (ndm->ndm_flags & ~(NTF_MASTER | NTF_SELF)) { 4045 NL_SET_ERR_MSG(extack, "Invalid flags in header for fdb get request"); 4046 return -EINVAL; 4047 } 4048 4049 err = nlmsg_parse_strict(nlh, sizeof(struct ndmsg), tb, NDA_MAX, 4050 nda_policy, extack); 4051 if (err < 0) 4052 return err; 4053 4054 *ndm_flags = ndm->ndm_flags; 4055 *brport_idx = ndm->ndm_ifindex; 4056 for (i = 0; i <= NDA_MAX; ++i) { 4057 if (!tb[i]) 4058 continue; 4059 4060 switch (i) { 4061 case NDA_MASTER: 4062 *br_idx = nla_get_u32(tb[i]); 4063 break; 4064 case NDA_LLADDR: 4065 if (nla_len(tb[i]) != ETH_ALEN) { 4066 NL_SET_ERR_MSG(extack, "Invalid address in fdb get request"); 4067 return -EINVAL; 4068 } 4069 *addr = nla_data(tb[i]); 4070 break; 4071 case NDA_VLAN: 4072 err = fdb_vid_parse(tb[i], vid, extack); 4073 if (err) 4074 return err; 4075 break; 4076 case NDA_VNI: 4077 break; 4078 default: 4079 NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb get request"); 4080 return -EINVAL; 4081 } 4082 } 4083 4084 return 0; 4085 } 4086 4087 static int rtnl_fdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh, 4088 struct netlink_ext_ack *extack) 4089 { 4090 struct net_device *dev = NULL, *br_dev = NULL; 4091 const struct net_device_ops *ops = NULL; 4092 struct net *net = sock_net(in_skb->sk); 4093 struct nlattr *tb[NDA_MAX + 1]; 4094 struct sk_buff *skb; 4095 int brport_idx = 0; 4096 u8 ndm_flags = 0; 4097 int br_idx = 0; 4098 u8 *addr = NULL; 4099 u16 vid = 0; 4100 int err; 4101 4102 err = valid_fdb_get_strict(nlh, tb, &ndm_flags, &br_idx, 4103 &brport_idx, &addr, &vid, extack); 4104 if (err < 0) 4105 return err; 4106 4107 if (!addr) { 4108 NL_SET_ERR_MSG(extack, "Missing lookup address for fdb get request"); 4109 return -EINVAL; 4110 } 4111 4112 if (brport_idx) { 4113 dev = __dev_get_by_index(net, brport_idx); 4114 if (!dev) { 4115 NL_SET_ERR_MSG(extack, "Unknown device ifindex"); 4116 return -ENODEV; 4117 } 4118 } 4119 4120 if (br_idx) { 4121 if (dev) { 4122 NL_SET_ERR_MSG(extack, "Master and device are mutually exclusive"); 4123 return -EINVAL; 4124 } 4125 4126 br_dev = __dev_get_by_index(net, br_idx); 4127 if (!br_dev) { 4128 NL_SET_ERR_MSG(extack, "Invalid master ifindex"); 4129 return -EINVAL; 4130 } 4131 ops = br_dev->netdev_ops; 4132 } 4133 4134 if (dev) { 4135 if (!ndm_flags || (ndm_flags & NTF_MASTER)) { 4136 if (!(dev->priv_flags & IFF_BRIDGE_PORT)) { 4137 NL_SET_ERR_MSG(extack, "Device is not a bridge port"); 4138 return -EINVAL; 4139 } 4140 br_dev = netdev_master_upper_dev_get(dev); 4141 if (!br_dev) { 4142 NL_SET_ERR_MSG(extack, "Master of device not found"); 4143 return -EINVAL; 4144 } 4145 ops = br_dev->netdev_ops; 4146 } else { 4147 if (!(ndm_flags & NTF_SELF)) { 4148 NL_SET_ERR_MSG(extack, "Missing NTF_SELF"); 4149 return -EINVAL; 4150 } 4151 ops = dev->netdev_ops; 4152 } 4153 } 4154 4155 if (!br_dev && !dev) { 4156 NL_SET_ERR_MSG(extack, "No device specified"); 4157 return -ENODEV; 4158 } 4159 4160 if (!ops || !ops->ndo_fdb_get) { 4161 NL_SET_ERR_MSG(extack, "Fdb get operation not supported by device"); 4162 return -EOPNOTSUPP; 4163 } 4164 4165 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 4166 if (!skb) 4167 return -ENOBUFS; 4168 4169 if (br_dev) 4170 dev = br_dev; 4171 err = ops->ndo_fdb_get(skb, tb, dev, addr, vid, 4172 NETLINK_CB(in_skb).portid, 4173 nlh->nlmsg_seq, extack); 4174 if (err) 4175 goto out; 4176 4177 return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid); 4178 out: 4179 kfree_skb(skb); 4180 return err; 4181 } 4182 4183 static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask, 4184 unsigned int attrnum, unsigned int flag) 4185 { 4186 if (mask & flag) 4187 return nla_put_u8(skb, attrnum, !!(flags & flag)); 4188 return 0; 4189 } 4190 4191 int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 4192 struct net_device *dev, u16 mode, 4193 u32 flags, u32 mask, int nlflags, 4194 u32 filter_mask, 4195 int (*vlan_fill)(struct sk_buff *skb, 4196 struct net_device *dev, 4197 u32 filter_mask)) 4198 { 4199 struct nlmsghdr *nlh; 4200 struct ifinfomsg *ifm; 4201 struct nlattr *br_afspec; 4202 struct nlattr *protinfo; 4203 u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN; 4204 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 4205 int err = 0; 4206 4207 nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags); 4208 if (nlh == NULL) 4209 return -EMSGSIZE; 4210 4211 ifm = nlmsg_data(nlh); 4212 ifm->ifi_family = AF_BRIDGE; 4213 ifm->__ifi_pad = 0; 4214 ifm->ifi_type = dev->type; 4215 ifm->ifi_index = dev->ifindex; 4216 ifm->ifi_flags = dev_get_flags(dev); 4217 ifm->ifi_change = 0; 4218 4219 4220 if (nla_put_string(skb, IFLA_IFNAME, dev->name) || 4221 nla_put_u32(skb, IFLA_MTU, dev->mtu) || 4222 nla_put_u8(skb, IFLA_OPERSTATE, operstate) || 4223 (br_dev && 4224 nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) || 4225 (dev->addr_len && 4226 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) || 4227 (dev->ifindex != dev_get_iflink(dev) && 4228 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev)))) 4229 goto nla_put_failure; 4230 4231 br_afspec = nla_nest_start(skb, IFLA_AF_SPEC); 4232 if (!br_afspec) 4233 goto nla_put_failure; 4234 4235 if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF)) { 4236 nla_nest_cancel(skb, br_afspec); 4237 goto nla_put_failure; 4238 } 4239 4240 if (mode != BRIDGE_MODE_UNDEF) { 4241 if (nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) { 4242 nla_nest_cancel(skb, br_afspec); 4243 goto nla_put_failure; 4244 } 4245 } 4246 if (vlan_fill) { 4247 err = vlan_fill(skb, dev, filter_mask); 4248 if (err) { 4249 nla_nest_cancel(skb, br_afspec); 4250 goto nla_put_failure; 4251 } 4252 } 4253 nla_nest_end(skb, br_afspec); 4254 4255 protinfo = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED); 4256 if (!protinfo) 4257 goto nla_put_failure; 4258 4259 if (brport_nla_put_flag(skb, flags, mask, 4260 IFLA_BRPORT_MODE, BR_HAIRPIN_MODE) || 4261 brport_nla_put_flag(skb, flags, mask, 4262 IFLA_BRPORT_GUARD, BR_BPDU_GUARD) || 4263 brport_nla_put_flag(skb, flags, mask, 4264 IFLA_BRPORT_FAST_LEAVE, 4265 BR_MULTICAST_FAST_LEAVE) || 4266 brport_nla_put_flag(skb, flags, mask, 4267 IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK) || 4268 brport_nla_put_flag(skb, flags, mask, 4269 IFLA_BRPORT_LEARNING, BR_LEARNING) || 4270 brport_nla_put_flag(skb, flags, mask, 4271 IFLA_BRPORT_LEARNING_SYNC, BR_LEARNING_SYNC) || 4272 brport_nla_put_flag(skb, flags, mask, 4273 IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD) || 4274 brport_nla_put_flag(skb, flags, mask, 4275 IFLA_BRPORT_PROXYARP, BR_PROXYARP)) { 4276 nla_nest_cancel(skb, protinfo); 4277 goto nla_put_failure; 4278 } 4279 4280 nla_nest_end(skb, protinfo); 4281 4282 nlmsg_end(skb, nlh); 4283 return 0; 4284 nla_put_failure: 4285 nlmsg_cancel(skb, nlh); 4286 return err ? err : -EMSGSIZE; 4287 } 4288 EXPORT_SYMBOL_GPL(ndo_dflt_bridge_getlink); 4289 4290 static int valid_bridge_getlink_req(const struct nlmsghdr *nlh, 4291 bool strict_check, u32 *filter_mask, 4292 struct netlink_ext_ack *extack) 4293 { 4294 struct nlattr *tb[IFLA_MAX+1]; 4295 int err, i; 4296 4297 if (strict_check) { 4298 struct ifinfomsg *ifm; 4299 4300 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) { 4301 NL_SET_ERR_MSG(extack, "Invalid header for bridge link dump"); 4302 return -EINVAL; 4303 } 4304 4305 ifm = nlmsg_data(nlh); 4306 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags || 4307 ifm->ifi_change || ifm->ifi_index) { 4308 NL_SET_ERR_MSG(extack, "Invalid values in header for bridge link dump request"); 4309 return -EINVAL; 4310 } 4311 4312 err = nlmsg_parse_strict(nlh, sizeof(struct ifinfomsg), tb, 4313 IFLA_MAX, ifla_policy, extack); 4314 } else { 4315 err = nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, 4316 IFLA_MAX, ifla_policy, extack); 4317 } 4318 if (err < 0) 4319 return err; 4320 4321 /* new attributes should only be added with strict checking */ 4322 for (i = 0; i <= IFLA_MAX; ++i) { 4323 if (!tb[i]) 4324 continue; 4325 4326 switch (i) { 4327 case IFLA_EXT_MASK: 4328 *filter_mask = nla_get_u32(tb[i]); 4329 break; 4330 default: 4331 if (strict_check) { 4332 NL_SET_ERR_MSG(extack, "Unsupported attribute in bridge link dump request"); 4333 return -EINVAL; 4334 } 4335 } 4336 } 4337 4338 return 0; 4339 } 4340 4341 static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb) 4342 { 4343 const struct nlmsghdr *nlh = cb->nlh; 4344 struct net *net = sock_net(skb->sk); 4345 struct net_device *dev; 4346 int idx = 0; 4347 u32 portid = NETLINK_CB(cb->skb).portid; 4348 u32 seq = nlh->nlmsg_seq; 4349 u32 filter_mask = 0; 4350 int err; 4351 4352 err = valid_bridge_getlink_req(nlh, cb->strict_check, &filter_mask, 4353 cb->extack); 4354 if (err < 0 && cb->strict_check) 4355 return err; 4356 4357 rcu_read_lock(); 4358 for_each_netdev_rcu(net, dev) { 4359 const struct net_device_ops *ops = dev->netdev_ops; 4360 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 4361 4362 if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) { 4363 if (idx >= cb->args[0]) { 4364 err = br_dev->netdev_ops->ndo_bridge_getlink( 4365 skb, portid, seq, dev, 4366 filter_mask, NLM_F_MULTI); 4367 if (err < 0 && err != -EOPNOTSUPP) { 4368 if (likely(skb->len)) 4369 break; 4370 4371 goto out_err; 4372 } 4373 } 4374 idx++; 4375 } 4376 4377 if (ops->ndo_bridge_getlink) { 4378 if (idx >= cb->args[0]) { 4379 err = ops->ndo_bridge_getlink(skb, portid, 4380 seq, dev, 4381 filter_mask, 4382 NLM_F_MULTI); 4383 if (err < 0 && err != -EOPNOTSUPP) { 4384 if (likely(skb->len)) 4385 break; 4386 4387 goto out_err; 4388 } 4389 } 4390 idx++; 4391 } 4392 } 4393 err = skb->len; 4394 out_err: 4395 rcu_read_unlock(); 4396 cb->args[0] = idx; 4397 4398 return err; 4399 } 4400 4401 static inline size_t bridge_nlmsg_size(void) 4402 { 4403 return NLMSG_ALIGN(sizeof(struct ifinfomsg)) 4404 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */ 4405 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */ 4406 + nla_total_size(sizeof(u32)) /* IFLA_MASTER */ 4407 + nla_total_size(sizeof(u32)) /* IFLA_MTU */ 4408 + nla_total_size(sizeof(u32)) /* IFLA_LINK */ 4409 + nla_total_size(sizeof(u32)) /* IFLA_OPERSTATE */ 4410 + nla_total_size(sizeof(u8)) /* IFLA_PROTINFO */ 4411 + nla_total_size(sizeof(struct nlattr)) /* IFLA_AF_SPEC */ 4412 + nla_total_size(sizeof(u16)) /* IFLA_BRIDGE_FLAGS */ 4413 + nla_total_size(sizeof(u16)); /* IFLA_BRIDGE_MODE */ 4414 } 4415 4416 static int rtnl_bridge_notify(struct net_device *dev) 4417 { 4418 struct net *net = dev_net(dev); 4419 struct sk_buff *skb; 4420 int err = -EOPNOTSUPP; 4421 4422 if (!dev->netdev_ops->ndo_bridge_getlink) 4423 return 0; 4424 4425 skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC); 4426 if (!skb) { 4427 err = -ENOMEM; 4428 goto errout; 4429 } 4430 4431 err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0); 4432 if (err < 0) 4433 goto errout; 4434 4435 if (!skb->len) 4436 goto errout; 4437 4438 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC); 4439 return 0; 4440 errout: 4441 WARN_ON(err == -EMSGSIZE); 4442 kfree_skb(skb); 4443 if (err) 4444 rtnl_set_sk_err(net, RTNLGRP_LINK, err); 4445 return err; 4446 } 4447 4448 static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, 4449 struct netlink_ext_ack *extack) 4450 { 4451 struct net *net = sock_net(skb->sk); 4452 struct ifinfomsg *ifm; 4453 struct net_device *dev; 4454 struct nlattr *br_spec, *attr = NULL; 4455 int rem, err = -EOPNOTSUPP; 4456 u16 flags = 0; 4457 bool have_flags = false; 4458 4459 if (nlmsg_len(nlh) < sizeof(*ifm)) 4460 return -EINVAL; 4461 4462 ifm = nlmsg_data(nlh); 4463 if (ifm->ifi_family != AF_BRIDGE) 4464 return -EPFNOSUPPORT; 4465 4466 dev = __dev_get_by_index(net, ifm->ifi_index); 4467 if (!dev) { 4468 NL_SET_ERR_MSG(extack, "unknown ifindex"); 4469 return -ENODEV; 4470 } 4471 4472 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 4473 if (br_spec) { 4474 nla_for_each_nested(attr, br_spec, rem) { 4475 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) { 4476 if (nla_len(attr) < sizeof(flags)) 4477 return -EINVAL; 4478 4479 have_flags = true; 4480 flags = nla_get_u16(attr); 4481 break; 4482 } 4483 } 4484 } 4485 4486 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) { 4487 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 4488 4489 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_setlink) { 4490 err = -EOPNOTSUPP; 4491 goto out; 4492 } 4493 4494 err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags, 4495 extack); 4496 if (err) 4497 goto out; 4498 4499 flags &= ~BRIDGE_FLAGS_MASTER; 4500 } 4501 4502 if ((flags & BRIDGE_FLAGS_SELF)) { 4503 if (!dev->netdev_ops->ndo_bridge_setlink) 4504 err = -EOPNOTSUPP; 4505 else 4506 err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh, 4507 flags, 4508 extack); 4509 if (!err) { 4510 flags &= ~BRIDGE_FLAGS_SELF; 4511 4512 /* Generate event to notify upper layer of bridge 4513 * change 4514 */ 4515 err = rtnl_bridge_notify(dev); 4516 } 4517 } 4518 4519 if (have_flags) 4520 memcpy(nla_data(attr), &flags, sizeof(flags)); 4521 out: 4522 return err; 4523 } 4524 4525 static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, 4526 struct netlink_ext_ack *extack) 4527 { 4528 struct net *net = sock_net(skb->sk); 4529 struct ifinfomsg *ifm; 4530 struct net_device *dev; 4531 struct nlattr *br_spec, *attr = NULL; 4532 int rem, err = -EOPNOTSUPP; 4533 u16 flags = 0; 4534 bool have_flags = false; 4535 4536 if (nlmsg_len(nlh) < sizeof(*ifm)) 4537 return -EINVAL; 4538 4539 ifm = nlmsg_data(nlh); 4540 if (ifm->ifi_family != AF_BRIDGE) 4541 return -EPFNOSUPPORT; 4542 4543 dev = __dev_get_by_index(net, ifm->ifi_index); 4544 if (!dev) { 4545 NL_SET_ERR_MSG(extack, "unknown ifindex"); 4546 return -ENODEV; 4547 } 4548 4549 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 4550 if (br_spec) { 4551 nla_for_each_nested(attr, br_spec, rem) { 4552 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) { 4553 if (nla_len(attr) < sizeof(flags)) 4554 return -EINVAL; 4555 4556 have_flags = true; 4557 flags = nla_get_u16(attr); 4558 break; 4559 } 4560 } 4561 } 4562 4563 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) { 4564 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 4565 4566 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) { 4567 err = -EOPNOTSUPP; 4568 goto out; 4569 } 4570 4571 err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh, flags); 4572 if (err) 4573 goto out; 4574 4575 flags &= ~BRIDGE_FLAGS_MASTER; 4576 } 4577 4578 if ((flags & BRIDGE_FLAGS_SELF)) { 4579 if (!dev->netdev_ops->ndo_bridge_dellink) 4580 err = -EOPNOTSUPP; 4581 else 4582 err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh, 4583 flags); 4584 4585 if (!err) { 4586 flags &= ~BRIDGE_FLAGS_SELF; 4587 4588 /* Generate event to notify upper layer of bridge 4589 * change 4590 */ 4591 err = rtnl_bridge_notify(dev); 4592 } 4593 } 4594 4595 if (have_flags) 4596 memcpy(nla_data(attr), &flags, sizeof(flags)); 4597 out: 4598 return err; 4599 } 4600 4601 static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr) 4602 { 4603 return (mask & IFLA_STATS_FILTER_BIT(attrid)) && 4604 (!idxattr || idxattr == attrid); 4605 } 4606 4607 #define IFLA_OFFLOAD_XSTATS_FIRST (IFLA_OFFLOAD_XSTATS_UNSPEC + 1) 4608 static int rtnl_get_offload_stats_attr_size(int attr_id) 4609 { 4610 switch (attr_id) { 4611 case IFLA_OFFLOAD_XSTATS_CPU_HIT: 4612 return sizeof(struct rtnl_link_stats64); 4613 } 4614 4615 return 0; 4616 } 4617 4618 static int rtnl_get_offload_stats(struct sk_buff *skb, struct net_device *dev, 4619 int *prividx) 4620 { 4621 struct nlattr *attr = NULL; 4622 int attr_id, size; 4623 void *attr_data; 4624 int err; 4625 4626 if (!(dev->netdev_ops && dev->netdev_ops->ndo_has_offload_stats && 4627 dev->netdev_ops->ndo_get_offload_stats)) 4628 return -ENODATA; 4629 4630 for (attr_id = IFLA_OFFLOAD_XSTATS_FIRST; 4631 attr_id <= IFLA_OFFLOAD_XSTATS_MAX; attr_id++) { 4632 if (attr_id < *prividx) 4633 continue; 4634 4635 size = rtnl_get_offload_stats_attr_size(attr_id); 4636 if (!size) 4637 continue; 4638 4639 if (!dev->netdev_ops->ndo_has_offload_stats(dev, attr_id)) 4640 continue; 4641 4642 attr = nla_reserve_64bit(skb, attr_id, size, 4643 IFLA_OFFLOAD_XSTATS_UNSPEC); 4644 if (!attr) 4645 goto nla_put_failure; 4646 4647 attr_data = nla_data(attr); 4648 memset(attr_data, 0, size); 4649 err = dev->netdev_ops->ndo_get_offload_stats(attr_id, dev, 4650 attr_data); 4651 if (err) 4652 goto get_offload_stats_failure; 4653 } 4654 4655 if (!attr) 4656 return -ENODATA; 4657 4658 *prividx = 0; 4659 return 0; 4660 4661 nla_put_failure: 4662 err = -EMSGSIZE; 4663 get_offload_stats_failure: 4664 *prividx = attr_id; 4665 return err; 4666 } 4667 4668 static int rtnl_get_offload_stats_size(const struct net_device *dev) 4669 { 4670 int nla_size = 0; 4671 int attr_id; 4672 int size; 4673 4674 if (!(dev->netdev_ops && dev->netdev_ops->ndo_has_offload_stats && 4675 dev->netdev_ops->ndo_get_offload_stats)) 4676 return 0; 4677 4678 for (attr_id = IFLA_OFFLOAD_XSTATS_FIRST; 4679 attr_id <= IFLA_OFFLOAD_XSTATS_MAX; attr_id++) { 4680 if (!dev->netdev_ops->ndo_has_offload_stats(dev, attr_id)) 4681 continue; 4682 size = rtnl_get_offload_stats_attr_size(attr_id); 4683 nla_size += nla_total_size_64bit(size); 4684 } 4685 4686 if (nla_size != 0) 4687 nla_size += nla_total_size(0); 4688 4689 return nla_size; 4690 } 4691 4692 static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev, 4693 int type, u32 pid, u32 seq, u32 change, 4694 unsigned int flags, unsigned int filter_mask, 4695 int *idxattr, int *prividx) 4696 { 4697 struct if_stats_msg *ifsm; 4698 struct nlmsghdr *nlh; 4699 struct nlattr *attr; 4700 int s_prividx = *prividx; 4701 int err; 4702 4703 ASSERT_RTNL(); 4704 4705 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifsm), flags); 4706 if (!nlh) 4707 return -EMSGSIZE; 4708 4709 ifsm = nlmsg_data(nlh); 4710 ifsm->family = PF_UNSPEC; 4711 ifsm->pad1 = 0; 4712 ifsm->pad2 = 0; 4713 ifsm->ifindex = dev->ifindex; 4714 ifsm->filter_mask = filter_mask; 4715 4716 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, *idxattr)) { 4717 struct rtnl_link_stats64 *sp; 4718 4719 attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64, 4720 sizeof(struct rtnl_link_stats64), 4721 IFLA_STATS_UNSPEC); 4722 if (!attr) 4723 goto nla_put_failure; 4724 4725 sp = nla_data(attr); 4726 dev_get_stats(dev, sp); 4727 } 4728 4729 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, *idxattr)) { 4730 const struct rtnl_link_ops *ops = dev->rtnl_link_ops; 4731 4732 if (ops && ops->fill_linkxstats) { 4733 *idxattr = IFLA_STATS_LINK_XSTATS; 4734 attr = nla_nest_start(skb, 4735 IFLA_STATS_LINK_XSTATS); 4736 if (!attr) 4737 goto nla_put_failure; 4738 4739 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr); 4740 nla_nest_end(skb, attr); 4741 if (err) 4742 goto nla_put_failure; 4743 *idxattr = 0; 4744 } 4745 } 4746 4747 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 4748 *idxattr)) { 4749 const struct rtnl_link_ops *ops = NULL; 4750 const struct net_device *master; 4751 4752 master = netdev_master_upper_dev_get(dev); 4753 if (master) 4754 ops = master->rtnl_link_ops; 4755 if (ops && ops->fill_linkxstats) { 4756 *idxattr = IFLA_STATS_LINK_XSTATS_SLAVE; 4757 attr = nla_nest_start(skb, 4758 IFLA_STATS_LINK_XSTATS_SLAVE); 4759 if (!attr) 4760 goto nla_put_failure; 4761 4762 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr); 4763 nla_nest_end(skb, attr); 4764 if (err) 4765 goto nla_put_failure; 4766 *idxattr = 0; 4767 } 4768 } 4769 4770 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 4771 *idxattr)) { 4772 *idxattr = IFLA_STATS_LINK_OFFLOAD_XSTATS; 4773 attr = nla_nest_start(skb, IFLA_STATS_LINK_OFFLOAD_XSTATS); 4774 if (!attr) 4775 goto nla_put_failure; 4776 4777 err = rtnl_get_offload_stats(skb, dev, prividx); 4778 if (err == -ENODATA) 4779 nla_nest_cancel(skb, attr); 4780 else 4781 nla_nest_end(skb, attr); 4782 4783 if (err && err != -ENODATA) 4784 goto nla_put_failure; 4785 *idxattr = 0; 4786 } 4787 4788 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, *idxattr)) { 4789 struct rtnl_af_ops *af_ops; 4790 4791 *idxattr = IFLA_STATS_AF_SPEC; 4792 attr = nla_nest_start(skb, IFLA_STATS_AF_SPEC); 4793 if (!attr) 4794 goto nla_put_failure; 4795 4796 rcu_read_lock(); 4797 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) { 4798 if (af_ops->fill_stats_af) { 4799 struct nlattr *af; 4800 int err; 4801 4802 af = nla_nest_start(skb, af_ops->family); 4803 if (!af) { 4804 rcu_read_unlock(); 4805 goto nla_put_failure; 4806 } 4807 err = af_ops->fill_stats_af(skb, dev); 4808 4809 if (err == -ENODATA) { 4810 nla_nest_cancel(skb, af); 4811 } else if (err < 0) { 4812 rcu_read_unlock(); 4813 goto nla_put_failure; 4814 } 4815 4816 nla_nest_end(skb, af); 4817 } 4818 } 4819 rcu_read_unlock(); 4820 4821 nla_nest_end(skb, attr); 4822 4823 *idxattr = 0; 4824 } 4825 4826 nlmsg_end(skb, nlh); 4827 4828 return 0; 4829 4830 nla_put_failure: 4831 /* not a multi message or no progress mean a real error */ 4832 if (!(flags & NLM_F_MULTI) || s_prividx == *prividx) 4833 nlmsg_cancel(skb, nlh); 4834 else 4835 nlmsg_end(skb, nlh); 4836 4837 return -EMSGSIZE; 4838 } 4839 4840 static size_t if_nlmsg_stats_size(const struct net_device *dev, 4841 u32 filter_mask) 4842 { 4843 size_t size = 0; 4844 4845 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0)) 4846 size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64)); 4847 4848 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, 0)) { 4849 const struct rtnl_link_ops *ops = dev->rtnl_link_ops; 4850 int attr = IFLA_STATS_LINK_XSTATS; 4851 4852 if (ops && ops->get_linkxstats_size) { 4853 size += nla_total_size(ops->get_linkxstats_size(dev, 4854 attr)); 4855 /* for IFLA_STATS_LINK_XSTATS */ 4856 size += nla_total_size(0); 4857 } 4858 } 4859 4860 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 0)) { 4861 struct net_device *_dev = (struct net_device *)dev; 4862 const struct rtnl_link_ops *ops = NULL; 4863 const struct net_device *master; 4864 4865 /* netdev_master_upper_dev_get can't take const */ 4866 master = netdev_master_upper_dev_get(_dev); 4867 if (master) 4868 ops = master->rtnl_link_ops; 4869 if (ops && ops->get_linkxstats_size) { 4870 int attr = IFLA_STATS_LINK_XSTATS_SLAVE; 4871 4872 size += nla_total_size(ops->get_linkxstats_size(dev, 4873 attr)); 4874 /* for IFLA_STATS_LINK_XSTATS_SLAVE */ 4875 size += nla_total_size(0); 4876 } 4877 } 4878 4879 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0)) 4880 size += rtnl_get_offload_stats_size(dev); 4881 4882 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, 0)) { 4883 struct rtnl_af_ops *af_ops; 4884 4885 /* for IFLA_STATS_AF_SPEC */ 4886 size += nla_total_size(0); 4887 4888 rcu_read_lock(); 4889 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) { 4890 if (af_ops->get_stats_af_size) { 4891 size += nla_total_size( 4892 af_ops->get_stats_af_size(dev)); 4893 4894 /* for AF_* */ 4895 size += nla_total_size(0); 4896 } 4897 } 4898 rcu_read_unlock(); 4899 } 4900 4901 return size; 4902 } 4903 4904 static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh, 4905 struct netlink_ext_ack *extack) 4906 { 4907 struct net *net = sock_net(skb->sk); 4908 struct net_device *dev = NULL; 4909 int idxattr = 0, prividx = 0; 4910 struct if_stats_msg *ifsm; 4911 struct sk_buff *nskb; 4912 u32 filter_mask; 4913 int err; 4914 4915 if (nlmsg_len(nlh) < sizeof(*ifsm)) 4916 return -EINVAL; 4917 4918 ifsm = nlmsg_data(nlh); 4919 if (ifsm->ifindex > 0) 4920 dev = __dev_get_by_index(net, ifsm->ifindex); 4921 else 4922 return -EINVAL; 4923 4924 if (!dev) 4925 return -ENODEV; 4926 4927 filter_mask = ifsm->filter_mask; 4928 if (!filter_mask) 4929 return -EINVAL; 4930 4931 nskb = nlmsg_new(if_nlmsg_stats_size(dev, filter_mask), GFP_KERNEL); 4932 if (!nskb) 4933 return -ENOBUFS; 4934 4935 err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS, 4936 NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0, 4937 0, filter_mask, &idxattr, &prividx); 4938 if (err < 0) { 4939 /* -EMSGSIZE implies BUG in if_nlmsg_stats_size */ 4940 WARN_ON(err == -EMSGSIZE); 4941 kfree_skb(nskb); 4942 } else { 4943 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid); 4944 } 4945 4946 return err; 4947 } 4948 4949 static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb) 4950 { 4951 struct netlink_ext_ack *extack = cb->extack; 4952 int h, s_h, err, s_idx, s_idxattr, s_prividx; 4953 struct net *net = sock_net(skb->sk); 4954 unsigned int flags = NLM_F_MULTI; 4955 struct if_stats_msg *ifsm; 4956 struct hlist_head *head; 4957 struct net_device *dev; 4958 u32 filter_mask = 0; 4959 int idx = 0; 4960 4961 s_h = cb->args[0]; 4962 s_idx = cb->args[1]; 4963 s_idxattr = cb->args[2]; 4964 s_prividx = cb->args[3]; 4965 4966 cb->seq = net->dev_base_seq; 4967 4968 if (nlmsg_len(cb->nlh) < sizeof(*ifsm)) { 4969 NL_SET_ERR_MSG(extack, "Invalid header for stats dump"); 4970 return -EINVAL; 4971 } 4972 4973 ifsm = nlmsg_data(cb->nlh); 4974 4975 /* only requests using strict checks can pass data to influence 4976 * the dump. The legacy exception is filter_mask. 4977 */ 4978 if (cb->strict_check) { 4979 if (ifsm->pad1 || ifsm->pad2 || ifsm->ifindex) { 4980 NL_SET_ERR_MSG(extack, "Invalid values in header for stats dump request"); 4981 return -EINVAL; 4982 } 4983 if (nlmsg_attrlen(cb->nlh, sizeof(*ifsm))) { 4984 NL_SET_ERR_MSG(extack, "Invalid attributes after stats header"); 4985 return -EINVAL; 4986 } 4987 } 4988 4989 filter_mask = ifsm->filter_mask; 4990 if (!filter_mask) { 4991 NL_SET_ERR_MSG(extack, "Filter mask must be set for stats dump"); 4992 return -EINVAL; 4993 } 4994 4995 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { 4996 idx = 0; 4997 head = &net->dev_index_head[h]; 4998 hlist_for_each_entry(dev, head, index_hlist) { 4999 if (idx < s_idx) 5000 goto cont; 5001 err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS, 5002 NETLINK_CB(cb->skb).portid, 5003 cb->nlh->nlmsg_seq, 0, 5004 flags, filter_mask, 5005 &s_idxattr, &s_prividx); 5006 /* If we ran out of room on the first message, 5007 * we're in trouble 5008 */ 5009 WARN_ON((err == -EMSGSIZE) && (skb->len == 0)); 5010 5011 if (err < 0) 5012 goto out; 5013 s_prividx = 0; 5014 s_idxattr = 0; 5015 nl_dump_check_consistent(cb, nlmsg_hdr(skb)); 5016 cont: 5017 idx++; 5018 } 5019 } 5020 out: 5021 cb->args[3] = s_prividx; 5022 cb->args[2] = s_idxattr; 5023 cb->args[1] = idx; 5024 cb->args[0] = h; 5025 5026 return skb->len; 5027 } 5028 5029 /* Process one rtnetlink message. */ 5030 5031 static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, 5032 struct netlink_ext_ack *extack) 5033 { 5034 struct net *net = sock_net(skb->sk); 5035 struct rtnl_link *link; 5036 struct module *owner; 5037 int err = -EOPNOTSUPP; 5038 rtnl_doit_func doit; 5039 unsigned int flags; 5040 int kind; 5041 int family; 5042 int type; 5043 5044 type = nlh->nlmsg_type; 5045 if (type > RTM_MAX) 5046 return -EOPNOTSUPP; 5047 5048 type -= RTM_BASE; 5049 5050 /* All the messages must have at least 1 byte length */ 5051 if (nlmsg_len(nlh) < sizeof(struct rtgenmsg)) 5052 return 0; 5053 5054 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family; 5055 kind = type&3; 5056 5057 if (kind != 2 && !netlink_net_capable(skb, CAP_NET_ADMIN)) 5058 return -EPERM; 5059 5060 rcu_read_lock(); 5061 if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) { 5062 struct sock *rtnl; 5063 rtnl_dumpit_func dumpit; 5064 u16 min_dump_alloc = 0; 5065 5066 link = rtnl_get_link(family, type); 5067 if (!link || !link->dumpit) { 5068 family = PF_UNSPEC; 5069 link = rtnl_get_link(family, type); 5070 if (!link || !link->dumpit) 5071 goto err_unlock; 5072 } 5073 owner = link->owner; 5074 dumpit = link->dumpit; 5075 5076 if (type == RTM_GETLINK - RTM_BASE) 5077 min_dump_alloc = rtnl_calcit(skb, nlh); 5078 5079 err = 0; 5080 /* need to do this before rcu_read_unlock() */ 5081 if (!try_module_get(owner)) 5082 err = -EPROTONOSUPPORT; 5083 5084 rcu_read_unlock(); 5085 5086 rtnl = net->rtnl; 5087 if (err == 0) { 5088 struct netlink_dump_control c = { 5089 .dump = dumpit, 5090 .min_dump_alloc = min_dump_alloc, 5091 .module = owner, 5092 }; 5093 err = netlink_dump_start(rtnl, skb, nlh, &c); 5094 /* netlink_dump_start() will keep a reference on 5095 * module if dump is still in progress. 5096 */ 5097 module_put(owner); 5098 } 5099 return err; 5100 } 5101 5102 link = rtnl_get_link(family, type); 5103 if (!link || !link->doit) { 5104 family = PF_UNSPEC; 5105 link = rtnl_get_link(PF_UNSPEC, type); 5106 if (!link || !link->doit) 5107 goto out_unlock; 5108 } 5109 5110 owner = link->owner; 5111 if (!try_module_get(owner)) { 5112 err = -EPROTONOSUPPORT; 5113 goto out_unlock; 5114 } 5115 5116 flags = link->flags; 5117 if (flags & RTNL_FLAG_DOIT_UNLOCKED) { 5118 doit = link->doit; 5119 rcu_read_unlock(); 5120 if (doit) 5121 err = doit(skb, nlh, extack); 5122 module_put(owner); 5123 return err; 5124 } 5125 rcu_read_unlock(); 5126 5127 rtnl_lock(); 5128 link = rtnl_get_link(family, type); 5129 if (link && link->doit) 5130 err = link->doit(skb, nlh, extack); 5131 rtnl_unlock(); 5132 5133 module_put(owner); 5134 5135 return err; 5136 5137 out_unlock: 5138 rcu_read_unlock(); 5139 return err; 5140 5141 err_unlock: 5142 rcu_read_unlock(); 5143 return -EOPNOTSUPP; 5144 } 5145 5146 static void rtnetlink_rcv(struct sk_buff *skb) 5147 { 5148 netlink_rcv_skb(skb, &rtnetlink_rcv_msg); 5149 } 5150 5151 static int rtnetlink_bind(struct net *net, int group) 5152 { 5153 switch (group) { 5154 case RTNLGRP_IPV4_MROUTE_R: 5155 case RTNLGRP_IPV6_MROUTE_R: 5156 if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 5157 return -EPERM; 5158 break; 5159 } 5160 return 0; 5161 } 5162 5163 static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr) 5164 { 5165 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 5166 5167 switch (event) { 5168 case NETDEV_REBOOT: 5169 case NETDEV_CHANGEMTU: 5170 case NETDEV_CHANGEADDR: 5171 case NETDEV_CHANGENAME: 5172 case NETDEV_FEAT_CHANGE: 5173 case NETDEV_BONDING_FAILOVER: 5174 case NETDEV_POST_TYPE_CHANGE: 5175 case NETDEV_NOTIFY_PEERS: 5176 case NETDEV_CHANGEUPPER: 5177 case NETDEV_RESEND_IGMP: 5178 case NETDEV_CHANGEINFODATA: 5179 case NETDEV_CHANGELOWERSTATE: 5180 case NETDEV_CHANGE_TX_QUEUE_LEN: 5181 rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event), 5182 GFP_KERNEL, NULL, 0); 5183 break; 5184 default: 5185 break; 5186 } 5187 return NOTIFY_DONE; 5188 } 5189 5190 static struct notifier_block rtnetlink_dev_notifier = { 5191 .notifier_call = rtnetlink_event, 5192 }; 5193 5194 5195 static int __net_init rtnetlink_net_init(struct net *net) 5196 { 5197 struct sock *sk; 5198 struct netlink_kernel_cfg cfg = { 5199 .groups = RTNLGRP_MAX, 5200 .input = rtnetlink_rcv, 5201 .cb_mutex = &rtnl_mutex, 5202 .flags = NL_CFG_F_NONROOT_RECV, 5203 .bind = rtnetlink_bind, 5204 }; 5205 5206 sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg); 5207 if (!sk) 5208 return -ENOMEM; 5209 net->rtnl = sk; 5210 return 0; 5211 } 5212 5213 static void __net_exit rtnetlink_net_exit(struct net *net) 5214 { 5215 netlink_kernel_release(net->rtnl); 5216 net->rtnl = NULL; 5217 } 5218 5219 static struct pernet_operations rtnetlink_net_ops = { 5220 .init = rtnetlink_net_init, 5221 .exit = rtnetlink_net_exit, 5222 }; 5223 5224 void __init rtnetlink_init(void) 5225 { 5226 if (register_pernet_subsys(&rtnetlink_net_ops)) 5227 panic("rtnetlink_init: cannot initialize rtnetlink\n"); 5228 5229 register_netdevice_notifier(&rtnetlink_dev_notifier); 5230 5231 rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink, 5232 rtnl_dump_ifinfo, 0); 5233 rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, 0); 5234 rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, 0); 5235 rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, 0); 5236 5237 rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, 0); 5238 rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, 0); 5239 rtnl_register(PF_UNSPEC, RTM_GETNETCONF, NULL, rtnl_dump_all, 0); 5240 5241 rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, 0); 5242 rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL, 0); 5243 rtnl_register(PF_BRIDGE, RTM_GETNEIGH, rtnl_fdb_get, rtnl_fdb_dump, 0); 5244 5245 rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, 0); 5246 rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, 0); 5247 rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, 0); 5248 5249 rtnl_register(PF_UNSPEC, RTM_GETSTATS, rtnl_stats_get, rtnl_stats_dump, 5250 0); 5251 } 5252