// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Routing netlink socket interface: protocol independent part.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	Fixes:
 *	Vitaly E. Lavrov		RTA_OK arithmetic was wrong.
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/mutex.h>
#include <linux/if_addr.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/bpf.h>

#include <linux/uaccess.h>

#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/udp.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/fib_rules.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/devlink.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/addrconf.h>
#endif

#include "dev.h"

#define RTNL_MAX_TYPE		50
#define RTNL_SLAVE_MAX_TYPE	43

struct rtnl_link {
	rtnl_doit_func		doit;
	rtnl_dumpit_func	dumpit;
	struct module		*owner;
	unsigned int		flags;
	struct rcu_head		rcu;
};

static DEFINE_MUTEX(rtnl_mutex);

void rtnl_lock(void)
{
	mutex_lock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_lock);

int rtnl_lock_killable(void)
{
	return mutex_lock_killable(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_lock_killable);

static struct sk_buff *defer_kfree_skb_list;
void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail)
{
	if (head && tail) {
		tail->next = defer_kfree_skb_list;
		defer_kfree_skb_list = head;
	}
}
EXPORT_SYMBOL(rtnl_kfree_skbs);
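/* Illustrative sketch (not part of the original file): a caller that holds
 * the RTNL and owns a chain of skbs linked through skb->next can hand the
 * whole batch to rtnl_kfree_skbs(); the actual kfree_skb() calls then happen
 * in __rtnl_unlock(), after the mutex has been released. The example_* name
 * is hypothetical.
 */
static void __maybe_unused example_defer_free(struct sk_buff *head,
					      struct sk_buff *tail)
{
	ASSERT_RTNL();
	/* [head .. tail] must already be linked via skb->next. */
	rtnl_kfree_skbs(head, tail);
	/* Freed later, once rtnl_unlock()/__rtnl_unlock() drops the mutex. */
}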
void __rtnl_unlock(void)
{
	struct sk_buff *head = defer_kfree_skb_list;

	defer_kfree_skb_list = NULL;

	/* Ensure that we didn't actually add any TODO item when __rtnl_unlock()
	 * is used. In some places, e.g. in cfg80211, we have code that will do
	 * something like
	 *   rtnl_lock()
	 *   wiphy_lock()
	 *   ...
	 *   rtnl_unlock()
	 *
	 * and because netdev_run_todo() acquires the RTNL for items on the list
	 * we could cause a situation such as this:
	 * Thread 1			Thread 2
	 *				  rtnl_lock()
	 *				  unregister_netdevice()
	 *				  __rtnl_unlock()
	 * rtnl_lock()
	 * wiphy_lock()
	 * rtnl_unlock()
	 *   netdev_run_todo()
	 *     __rtnl_unlock()
	 *
	 *     // list not empty now
	 *     // because of thread 2
	 *				  rtnl_lock()
	 *   while (!list_empty(...))
	 *     rtnl_lock()
	 *     wiphy_lock()
	 * **** DEADLOCK ****
	 *
	 * However, usage of __rtnl_unlock() is rare, and so we can ensure that
	 * it's not used in cases where something is added to the todo list.
	 */
	WARN_ON(!list_empty(&net_todo_list));

	mutex_unlock(&rtnl_mutex);

	while (head) {
		struct sk_buff *next = head->next;

		kfree_skb(head);
		cond_resched();
		head = next;
	}
}

void rtnl_unlock(void)
{
	/* This fellow will unlock it for us. */
	netdev_run_todo();
}
EXPORT_SYMBOL(rtnl_unlock);

int rtnl_trylock(void)
{
	return mutex_trylock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_trylock);

int rtnl_is_locked(void)
{
	return mutex_is_locked(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_is_locked);

bool refcount_dec_and_rtnl_lock(refcount_t *r)
{
	return refcount_dec_and_mutex_lock(r, &rtnl_mutex);
}
EXPORT_SYMBOL(refcount_dec_and_rtnl_lock);

#ifdef CONFIG_PROVE_LOCKING
bool lockdep_rtnl_is_held(void)
{
	return lockdep_is_held(&rtnl_mutex);
}
EXPORT_SYMBOL(lockdep_rtnl_is_held);
#endif /* #ifdef CONFIG_PROVE_LOCKING */

static struct rtnl_link __rcu *__rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];

static inline int rtm_msgindex(int msgtype)
{
	int msgindex = msgtype - RTM_BASE;

	/*
	 * msgindex < 0 implies someone tried to register a netlink
	 * control code. msgindex >= RTM_NR_MSGTYPES may indicate that
	 * the message type has not been added to linux/rtnetlink.h
	 */
	BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES);

	return msgindex;
}

static struct rtnl_link *rtnl_get_link(int protocol, int msgtype)
{
	struct rtnl_link __rcu **tab;

	if (protocol >= ARRAY_SIZE(rtnl_msg_handlers))
		protocol = PF_UNSPEC;

	tab = rcu_dereference_rtnl(rtnl_msg_handlers[protocol]);
	if (!tab)
		tab = rcu_dereference_rtnl(rtnl_msg_handlers[PF_UNSPEC]);

	return rcu_dereference_rtnl(tab[msgtype]);
}

static int rtnl_register_internal(struct module *owner,
				  int protocol, int msgtype,
				  rtnl_doit_func doit, rtnl_dumpit_func dumpit,
				  unsigned int flags)
{
	struct rtnl_link *link, *old;
	struct rtnl_link __rcu **tab;
	int msgindex;
	int ret = -ENOBUFS;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
	msgindex = rtm_msgindex(msgtype);

	rtnl_lock();
	tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
	if (tab == NULL) {
		tab = kcalloc(RTM_NR_MSGTYPES, sizeof(void *), GFP_KERNEL);
		if (!tab)
			goto unlock;

		/* ensures we see the 0 stores */
		rcu_assign_pointer(rtnl_msg_handlers[protocol], tab);
	}

	old = rtnl_dereference(tab[msgindex]);
	if (old) {
		link = kmemdup(old, sizeof(*old), GFP_KERNEL);
		if (!link)
			goto unlock;
	} else {
		link = kzalloc(sizeof(*link), GFP_KERNEL);
		if (!link)
			goto unlock;
	}

	WARN_ON(link->owner && link->owner != owner);
	link->owner = owner;

	WARN_ON(doit && link->doit && link->doit != doit);
	if (doit)
		link->doit = doit;
	WARN_ON(dumpit && link->dumpit && link->dumpit != dumpit);
	if (dumpit)
		link->dumpit = dumpit;

	WARN_ON(rtnl_msgtype_kind(msgtype) != RTNL_KIND_DEL &&
		(flags & RTNL_FLAG_BULK_DEL_SUPPORTED));
	link->flags |= flags;

	/* publish protocol:msgtype */
	rcu_assign_pointer(tab[msgindex], link);
	ret = 0;
	if (old)
		kfree_rcu(old, rcu);
unlock:
	rtnl_unlock();
	return ret;
}

/**
 * rtnl_register_module - Register a rtnetlink message type
 *
 * @owner: module registering the hook (THIS_MODULE)
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 * @doit: Function pointer called for each request message
 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
 *
 * Like rtnl_register, but for use by removable modules.
 */
int rtnl_register_module(struct module *owner,
			 int protocol, int msgtype,
			 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
			 unsigned int flags)
{
	return rtnl_register_internal(owner, protocol, msgtype,
				      doit, dumpit, flags);
}
EXPORT_SYMBOL_GPL(rtnl_register_module);
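/* Illustrative only (not from the original file): how a module would
 * typically wire up a handler. The message type and the example_* callbacks
 * are hypothetical; real callers pass their own RTM_* type and handlers
 * (RTM_GETLINK/PF_UNSPEC are owned by the core and used here purely for
 * illustration).
 */
static int __maybe_unused example_doit(struct sk_buff *skb,
				       struct nlmsghdr *nlh,
				       struct netlink_ext_ack *extack)
{
	/* Handle one request message; 0 means success. */
	return 0;
}

static int __maybe_unused example_register(void)
{
	return rtnl_register_module(THIS_MODULE, PF_UNSPEC, RTM_GETLINK,
				    example_doit, NULL, 0);
}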
/**
 * rtnl_register - Register a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 * @doit: Function pointer called for each request message
 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
 *
 * Registers the specified function pointers (at least one of them has
 * to be non-NULL) to be called whenever a request message for the
 * specified protocol family and message type is received.
 *
 * The special protocol family PF_UNSPEC may be used to define fallback
 * function pointers for the case when no entry for the specific protocol
 * family exists.
 */
void rtnl_register(int protocol, int msgtype,
		   rtnl_doit_func doit, rtnl_dumpit_func dumpit,
		   unsigned int flags)
{
	int err;

	err = rtnl_register_internal(NULL, protocol, msgtype, doit, dumpit,
				     flags);
	if (err)
		pr_err("Unable to register rtnetlink message handler, "
		       "protocol = %d, message type = %d\n", protocol, msgtype);
}

/**
 * rtnl_unregister - Unregister a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_unregister(int protocol, int msgtype)
{
	struct rtnl_link __rcu **tab;
	struct rtnl_link *link;
	int msgindex;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
	msgindex = rtm_msgindex(msgtype);

	rtnl_lock();
	tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
	if (!tab) {
		rtnl_unlock();
		return -ENOENT;
	}

	link = rtnl_dereference(tab[msgindex]);
	RCU_INIT_POINTER(tab[msgindex], NULL);
	rtnl_unlock();

	kfree_rcu(link, rcu);

	return 0;
}
EXPORT_SYMBOL_GPL(rtnl_unregister);
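/* Illustrative counterpart to the registration sketch above: a module exit
 * path drops the handler it registered. The message type is the same
 * hypothetical one used in example_register().
 */
static void __maybe_unused example_unregister(void)
{
	rtnl_unregister(PF_UNSPEC, RTM_GETLINK);
}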
/**
 * rtnl_unregister_all - Unregister all rtnetlink message types of a protocol
 * @protocol: Protocol family or PF_UNSPEC
 *
 * Identical to calling rtnl_unregister() for all registered message types
 * of a certain protocol family.
 */
void rtnl_unregister_all(int protocol)
{
	struct rtnl_link __rcu **tab;
	struct rtnl_link *link;
	int msgindex;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);

	rtnl_lock();
	tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
	if (!tab) {
		rtnl_unlock();
		return;
	}
	RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL);
	for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) {
		link = rtnl_dereference(tab[msgindex]);
		if (!link)
			continue;

		RCU_INIT_POINTER(tab[msgindex], NULL);
		kfree_rcu(link, rcu);
	}
	rtnl_unlock();

	synchronize_net();

	kfree(tab);
}
EXPORT_SYMBOL_GPL(rtnl_unregister_all);

static LIST_HEAD(link_ops);

static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
{
	const struct rtnl_link_ops *ops;

	list_for_each_entry(ops, &link_ops, list) {
		if (!strcmp(ops->kind, kind))
			return ops;
	}
	return NULL;
}

/**
 * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * The caller must hold the rtnl_mutex. This function should be used
 * by drivers that create devices during module initialization. It
 * must be called before registering the devices.
 *
 * Returns 0 on success or a negative error code.
 */
int __rtnl_link_register(struct rtnl_link_ops *ops)
{
	if (rtnl_link_ops_get(ops->kind))
		return -EEXIST;

	/* The check for alloc/setup is here because if ops
	 * does not have that filled up, it is not possible
	 * to use the ops for creating device. So do not
	 * fill up dellink as well. That disables rtnl_dellink.
	 */
	if ((ops->alloc || ops->setup) && !ops->dellink)
		ops->dellink = unregister_netdevice_queue;

	list_add_tail(&ops->list, &link_ops);
	return 0;
}
EXPORT_SYMBOL_GPL(__rtnl_link_register);

/**
 * rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_link_register(struct rtnl_link_ops *ops)
{
	int err;

	/* Sanity-check max sizes to avoid stack buffer overflow. */
	if (WARN_ON(ops->maxtype > RTNL_MAX_TYPE ||
		    ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE))
		return -EINVAL;

	rtnl_lock();
	err = __rtnl_link_register(ops);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL_GPL(rtnl_link_register);
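/* Illustrative sketch of a minimal rtnl_link_ops a virtual-device driver
 * might register from its module init via rtnl_link_register(). The "example"
 * kind and example_* names are hypothetical; real drivers also fill in
 * .priv_size, .newlink and friends.
 */
static void __maybe_unused example_setup(struct net_device *dev)
{
	ether_setup(dev);	/* typical: start from Ethernet defaults */
}

static struct rtnl_link_ops example_link_ops __maybe_unused = {
	.kind	= "example",
	.setup	= example_setup,
	/* .dellink is defaulted to unregister_netdevice_queue above. */
};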
static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
{
	struct net_device *dev;
	LIST_HEAD(list_kill);

	for_each_netdev(net, dev) {
		if (dev->rtnl_link_ops == ops)
			ops->dellink(dev, &list_kill);
	}
	unregister_netdevice_many(&list_kill);
}

/**
 * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 *
 * The caller must hold the rtnl_mutex and guarantee net_namespace_list
 * integrity (hold pernet_ops_rwsem for writing to close the race
 * with setup_net() and cleanup_net()).
 */
void __rtnl_link_unregister(struct rtnl_link_ops *ops)
{
	struct net *net;

	for_each_net(net) {
		__rtnl_kill_links(net, ops);
	}
	list_del(&ops->list);
}
EXPORT_SYMBOL_GPL(__rtnl_link_unregister);

/* Return with the rtnl_lock held when there are no network
 * devices unregistering in any network namespace.
 */
static void rtnl_lock_unregistering_all(void)
{
	struct net *net;
	bool unregistering;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&netdev_unregistering_wq, &wait);
	for (;;) {
		unregistering = false;
		rtnl_lock();
		/* We held write locked pernet_ops_rwsem, and parallel
		 * setup_net() and cleanup_net() are not possible.
		 */
		for_each_net(net) {
			if (atomic_read(&net->dev_unreg_count) > 0) {
				unregistering = true;
				break;
			}
		}
		if (!unregistering)
			break;
		__rtnl_unlock();

		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&netdev_unregistering_wq, &wait);
}

/**
 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 */
void rtnl_link_unregister(struct rtnl_link_ops *ops)
{
	/* Close the race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock_unregistering_all();
	__rtnl_link_unregister(ops);
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(rtnl_link_unregister);

static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
{
	struct net_device *master_dev;
	const struct rtnl_link_ops *ops;
	size_t size = 0;

	rcu_read_lock();

	master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
	if (!master_dev)
		goto out;

	ops = master_dev->rtnl_link_ops;
	if (!ops || !ops->get_slave_size)
		goto out;
	/* IFLA_INFO_SLAVE_DATA + nested data */
	size = nla_total_size(sizeof(struct nlattr)) +
	       ops->get_slave_size(master_dev, dev);

out:
	rcu_read_unlock();
	return size;
}

static size_t rtnl_link_get_size(const struct net_device *dev)
{
	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
	size_t size;

	if (!ops)
		return 0;

	size = nla_total_size(sizeof(struct nlattr)) +	/* IFLA_LINKINFO */
	       nla_total_size(strlen(ops->kind) + 1);	/* IFLA_INFO_KIND */

	if (ops->get_size)
		/* IFLA_INFO_DATA + nested data */
		size += nla_total_size(sizeof(struct nlattr)) +
			ops->get_size(dev);

	if (ops->get_xstats_size)
		/* IFLA_INFO_XSTATS */
		size += nla_total_size(ops->get_xstats_size(dev));

	size += rtnl_link_get_slave_info_data_size(dev);

	return size;
}

static LIST_HEAD(rtnl_af_ops);

static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
{
	const struct rtnl_af_ops *ops;

	ASSERT_RTNL();

	list_for_each_entry(ops, &rtnl_af_ops, list) {
		if (ops->family == family)
			return ops;
	}

	return NULL;
}

/**
 * rtnl_af_register - Register rtnl_af_ops with rtnetlink.
 * @ops: struct rtnl_af_ops * to register
 */
void rtnl_af_register(struct rtnl_af_ops *ops)
{
	rtnl_lock();
	list_add_tail_rcu(&ops->list, &rtnl_af_ops);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(rtnl_af_register);
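/* Illustrative sketch: a per-address-family handler registered with
 * rtnl_af_register(). In practice the IPv4/IPv6 stacks own these; the
 * AF_INET value and the empty ops here are purely for illustration, and a
 * real user would also set .fill_link_af, .get_link_af_size, etc.
 */
static struct rtnl_af_ops example_af_ops __maybe_unused = {
	.family	= AF_INET,
};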
/**
 * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
 * @ops: struct rtnl_af_ops * to unregister
 */
void rtnl_af_unregister(struct rtnl_af_ops *ops)
{
	rtnl_lock();
	list_del_rcu(&ops->list);
	rtnl_unlock();

	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(rtnl_af_unregister);

static size_t rtnl_link_get_af_size(const struct net_device *dev,
				    u32 ext_filter_mask)
{
	struct rtnl_af_ops *af_ops;
	size_t size;

	/* IFLA_AF_SPEC */
	size = nla_total_size(sizeof(struct nlattr));

	rcu_read_lock();
	list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
		if (af_ops->get_link_af_size) {
			/* AF_* + nested data */
			size += nla_total_size(sizeof(struct nlattr)) +
				af_ops->get_link_af_size(dev, ext_filter_mask);
		}
	}
	rcu_read_unlock();

	return size;
}

static bool rtnl_have_link_slave_info(const struct net_device *dev)
{
	struct net_device *master_dev;
	bool ret = false;

	rcu_read_lock();

	master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
	if (master_dev && master_dev->rtnl_link_ops)
		ret = true;
	rcu_read_unlock();
	return ret;
}

static int rtnl_link_slave_info_fill(struct sk_buff *skb,
				     const struct net_device *dev)
{
	struct net_device *master_dev;
	const struct rtnl_link_ops *ops;
	struct nlattr *slave_data;
	int err;

	master_dev = netdev_master_upper_dev_get((struct net_device *)dev);
	if (!master_dev)
		return 0;
	ops = master_dev->rtnl_link_ops;
	if (!ops)
		return 0;
	if (nla_put_string(skb, IFLA_INFO_SLAVE_KIND, ops->kind) < 0)
		return -EMSGSIZE;
	if (ops->fill_slave_info) {
		slave_data = nla_nest_start_noflag(skb, IFLA_INFO_SLAVE_DATA);
		if (!slave_data)
			return -EMSGSIZE;
		err = ops->fill_slave_info(skb, master_dev, dev);
		if (err < 0)
			goto err_cancel_slave_data;
		nla_nest_end(skb, slave_data);
	}
	return 0;

err_cancel_slave_data:
	nla_nest_cancel(skb, slave_data);
	return err;
}

static int rtnl_link_info_fill(struct sk_buff *skb,
			       const struct net_device *dev)
{
	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
	struct nlattr *data;
	int err;

	if (!ops)
		return 0;
	if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
		return -EMSGSIZE;
	if (ops->fill_xstats) {
		err = ops->fill_xstats(skb, dev);
		if (err < 0)
			return err;
	}
	if (ops->fill_info) {
		data = nla_nest_start_noflag(skb, IFLA_INFO_DATA);
		if (data == NULL)
			return -EMSGSIZE;
		err = ops->fill_info(skb, dev);
		if (err < 0)
			goto err_cancel_data;
		nla_nest_end(skb, data);
	}
	return 0;

err_cancel_data:
	nla_nest_cancel(skb, data);
	return err;
}

static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
{
	struct nlattr *linkinfo;
	int err = -EMSGSIZE;

	linkinfo = nla_nest_start_noflag(skb, IFLA_LINKINFO);
	if (linkinfo == NULL)
		goto out;

	err = rtnl_link_info_fill(skb, dev);
	if (err < 0)
		goto err_cancel_link;

	err = rtnl_link_slave_info_fill(skb, dev);
	if (err < 0)
		goto err_cancel_link;

	nla_nest_end(skb, linkinfo);
	return 0;

err_cancel_link:
	nla_nest_cancel(skb, linkinfo);
out:
	return err;
}
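/* Illustrative sketch of the nest-start/nest-cancel pattern the fill
 * helpers above all follow: open a nested attribute, emit its contents,
 * and unwind the whole nest on any failure so the message stays
 * well-formed. The example_* name and the "example" kind string are
 * hypothetical.
 */
static int __maybe_unused example_fill_nested(struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, IFLA_LINKINFO);
	if (!nest)
		return -EMSGSIZE;
	if (nla_put_string(skb, IFLA_INFO_KIND, "example"))
		goto cancel;
	nla_nest_end(skb, nest);
	return 0;

cancel:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}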
int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
{
	struct sock *rtnl = net->rtnl;

	return nlmsg_notify(rtnl, skb, pid, group, echo, GFP_KERNEL);
}

int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
{
	struct sock *rtnl = net->rtnl;

	return nlmsg_unicast(rtnl, skb, pid);
}
EXPORT_SYMBOL(rtnl_unicast);

void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
		 const struct nlmsghdr *nlh, gfp_t flags)
{
	struct sock *rtnl = net->rtnl;

	nlmsg_notify(rtnl, skb, pid, group, nlmsg_report(nlh), flags);
}
EXPORT_SYMBOL(rtnl_notify);

void rtnl_set_sk_err(struct net *net, u32 group, int error)
{
	struct sock *rtnl = net->rtnl;

	netlink_set_err(rtnl, 0, group, error);
}
EXPORT_SYMBOL(rtnl_set_sk_err);

int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
{
	struct nlattr *mx;
	int i, valid = 0;

	/* nothing is dumped for dst_default_metrics, so just skip the loop */
	if (metrics == dst_default_metrics.metrics)
		return 0;

	mx = nla_nest_start_noflag(skb, RTA_METRICS);
	if (mx == NULL)
		return -ENOBUFS;

	for (i = 0; i < RTAX_MAX; i++) {
		if (metrics[i]) {
			if (i == RTAX_CC_ALGO - 1) {
				char tmp[TCP_CA_NAME_MAX], *name;

				name = tcp_ca_get_name_by_key(metrics[i], tmp);
				if (!name)
					continue;
				if (nla_put_string(skb, i + 1, name))
					goto nla_put_failure;
			} else if (i == RTAX_FEATURES - 1) {
				u32 user_features = metrics[i] & RTAX_FEATURE_MASK;

				if (!user_features)
					continue;
				BUILD_BUG_ON(RTAX_FEATURE_MASK & DST_FEATURE_MASK);
				if (nla_put_u32(skb, i + 1, user_features))
					goto nla_put_failure;
			} else {
				if (nla_put_u32(skb, i + 1, metrics[i]))
					goto nla_put_failure;
			}
			valid++;
		}
	}

	if (!valid) {
		nla_nest_cancel(skb, mx);
		return 0;
	}

	return nla_nest_end(skb, mx);

nla_put_failure:
	nla_nest_cancel(skb, mx);
	return -EMSGSIZE;
}
EXPORT_SYMBOL(rtnetlink_put_metrics);
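/* Illustrative sketch: broadcasting a prepared rtnetlink message to a
 * multicast group with rtnl_notify(). The example_* name is hypothetical;
 * skb must already carry a complete message, and nlh (the request that
 * triggered the change, if any) controls the NLM_F_ECHO report decision.
 */
static void __maybe_unused example_notify(struct net *net, struct sk_buff *skb,
					  const struct nlmsghdr *nlh)
{
	rtnl_notify(skb, net, 0, RTNLGRP_LINK, nlh, GFP_KERNEL);
}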
int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
		       long expires, u32 error)
{
	struct rta_cacheinfo ci = {
		.rta_error = error,
		.rta_id = id,
	};

	if (dst) {
		ci.rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse);
		ci.rta_used = dst->__use;
		ci.rta_clntref = rcuref_read(&dst->__rcuref);
	}
	if (expires) {
		unsigned long clock;

		clock = jiffies_to_clock_t(abs(expires));
		clock = min_t(unsigned long, clock, INT_MAX);
		ci.rta_expires = (expires > 0) ? clock : -clock;
	}
	return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
}
EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);

static void set_operstate(struct net_device *dev, unsigned char transition)
{
	unsigned char operstate = dev->operstate;

	switch (transition) {
	case IF_OPER_UP:
		if ((operstate == IF_OPER_DORMANT ||
		     operstate == IF_OPER_TESTING ||
		     operstate == IF_OPER_UNKNOWN) &&
		    !netif_dormant(dev) && !netif_testing(dev))
			operstate = IF_OPER_UP;
		break;

	case IF_OPER_TESTING:
		if (netif_oper_up(dev))
			operstate = IF_OPER_TESTING;
		break;

	case IF_OPER_DORMANT:
		if (netif_oper_up(dev))
			operstate = IF_OPER_DORMANT;
		break;
	}

	if (dev->operstate != operstate) {
		write_lock(&dev_base_lock);
		dev->operstate = operstate;
		write_unlock(&dev_base_lock);
		netdev_state_change(dev);
	}
}

static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
{
	return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
	       (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
}

static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
					   const struct ifinfomsg *ifm)
{
	unsigned int flags = ifm->ifi_flags;

	/* bugwards compatibility: ifi_change == 0 is treated as ~0 */
	if (ifm->ifi_change)
		flags = (flags & ifm->ifi_change) |
			(rtnl_dev_get_flags(dev) & ~ifm->ifi_change);

	return flags;
}

static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
				 const struct rtnl_link_stats64 *b)
{
	a->rx_packets = b->rx_packets;
	a->tx_packets = b->tx_packets;
	a->rx_bytes = b->rx_bytes;
	a->tx_bytes = b->tx_bytes;
	a->rx_errors = b->rx_errors;
	a->tx_errors = b->tx_errors;
	a->rx_dropped = b->rx_dropped;
	a->tx_dropped = b->tx_dropped;

	a->multicast = b->multicast;
	a->collisions = b->collisions;

	a->rx_length_errors = b->rx_length_errors;
	a->rx_over_errors = b->rx_over_errors;
	a->rx_crc_errors = b->rx_crc_errors;
	a->rx_frame_errors = b->rx_frame_errors;
	a->rx_fifo_errors = b->rx_fifo_errors;
	a->rx_missed_errors = b->rx_missed_errors;

	a->tx_aborted_errors = b->tx_aborted_errors;
	a->tx_carrier_errors = b->tx_carrier_errors;
	a->tx_fifo_errors = b->tx_fifo_errors;
	a->tx_heartbeat_errors = b->tx_heartbeat_errors;
	a->tx_window_errors = b->tx_window_errors;

	a->rx_compressed = b->rx_compressed;
	a->tx_compressed = b->tx_compressed;

	a->rx_nohandler = b->rx_nohandler;
}
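/* Worked example (illustrative, hypothetical example_* name): with
 * ifi_change == IFF_UP, only the IFF_UP bit may change; every other flag
 * keeps the device's current value as returned by rtnl_dev_get_flags().
 * A request with ifi_change == 0 would instead be treated as "change
 * everything", per the compatibility comment above.
 */
static unsigned int __maybe_unused example_combine_flags(const struct net_device *dev)
{
	struct ifinfomsg ifm = {
		.ifi_flags  = IFF_UP,	/* requested bit values */
		.ifi_change = IFF_UP,	/* ...but only IFF_UP may change */
	};

	return rtnl_dev_combine_flags(dev, &ifm);
}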
/* All VF info */
static inline int rtnl_vfinfo_size(const struct net_device *dev,
				   u32 ext_filter_mask)
{
	if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) {
		int num_vfs = dev_num_vf(dev->dev.parent);
		size_t size = nla_total_size(0);
		size += num_vfs *
			(nla_total_size(0) +
			 nla_total_size(sizeof(struct ifla_vf_mac)) +
			 nla_total_size(sizeof(struct ifla_vf_broadcast)) +
			 nla_total_size(sizeof(struct ifla_vf_vlan)) +
			 nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */
			 nla_total_size(MAX_VLAN_LIST_LEN *
					sizeof(struct ifla_vf_vlan_info)) +
			 nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
			 nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
			 nla_total_size(sizeof(struct ifla_vf_rate)) +
			 nla_total_size(sizeof(struct ifla_vf_link_state)) +
			 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
			 nla_total_size(sizeof(struct ifla_vf_trust)));
		if (~ext_filter_mask & RTEXT_FILTER_SKIP_STATS) {
			size += num_vfs *
				(nla_total_size(0) + /* nest IFLA_VF_STATS */
				 /* IFLA_VF_STATS_RX_PACKETS */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_TX_PACKETS */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_RX_BYTES */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_TX_BYTES */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_BROADCAST */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_MULTICAST */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_RX_DROPPED */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_TX_DROPPED */
				 nla_total_size_64bit(sizeof(__u64)));
		}
		return size;
	} else
		return 0;
}

static size_t rtnl_port_size(const struct net_device *dev,
			     u32 ext_filter_mask)
{
	size_t port_size = nla_total_size(4)		/* PORT_VF */
		+ nla_total_size(PORT_PROFILE_MAX)	/* PORT_PROFILE */
		+ nla_total_size(PORT_UUID_MAX)		/* PORT_INSTANCE_UUID */
		+ nla_total_size(PORT_UUID_MAX)		/* PORT_HOST_UUID */
		+ nla_total_size(1)			/* PORT_VDP_REQUEST */
		+ nla_total_size(2);			/* PORT_VDP_RESPONSE */
	size_t vf_ports_size = nla_total_size(sizeof(struct nlattr));
	size_t vf_port_size = nla_total_size(sizeof(struct nlattr))
		+ port_size;
	size_t port_self_size = nla_total_size(sizeof(struct nlattr))
		+ port_size;

	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
	    !(ext_filter_mask & RTEXT_FILTER_VF))
		return 0;
	if (dev_num_vf(dev->dev.parent))
		return port_self_size + vf_ports_size +
			vf_port_size * dev_num_vf(dev->dev.parent);
	else
		return port_self_size;
}

static size_t rtnl_xdp_size(void)
{
	size_t xdp_size = nla_total_size(0) +	/* nest IFLA_XDP */
			  nla_total_size(1) +	/* XDP_ATTACHED */
			  nla_total_size(4) +	/* XDP_PROG_ID (or 1st mode) */
			  nla_total_size(4);	/* XDP_<mode>_PROG_ID */

	return xdp_size;
}

static size_t rtnl_prop_list_size(const struct net_device *dev)
{
	struct netdev_name_node *name_node;
	size_t size;

	if (list_empty(&dev->name_node->list))
		return 0;
	size = nla_total_size(0);
	list_for_each_entry(name_node, &dev->name_node->list, list)
		size += nla_total_size(ALTIFNAMSIZ);
	return size;
}

static size_t rtnl_proto_down_size(const struct net_device *dev)
{
	size_t size = nla_total_size(1);

	if (dev->proto_down_reason)
		size += nla_total_size(0) + nla_total_size(4);

	return size;
}

static size_t rtnl_devlink_port_size(const struct net_device *dev)
{
	size_t size = nla_total_size(0);	/* nest IFLA_DEVLINK_PORT */

	if (dev->devlink_port)
		size += devlink_nl_port_handle_size(dev->devlink_port);

	return size;
}
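/* Illustrative sketch of the accounting convention the size helpers above
 * rely on: each attribute costs its nlattr header plus the padded payload,
 * which is what nla_total_size() computes, and a nest costs one extra
 * zero-payload attribute. The example_* name is hypothetical.
 */
static size_t __maybe_unused example_u32_attr_size(void)
{
	/* One u32 attribute: NLA_HDRLEN plus 4 payload bytes, NLA_ALIGNed. */
	return nla_total_size(sizeof(u32));
}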
static noinline size_t if_nlmsg_size(const struct net_device *dev,
				     u32 ext_filter_mask)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
	       + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
	       + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
	       + nla_total_size_64bit(sizeof(struct rtnl_link_ifmap))
	       + nla_total_size(sizeof(struct rtnl_link_stats))
	       + nla_total_size_64bit(sizeof(struct rtnl_link_stats64))
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
	       + nla_total_size(4) /* IFLA_TXQLEN */
	       + nla_total_size(4) /* IFLA_WEIGHT */
	       + nla_total_size(4) /* IFLA_MTU */
	       + nla_total_size(4) /* IFLA_LINK */
	       + nla_total_size(4) /* IFLA_MASTER */
	       + nla_total_size(1) /* IFLA_CARRIER */
	       + nla_total_size(4) /* IFLA_PROMISCUITY */
	       + nla_total_size(4) /* IFLA_ALLMULTI */
	       + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
	       + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
	       + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */
	       + nla_total_size(4) /* IFLA_GSO_MAX_SIZE */
	       + nla_total_size(4) /* IFLA_GRO_MAX_SIZE */
	       + nla_total_size(4) /* IFLA_GSO_IPV4_MAX_SIZE */
	       + nla_total_size(4) /* IFLA_GRO_IPV4_MAX_SIZE */
	       + nla_total_size(4) /* IFLA_TSO_MAX_SIZE */
	       + nla_total_size(4) /* IFLA_TSO_MAX_SEGS */
	       + nla_total_size(1) /* IFLA_OPERSTATE */
	       + nla_total_size(1) /* IFLA_LINKMODE */
	       + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
	       + nla_total_size(4) /* IFLA_LINK_NETNSID */
	       + nla_total_size(4) /* IFLA_GROUP */
	       + nla_total_size(ext_filter_mask
				& RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
	       + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
	       + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
	       + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
	       + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
	       + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
	       + rtnl_xdp_size() /* IFLA_XDP */
	       + nla_total_size(4)  /* IFLA_EVENT */
	       + nla_total_size(4)  /* IFLA_NEW_NETNSID */
	       + nla_total_size(4)  /* IFLA_NEW_IFINDEX */
	       + rtnl_proto_down_size(dev)  /* proto down */
	       + nla_total_size(4)  /* IFLA_TARGET_NETNSID */
	       + nla_total_size(4)  /* IFLA_CARRIER_UP_COUNT */
	       + nla_total_size(4)  /* IFLA_CARRIER_DOWN_COUNT */
	       + nla_total_size(4)  /* IFLA_MIN_MTU */
	       + nla_total_size(4)  /* IFLA_MAX_MTU */
	       + rtnl_prop_list_size(dev)
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_PERM_ADDRESS */
	       + rtnl_devlink_port_size(dev)
	       + 0;
}

static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *vf_ports;
	struct nlattr *vf_port;
	int vf;
	int err;

	vf_ports = nla_nest_start_noflag(skb, IFLA_VF_PORTS);
	if (!vf_ports)
		return -EMSGSIZE;

	for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
		vf_port = nla_nest_start_noflag(skb, IFLA_VF_PORT);
		if (!vf_port)
			goto nla_put_failure;
		if (nla_put_u32(skb, IFLA_PORT_VF, vf))
			goto nla_put_failure;
		err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
		if (err == -EMSGSIZE)
			goto nla_put_failure;
		if (err) {
			nla_nest_cancel(skb, vf_port);
			continue;
		}
		nla_nest_end(skb, vf_port);
	}

	nla_nest_end(skb, vf_ports);

	return 0;

nla_put_failure:
	nla_nest_cancel(skb, vf_ports);
	return -EMSGSIZE;
}
static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *port_self;
	int err;

	port_self = nla_nest_start_noflag(skb, IFLA_PORT_SELF);
	if (!port_self)
		return -EMSGSIZE;

	err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
	if (err) {
		nla_nest_cancel(skb, port_self);
		return (err == -EMSGSIZE) ? err : 0;
	}

	nla_nest_end(skb, port_self);

	return 0;
}

static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
			  u32 ext_filter_mask)
{
	int err;

	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
	    !(ext_filter_mask & RTEXT_FILTER_VF))
		return 0;

	err = rtnl_port_self_fill(skb, dev);
	if (err)
		return err;

	if (dev_num_vf(dev->dev.parent)) {
		err = rtnl_vf_ports_fill(skb, dev);
		if (err)
			return err;
	}

	return 0;
}

static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
{
	int err;
	struct netdev_phys_item_id ppid;

	err = dev_get_phys_port_id(dev, &ppid);
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id))
		return -EMSGSIZE;

	return 0;
}

static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
{
	char name[IFNAMSIZ];
	int err;

	err = dev_get_phys_port_name(dev, name, sizeof(name));
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put_string(skb, IFLA_PHYS_PORT_NAME, name))
		return -EMSGSIZE;

	return 0;
}

static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_phys_item_id ppid = { };
	int err;

	err = dev_get_port_parent_id(dev, &ppid, false);
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put(skb, IFLA_PHYS_SWITCH_ID, ppid.id_len, ppid.id))
		return -EMSGSIZE;

	return 0;
}

static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
					      struct net_device *dev)
{
	struct rtnl_link_stats64 *sp;
	struct nlattr *attr;

	attr = nla_reserve_64bit(skb, IFLA_STATS64,
				 sizeof(struct rtnl_link_stats64), IFLA_PAD);
	if (!attr)
		return -EMSGSIZE;

	sp = nla_data(attr);
	dev_get_stats(dev, sp);

	attr = nla_reserve(skb, IFLA_STATS,
			   sizeof(struct rtnl_link_stats));
	if (!attr)
		return -EMSGSIZE;

	copy_rtnl_link_stats(nla_data(attr), sp);

	return 0;
}
static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
					       struct net_device *dev,
					       int vfs_num,
					       struct nlattr *vfinfo,
					       u32 ext_filter_mask)
{
	struct ifla_vf_rss_query_en vf_rss_query_en;
	struct nlattr *vf, *vfstats, *vfvlanlist;
	struct ifla_vf_link_state vf_linkstate;
	struct ifla_vf_vlan_info vf_vlan_info;
	struct ifla_vf_spoofchk vf_spoofchk;
	struct ifla_vf_tx_rate vf_tx_rate;
	struct ifla_vf_stats vf_stats;
	struct ifla_vf_trust vf_trust;
	struct ifla_vf_vlan vf_vlan;
	struct ifla_vf_rate vf_rate;
	struct ifla_vf_mac vf_mac;
	struct ifla_vf_broadcast vf_broadcast;
	struct ifla_vf_info ivi;
	struct ifla_vf_guid node_guid;
	struct ifla_vf_guid port_guid;

	memset(&ivi, 0, sizeof(ivi));

	/* Not all SR-IOV capable drivers support the
	 * spoofcheck and "RSS query enable" query. Preset to
	 * -1 so the user space tool can detect that the driver
	 * didn't report anything.
	 */
	ivi.spoofchk = -1;
	ivi.rss_query_en = -1;
	ivi.trusted = -1;
	/* The default value for VF link state is "auto"
	 * IFLA_VF_LINK_STATE_AUTO which equals zero
	 */
	ivi.linkstate = 0;
	/* VLAN Protocol by default is 802.1Q */
	ivi.vlan_proto = htons(ETH_P_8021Q);
	if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
		return 0;

	memset(&vf_vlan_info, 0, sizeof(vf_vlan_info));
	memset(&node_guid, 0, sizeof(node_guid));
	memset(&port_guid, 0, sizeof(port_guid));

	vf_mac.vf =
		vf_vlan.vf =
		vf_vlan_info.vf =
		vf_rate.vf =
		vf_tx_rate.vf =
		vf_spoofchk.vf =
		vf_linkstate.vf =
		vf_rss_query_en.vf =
		vf_trust.vf =
		node_guid.vf =
		port_guid.vf = ivi.vf;

	memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
	memcpy(vf_broadcast.broadcast, dev->broadcast, dev->addr_len);
	vf_vlan.vlan = ivi.vlan;
	vf_vlan.qos = ivi.qos;
	vf_vlan_info.vlan = ivi.vlan;
	vf_vlan_info.qos = ivi.qos;
	vf_vlan_info.vlan_proto = ivi.vlan_proto;
	vf_tx_rate.rate = ivi.max_tx_rate;
	vf_rate.min_tx_rate = ivi.min_tx_rate;
	vf_rate.max_tx_rate = ivi.max_tx_rate;
	vf_spoofchk.setting = ivi.spoofchk;
	vf_linkstate.link_state = ivi.linkstate;
	vf_rss_query_en.setting = ivi.rss_query_en;
	vf_trust.setting = ivi.trusted;
	vf = nla_nest_start_noflag(skb, IFLA_VF_INFO);
	if (!vf)
		goto nla_put_vfinfo_failure;
	if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
	    nla_put(skb, IFLA_VF_BROADCAST, sizeof(vf_broadcast), &vf_broadcast) ||
	    nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
	    nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
		    &vf_rate) ||
	    nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
		    &vf_tx_rate) ||
	    nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
		    &vf_spoofchk) ||
	    nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
		    &vf_linkstate) ||
	    nla_put(skb, IFLA_VF_RSS_QUERY_EN,
		    sizeof(vf_rss_query_en),
		    &vf_rss_query_en) ||
	    nla_put(skb, IFLA_VF_TRUST,
		    sizeof(vf_trust), &vf_trust))
		goto nla_put_vf_failure;

	if (dev->netdev_ops->ndo_get_vf_guid &&
	    !dev->netdev_ops->ndo_get_vf_guid(dev, vfs_num, &node_guid,
					      &port_guid)) {
		if (nla_put(skb, IFLA_VF_IB_NODE_GUID, sizeof(node_guid),
			    &node_guid) ||
		    nla_put(skb, IFLA_VF_IB_PORT_GUID, sizeof(port_guid),
			    &port_guid))
			goto nla_put_vf_failure;
	}
	vfvlanlist = nla_nest_start_noflag(skb, IFLA_VF_VLAN_LIST);
	if (!vfvlanlist)
		goto nla_put_vf_failure;
	if (nla_put(skb, IFLA_VF_VLAN_INFO, sizeof(vf_vlan_info),
		    &vf_vlan_info)) {
		nla_nest_cancel(skb, vfvlanlist);
		goto nla_put_vf_failure;
	}
	nla_nest_end(skb, vfvlanlist);
	if (~ext_filter_mask & RTEXT_FILTER_SKIP_STATS) {
		memset(&vf_stats, 0, sizeof(vf_stats));
		if (dev->netdev_ops->ndo_get_vf_stats)
			dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
							  &vf_stats);
		vfstats = nla_nest_start_noflag(skb, IFLA_VF_STATS);
		if (!vfstats)
			goto nla_put_vf_failure;
		if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
				      vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
		    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
				      vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
		    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
				      vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
		    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
				      vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
		    nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
				      vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
		    nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
				      vf_stats.multicast, IFLA_VF_STATS_PAD) ||
		    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED,
				      vf_stats.rx_dropped, IFLA_VF_STATS_PAD) ||
		    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED,
				      vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) {
			nla_nest_cancel(skb, vfstats);
			goto nla_put_vf_failure;
		}
		nla_nest_end(skb, vfstats);
	}
	nla_nest_end(skb, vf);
	return 0;

nla_put_vf_failure:
	nla_nest_cancel(skb, vf);
nla_put_vfinfo_failure:
	nla_nest_cancel(skb, vfinfo);
	return -EMSGSIZE;
}

static noinline_for_stack int rtnl_fill_vf(struct sk_buff *skb,
					   struct net_device *dev,
					   u32 ext_filter_mask)
{
	struct nlattr *vfinfo;
	int i, num_vfs;

	if (!dev->dev.parent || ((ext_filter_mask & RTEXT_FILTER_VF) == 0))
		return 0;

	num_vfs = dev_num_vf(dev->dev.parent);
	if (nla_put_u32(skb, IFLA_NUM_VF, num_vfs))
		return -EMSGSIZE;

	if (!dev->netdev_ops->ndo_get_vf_config)
		return 0;

	vfinfo = nla_nest_start_noflag(skb, IFLA_VFINFO_LIST);
	if (!vfinfo)
		return -EMSGSIZE;

	for (i = 0; i < num_vfs; i++) {
		if (rtnl_fill_vfinfo(skb, dev, i, vfinfo, ext_filter_mask))
			return -EMSGSIZE;
	}

	nla_nest_end(skb, vfinfo);
	return 0;
}

static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
{
	struct rtnl_link_ifmap map;

	memset(&map, 0, sizeof(map));
	map.mem_start = dev->mem_start;
	map.mem_end = dev->mem_end;
	map.base_addr = dev->base_addr;
	map.irq = dev->irq;
	map.dma = dev->dma;
	map.port = dev->if_port;

	if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD))
		return -EMSGSIZE;

	return 0;
}

static u32 rtnl_xdp_prog_skb(struct net_device *dev)
{
	const struct bpf_prog *generic_xdp_prog;

	ASSERT_RTNL();

	generic_xdp_prog = rtnl_dereference(dev->xdp_prog);
	if (!generic_xdp_prog)
		return 0;
	return generic_xdp_prog->aux->id;
}

static u32 rtnl_xdp_prog_drv(struct net_device *dev)
{
	return dev_xdp_prog_id(dev, XDP_MODE_DRV);
}

static u32 rtnl_xdp_prog_hw(struct net_device *dev)
{
	return dev_xdp_prog_id(dev, XDP_MODE_HW);
}

static int rtnl_xdp_report_one(struct sk_buff *skb, struct net_device *dev,
			       u32 *prog_id, u8 *mode, u8 tgt_mode, u32 attr,
			       u32 (*get_prog_id)(struct net_device *dev))
{
	u32 curr_id;
	int err;

	curr_id = get_prog_id(dev);
	if (!curr_id)
		return 0;

	*prog_id = curr_id;
	err = nla_put_u32(skb, attr, curr_id);
	if (err)
		return err;

	if (*mode != XDP_ATTACHED_NONE)
		*mode = XDP_ATTACHED_MULTI;
	else
		*mode = tgt_mode;

	return 0;
}
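/*
 * Illustrative walk-through (editor's note, not from the original source):
 * if a device has programs attached in both skb and drv mode, the first
 * rtnl_xdp_report_one() call in rtnl_xdp_fill() below sets *mode to
 * XDP_ATTACHED_SKB and the second flips it to XDP_ATTACHED_MULTI; the dump
 * then carries the per-mode IFLA_XDP_*_PROG_ID attributes but omits the
 * single IFLA_XDP_PROG_ID.
 */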
static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *xdp;
	u32 prog_id;
	int err;
	u8 mode;

	xdp = nla_nest_start_noflag(skb, IFLA_XDP);
	if (!xdp)
		return -EMSGSIZE;

	prog_id = 0;
	mode = XDP_ATTACHED_NONE;
	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_SKB,
				  IFLA_XDP_SKB_PROG_ID, rtnl_xdp_prog_skb);
	if (err)
		goto err_cancel;
	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_DRV,
				  IFLA_XDP_DRV_PROG_ID, rtnl_xdp_prog_drv);
	if (err)
		goto err_cancel;
	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_HW,
				  IFLA_XDP_HW_PROG_ID, rtnl_xdp_prog_hw);
	if (err)
		goto err_cancel;

	err = nla_put_u8(skb, IFLA_XDP_ATTACHED, mode);
	if (err)
		goto err_cancel;

	if (prog_id && mode != XDP_ATTACHED_MULTI) {
		err = nla_put_u32(skb, IFLA_XDP_PROG_ID, prog_id);
		if (err)
			goto err_cancel;
	}

	nla_nest_end(skb, xdp);
	return 0;

err_cancel:
	nla_nest_cancel(skb, xdp);
	return err;
}

static u32 rtnl_get_event(unsigned long event)
{
	u32 rtnl_event_type = IFLA_EVENT_NONE;

	switch (event) {
	case NETDEV_REBOOT:
		rtnl_event_type = IFLA_EVENT_REBOOT;
		break;
	case NETDEV_FEAT_CHANGE:
		rtnl_event_type = IFLA_EVENT_FEATURES;
		break;
	case NETDEV_BONDING_FAILOVER:
		rtnl_event_type = IFLA_EVENT_BONDING_FAILOVER;
		break;
	case NETDEV_NOTIFY_PEERS:
		rtnl_event_type = IFLA_EVENT_NOTIFY_PEERS;
		break;
	case NETDEV_RESEND_IGMP:
		rtnl_event_type = IFLA_EVENT_IGMP_RESEND;
		break;
	case NETDEV_CHANGEINFODATA:
		rtnl_event_type = IFLA_EVENT_BONDING_OPTIONS;
		break;
	default:
		break;
	}

	return rtnl_event_type;
}

static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev)
{
	const struct net_device *upper_dev;
	int ret = 0;

	rcu_read_lock();

	upper_dev = netdev_master_upper_dev_get_rcu(dev);
	if (upper_dev)
		ret = nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex);

	rcu_read_unlock();
	return ret;
}

static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev,
			  bool force)
{
	int ifindex = dev_get_iflink(dev);

	if (force || dev->ifindex != ifindex)
		return nla_put_u32(skb, IFLA_LINK, ifindex);

	return 0;
}
static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb,
					      struct net_device *dev)
{
	char buf[IFALIASZ];
	int ret;

	ret = dev_get_alias(dev, buf, sizeof(buf));
	return ret > 0 ? nla_put_string(skb, IFLA_IFALIAS, buf) : 0;
}

static int rtnl_fill_link_netnsid(struct sk_buff *skb,
				  const struct net_device *dev,
				  struct net *src_net, gfp_t gfp)
{
	bool put_iflink = false;

	if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net) {
		struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);

		if (!net_eq(dev_net(dev), link_net)) {
			int id = peernet2id_alloc(src_net, link_net, gfp);

			if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
				return -EMSGSIZE;

			put_iflink = true;
		}
	}

	return nla_put_iflink(skb, dev, put_iflink);
}

static int rtnl_fill_link_af(struct sk_buff *skb,
			     const struct net_device *dev,
			     u32 ext_filter_mask)
{
	const struct rtnl_af_ops *af_ops;
	struct nlattr *af_spec;

	af_spec = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
	if (!af_spec)
		return -EMSGSIZE;

	list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
		struct nlattr *af;
		int err;

		if (!af_ops->fill_link_af)
			continue;

		af = nla_nest_start_noflag(skb, af_ops->family);
		if (!af)
			return -EMSGSIZE;

		err = af_ops->fill_link_af(skb, dev, ext_filter_mask);
		/*
		 * Caller may return ENODATA to indicate that there
		 * was no data to be dumped. This is not an error, it
		 * means we should trim the attribute header and
		 * continue.
		 */
		if (err == -ENODATA)
			nla_nest_cancel(skb, af);
		else if (err < 0)
			return -EMSGSIZE;

		nla_nest_end(skb, af);
	}

	nla_nest_end(skb, af_spec);
	return 0;
}

static int rtnl_fill_alt_ifnames(struct sk_buff *skb,
				 const struct net_device *dev)
{
	struct netdev_name_node *name_node;
	int count = 0;

	list_for_each_entry(name_node, &dev->name_node->list, list) {
		if (nla_put_string(skb, IFLA_ALT_IFNAME, name_node->name))
			return -EMSGSIZE;
		count++;
	}
	return count;
}

static int rtnl_fill_prop_list(struct sk_buff *skb,
			       const struct net_device *dev)
{
	struct nlattr *prop_list;
	int ret;

	prop_list = nla_nest_start(skb, IFLA_PROP_LIST);
	if (!prop_list)
		return -EMSGSIZE;

	ret = rtnl_fill_alt_ifnames(skb, dev);
	if (ret <= 0)
		goto nest_cancel;

	nla_nest_end(skb, prop_list);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, prop_list);
	return ret;
}

static int rtnl_fill_proto_down(struct sk_buff *skb,
				const struct net_device *dev)
{
	struct nlattr *pr;
	u32 preason;

	if (nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down))
		goto nla_put_failure;

	preason = dev->proto_down_reason;
	if (!preason)
		return 0;

	pr = nla_nest_start(skb, IFLA_PROTO_DOWN_REASON);
	if (!pr)
		return -EMSGSIZE;

	if (nla_put_u32(skb, IFLA_PROTO_DOWN_REASON_VALUE, preason)) {
		nla_nest_cancel(skb, pr);
		goto nla_put_failure;
	}

	nla_nest_end(skb, pr);
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static int rtnl_fill_devlink_port(struct sk_buff *skb,
				  const struct net_device *dev)
{
	struct nlattr *devlink_port_nest;
	int ret;

	devlink_port_nest = nla_nest_start(skb, IFLA_DEVLINK_PORT);
	if (!devlink_port_nest)
		return -EMSGSIZE;

	if (dev->devlink_port) {
		ret = devlink_nl_port_handle_fill(skb, dev->devlink_port);
		if (ret < 0)
			goto nest_cancel;
	}

	nla_nest_end(skb, devlink_port_nest);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, devlink_port_nest);
	return ret;
}

static int rtnl_fill_ifinfo(struct sk_buff *skb,
			    struct net_device *dev, struct net *src_net,
			    int type, u32 pid, u32 seq, u32 change,
			    unsigned int flags, u32 ext_filter_mask,
			    u32 event, int *new_nsid, int new_ifindex,
			    int tgt_netnsid, gfp_t gfp)
{
	struct ifinfomsg *ifm;
	struct nlmsghdr *nlh;
	struct Qdisc *qdisc;

	ASSERT_RTNL();
	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ifm = nlmsg_data(nlh);
	ifm->ifi_family = AF_UNSPEC;
	ifm->__ifi_pad = 0;
	ifm->ifi_type = dev->type;
	ifm->ifi_index = dev->ifindex;
	ifm->ifi_flags = dev_get_flags(dev);
	ifm->ifi_change = change;

	if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_TARGET_NETNSID, tgt_netnsid))
		goto nla_put_failure;

	qdisc = rtnl_dereference(dev->qdisc);
	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
	    nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
	    nla_put_u8(skb, IFLA_OPERSTATE,
		       netif_running(dev) ? dev->operstate : IF_OPER_DOWN) ||
	    nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) ||
	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
	    nla_put_u32(skb, IFLA_MIN_MTU, dev->min_mtu) ||
	    nla_put_u32(skb, IFLA_MAX_MTU, dev->max_mtu) ||
	    nla_put_u32(skb, IFLA_GROUP, dev->group) ||
	    nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
	    nla_put_u32(skb, IFLA_ALLMULTI, dev->allmulti) ||
	    nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) ||
	    nla_put_u32(skb, IFLA_GSO_MAX_SEGS, dev->gso_max_segs) ||
	    nla_put_u32(skb, IFLA_GSO_MAX_SIZE, dev->gso_max_size) ||
	    nla_put_u32(skb, IFLA_GRO_MAX_SIZE, dev->gro_max_size) ||
	    nla_put_u32(skb, IFLA_GSO_IPV4_MAX_SIZE, dev->gso_ipv4_max_size) ||
	    nla_put_u32(skb, IFLA_GRO_IPV4_MAX_SIZE, dev->gro_ipv4_max_size) ||
	    nla_put_u32(skb, IFLA_TSO_MAX_SIZE, dev->tso_max_size) ||
	    nla_put_u32(skb, IFLA_TSO_MAX_SEGS, dev->tso_max_segs) ||
#ifdef CONFIG_RPS
	    nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
#endif
	    put_master_ifindex(skb, dev) ||
	    nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
	    (qdisc &&
	     nla_put_string(skb, IFLA_QDISC, qdisc->ops->id)) ||
	    nla_put_ifalias(skb, dev) ||
	    nla_put_u32(skb, IFLA_CARRIER_CHANGES,
			atomic_read(&dev->carrier_up_count) +
			atomic_read(&dev->carrier_down_count)) ||
	    nla_put_u32(skb, IFLA_CARRIER_UP_COUNT,
			atomic_read(&dev->carrier_up_count)) ||
	    nla_put_u32(skb, IFLA_CARRIER_DOWN_COUNT,
			atomic_read(&dev->carrier_down_count)))
		goto nla_put_failure;

	if (rtnl_fill_proto_down(skb, dev))
		goto nla_put_failure;

	if (event != IFLA_EVENT_NONE) {
		if (nla_put_u32(skb, IFLA_EVENT, event))
			goto nla_put_failure;
	}

	if (rtnl_fill_link_ifmap(skb, dev))
		goto nla_put_failure;

	if (dev->addr_len) {
		if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
		    nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
			goto nla_put_failure;
	}

	if (rtnl_phys_port_id_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_phys_port_name_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_phys_switch_id_fill(skb, dev))
		goto nla_put_failure;
	if (rtnl_fill_stats(skb, dev))
		goto nla_put_failure;

	if (rtnl_fill_vf(skb, dev, ext_filter_mask))
		goto nla_put_failure;

	if (rtnl_port_fill(skb, dev, ext_filter_mask))
		goto nla_put_failure;

	if (rtnl_xdp_fill(skb, dev))
		goto nla_put_failure;

	if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
		if (rtnl_link_fill(skb, dev) < 0)
			goto nla_put_failure;
	}

	if (rtnl_fill_link_netnsid(skb, dev, src_net, gfp))
		goto nla_put_failure;

	if (new_nsid &&
	    nla_put_s32(skb, IFLA_NEW_NETNSID, *new_nsid) < 0)
		goto nla_put_failure;
	if (new_ifindex &&
	    nla_put_s32(skb, IFLA_NEW_IFINDEX, new_ifindex) < 0)
		goto nla_put_failure;

	if (memchr_inv(dev->perm_addr, '\0', dev->addr_len) &&
	    nla_put(skb, IFLA_PERM_ADDRESS, dev->addr_len, dev->perm_addr))
		goto nla_put_failure;

	rcu_read_lock();
	if (rtnl_fill_link_af(skb, dev, ext_filter_mask))
		goto nla_put_failure_rcu;
	rcu_read_unlock();

	if (rtnl_fill_prop_list(skb, dev))
		goto nla_put_failure;

	if (dev->dev.parent &&
	    nla_put_string(skb, IFLA_PARENT_DEV_NAME,
			   dev_name(dev->dev.parent)))
		goto nla_put_failure;

	if (dev->dev.parent && dev->dev.parent->bus &&
	    nla_put_string(skb, IFLA_PARENT_DEV_BUS_NAME,
			   dev->dev.parent->bus->name))
		goto nla_put_failure;

	if (rtnl_fill_devlink_port(skb, dev))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure_rcu:
	rcu_read_unlock();
nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
	[IFLA_IFNAME]		= { .type = NLA_STRING, .len = IFNAMSIZ-1 },
	[IFLA_ADDRESS]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[IFLA_BROADCAST]	= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[IFLA_MAP]		= { .len = sizeof(struct rtnl_link_ifmap) },
	[IFLA_MTU]		= { .type = NLA_U32 },
	[IFLA_LINK]		= { .type = NLA_U32 },
	[IFLA_MASTER]		= { .type = NLA_U32 },
	[IFLA_CARRIER]		= { .type = NLA_U8 },
	[IFLA_TXQLEN]		= { .type = NLA_U32 },
	[IFLA_WEIGHT]		= { .type = NLA_U32 },
	[IFLA_OPERSTATE]	= { .type = NLA_U8 },
	[IFLA_LINKMODE]		= { .type = NLA_U8 },
	[IFLA_LINKINFO]		= { .type = NLA_NESTED },
	[IFLA_NET_NS_PID]	= { .type = NLA_U32 },
	[IFLA_NET_NS_FD]	= { .type = NLA_U32 },
	/* IFLA_IFALIAS is a string, but policy is set to NLA_BINARY to
	 * allow 0-length string (needed to remove an alias).
	 */
	[IFLA_IFALIAS]		= { .type = NLA_BINARY, .len = IFALIASZ - 1 },
	[IFLA_VFINFO_LIST]	= { .type = NLA_NESTED },
	[IFLA_VF_PORTS]		= { .type = NLA_NESTED },
	[IFLA_PORT_SELF]	= { .type = NLA_NESTED },
	[IFLA_AF_SPEC]		= { .type = NLA_NESTED },
	[IFLA_EXT_MASK]		= { .type = NLA_U32 },
	[IFLA_PROMISCUITY]	= { .type = NLA_U32 },
	[IFLA_NUM_TX_QUEUES]	= { .type = NLA_U32 },
	[IFLA_NUM_RX_QUEUES]	= { .type = NLA_U32 },
	[IFLA_GSO_MAX_SEGS]	= { .type = NLA_U32 },
	[IFLA_GSO_MAX_SIZE]	= { .type = NLA_U32 },
	[IFLA_PHYS_PORT_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
	[IFLA_CARRIER_CHANGES]	= { .type = NLA_U32 },	/* ignored */
	[IFLA_PHYS_SWITCH_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
	[IFLA_LINK_NETNSID]	= { .type = NLA_S32 },
	[IFLA_PROTO_DOWN]	= { .type = NLA_U8 },
	[IFLA_XDP]		= { .type = NLA_NESTED },
	[IFLA_EVENT]		= { .type = NLA_U32 },
	[IFLA_GROUP]		= { .type = NLA_U32 },
	[IFLA_TARGET_NETNSID]	= { .type = NLA_S32 },
	[IFLA_CARRIER_UP_COUNT]	= { .type = NLA_U32 },
	[IFLA_CARRIER_DOWN_COUNT] = { .type = NLA_U32 },
	[IFLA_MIN_MTU]		= { .type = NLA_U32 },
	[IFLA_MAX_MTU]		= { .type = NLA_U32 },
	[IFLA_PROP_LIST]	= { .type = NLA_NESTED },
	[IFLA_ALT_IFNAME]	= { .type = NLA_STRING,
				    .len = ALTIFNAMSIZ - 1 },
	[IFLA_PERM_ADDRESS]	= { .type = NLA_REJECT },
	[IFLA_PROTO_DOWN_REASON] = { .type = NLA_NESTED },
	[IFLA_NEW_IFINDEX]	= NLA_POLICY_MIN(NLA_S32, 1),
	[IFLA_PARENT_DEV_NAME]	= { .type = NLA_NUL_STRING },
	[IFLA_GRO_MAX_SIZE]	= { .type = NLA_U32 },
	[IFLA_TSO_MAX_SIZE]	= { .type = NLA_REJECT },
	[IFLA_TSO_MAX_SEGS]	= { .type = NLA_REJECT },
	[IFLA_ALLMULTI]		= { .type = NLA_REJECT },
	[IFLA_GSO_IPV4_MAX_SIZE]	= { .type = NLA_U32 },
	[IFLA_GRO_IPV4_MAX_SIZE]	= { .type = NLA_U32 },
};

static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
	[IFLA_INFO_KIND]	= { .type = NLA_STRING },
	[IFLA_INFO_DATA]	= { .type = NLA_NESTED },
	[IFLA_INFO_SLAVE_KIND]	= { .type = NLA_STRING },
	[IFLA_INFO_SLAVE_DATA]	= { .type = NLA_NESTED },
};

static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
	[IFLA_VF_MAC]		= { .len = sizeof(struct ifla_vf_mac) },
	[IFLA_VF_BROADCAST]	= { .type = NLA_REJECT },
	[IFLA_VF_VLAN]		= { .len = sizeof(struct ifla_vf_vlan) },
	[IFLA_VF_VLAN_LIST]	= { .type = NLA_NESTED },
	[IFLA_VF_TX_RATE]	= { .len = sizeof(struct ifla_vf_tx_rate) },
	[IFLA_VF_SPOOFCHK]	= { .len = sizeof(struct ifla_vf_spoofchk) },
	[IFLA_VF_RATE]		= { .len = sizeof(struct ifla_vf_rate) },
	[IFLA_VF_LINK_STATE]	= { .len = sizeof(struct ifla_vf_link_state) },
	[IFLA_VF_RSS_QUERY_EN]	= { .len = sizeof(struct ifla_vf_rss_query_en) },
	[IFLA_VF_STATS]		= { .type = NLA_NESTED },
	[IFLA_VF_TRUST]		= { .len = sizeof(struct ifla_vf_trust) },
	[IFLA_VF_IB_NODE_GUID]	= { .len = sizeof(struct ifla_vf_guid) },
	[IFLA_VF_IB_PORT_GUID]	= { .len = sizeof(struct ifla_vf_guid) },
};
It's also broken with regard to NLA_BINARY use in 2024 * combination with structs. 2025 */ 2026 [IFLA_PORT_VSI_TYPE] = { .type = NLA_BINARY, 2027 .len = sizeof(struct ifla_port_vsi) }, 2028 }; 2029 2030 static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = { 2031 [IFLA_XDP_UNSPEC] = { .strict_start_type = IFLA_XDP_EXPECTED_FD }, 2032 [IFLA_XDP_FD] = { .type = NLA_S32 }, 2033 [IFLA_XDP_EXPECTED_FD] = { .type = NLA_S32 }, 2034 [IFLA_XDP_ATTACHED] = { .type = NLA_U8 }, 2035 [IFLA_XDP_FLAGS] = { .type = NLA_U32 }, 2036 [IFLA_XDP_PROG_ID] = { .type = NLA_U32 }, 2037 }; 2038 2039 static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla) 2040 { 2041 const struct rtnl_link_ops *ops = NULL; 2042 struct nlattr *linfo[IFLA_INFO_MAX + 1]; 2043 2044 if (nla_parse_nested_deprecated(linfo, IFLA_INFO_MAX, nla, ifla_info_policy, NULL) < 0) 2045 return NULL; 2046 2047 if (linfo[IFLA_INFO_KIND]) { 2048 char kind[MODULE_NAME_LEN]; 2049 2050 nla_strscpy(kind, linfo[IFLA_INFO_KIND], sizeof(kind)); 2051 ops = rtnl_link_ops_get(kind); 2052 } 2053 2054 return ops; 2055 } 2056 2057 static bool link_master_filtered(struct net_device *dev, int master_idx) 2058 { 2059 struct net_device *master; 2060 2061 if (!master_idx) 2062 return false; 2063 2064 master = netdev_master_upper_dev_get(dev); 2065 2066 /* 0 is already used to denote IFLA_MASTER wasn't passed, therefore need 2067 * another invalid value for ifindex to denote "no master". 2068 */ 2069 if (master_idx == -1) 2070 return !!master; 2071 2072 if (!master || master->ifindex != master_idx) 2073 return true; 2074 2075 return false; 2076 } 2077 2078 static bool link_kind_filtered(const struct net_device *dev, 2079 const struct rtnl_link_ops *kind_ops) 2080 { 2081 if (kind_ops && dev->rtnl_link_ops != kind_ops) 2082 return true; 2083 2084 return false; 2085 } 2086 2087 static bool link_dump_filtered(struct net_device *dev, 2088 int master_idx, 2089 const struct rtnl_link_ops *kind_ops) 2090 { 2091 if (link_master_filtered(dev, master_idx) || 2092 link_kind_filtered(dev, kind_ops)) 2093 return true; 2094 2095 return false; 2096 } 2097 2098 /** 2099 * rtnl_get_net_ns_capable - Get netns if sufficiently privileged. 2100 * @sk: netlink socket 2101 * @netnsid: network namespace identifier 2102 * 2103 * Returns the network namespace identified by netnsid on success or an error 2104 * pointer on failure. 2105 */ 2106 struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid) 2107 { 2108 struct net *net; 2109 2110 net = get_net_ns_by_id(sock_net(sk), netnsid); 2111 if (!net) 2112 return ERR_PTR(-EINVAL); 2113 2114 /* For now, the caller is required to have CAP_NET_ADMIN in 2115 * the user namespace owning the target net ns. 
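	 *
	 * Callers follow the usual ERR_PTR() convention and must drop the
	 * reference with put_net() once done, as the dump and delete
	 * paths below do:
	 *
	 *	net = rtnl_get_net_ns_capable(skb->sk, netnsid);
	 *	if (IS_ERR(net))
	 *		return PTR_ERR(net);
	 *	...
	 *	put_net(net);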
2116 */ 2117 if (!sk_ns_capable(sk, net->user_ns, CAP_NET_ADMIN)) { 2118 put_net(net); 2119 return ERR_PTR(-EACCES); 2120 } 2121 return net; 2122 } 2123 EXPORT_SYMBOL_GPL(rtnl_get_net_ns_capable); 2124 2125 static int rtnl_valid_dump_ifinfo_req(const struct nlmsghdr *nlh, 2126 bool strict_check, struct nlattr **tb, 2127 struct netlink_ext_ack *extack) 2128 { 2129 int hdrlen; 2130 2131 if (strict_check) { 2132 struct ifinfomsg *ifm; 2133 2134 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) { 2135 NL_SET_ERR_MSG(extack, "Invalid header for link dump"); 2136 return -EINVAL; 2137 } 2138 2139 ifm = nlmsg_data(nlh); 2140 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags || 2141 ifm->ifi_change) { 2142 NL_SET_ERR_MSG(extack, "Invalid values in header for link dump request"); 2143 return -EINVAL; 2144 } 2145 if (ifm->ifi_index) { 2146 NL_SET_ERR_MSG(extack, "Filter by device index not supported for link dumps"); 2147 return -EINVAL; 2148 } 2149 2150 return nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, 2151 IFLA_MAX, ifla_policy, 2152 extack); 2153 } 2154 2155 /* A hack to preserve kernel<->userspace interface. 2156 * The correct header is ifinfomsg. It is consistent with rtnl_getlink. 2157 * However, before Linux v3.9 the code here assumed rtgenmsg and that's 2158 * what iproute2 < v3.9.0 used. 2159 * We can detect the old iproute2. Even including the IFLA_EXT_MASK 2160 * attribute, its netlink message is shorter than struct ifinfomsg. 2161 */ 2162 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ? 2163 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg); 2164 2165 return nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, 2166 extack); 2167 } 2168 2169 static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) 2170 { 2171 struct netlink_ext_ack *extack = cb->extack; 2172 const struct nlmsghdr *nlh = cb->nlh; 2173 struct net *net = sock_net(skb->sk); 2174 struct net *tgt_net = net; 2175 int h, s_h; 2176 int idx = 0, s_idx; 2177 struct net_device *dev; 2178 struct hlist_head *head; 2179 struct nlattr *tb[IFLA_MAX+1]; 2180 u32 ext_filter_mask = 0; 2181 const struct rtnl_link_ops *kind_ops = NULL; 2182 unsigned int flags = NLM_F_MULTI; 2183 int master_idx = 0; 2184 int netnsid = -1; 2185 int err, i; 2186 2187 s_h = cb->args[0]; 2188 s_idx = cb->args[1]; 2189 2190 err = rtnl_valid_dump_ifinfo_req(nlh, cb->strict_check, tb, extack); 2191 if (err < 0) { 2192 if (cb->strict_check) 2193 return err; 2194 2195 goto walk_entries; 2196 } 2197 2198 for (i = 0; i <= IFLA_MAX; ++i) { 2199 if (!tb[i]) 2200 continue; 2201 2202 /* new attributes should only be added with strict checking */ 2203 switch (i) { 2204 case IFLA_TARGET_NETNSID: 2205 netnsid = nla_get_s32(tb[i]); 2206 tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid); 2207 if (IS_ERR(tgt_net)) { 2208 NL_SET_ERR_MSG(extack, "Invalid target network namespace id"); 2209 return PTR_ERR(tgt_net); 2210 } 2211 break; 2212 case IFLA_EXT_MASK: 2213 ext_filter_mask = nla_get_u32(tb[i]); 2214 break; 2215 case IFLA_MASTER: 2216 master_idx = nla_get_u32(tb[i]); 2217 break; 2218 case IFLA_LINKINFO: 2219 kind_ops = linkinfo_to_kind_ops(tb[i]); 2220 break; 2221 default: 2222 if (cb->strict_check) { 2223 NL_SET_ERR_MSG(extack, "Unsupported attribute in link dump request"); 2224 return -EINVAL; 2225 } 2226 } 2227 } 2228 2229 if (master_idx || kind_ops) 2230 flags |= NLM_F_DUMP_FILTERED; 2231 2232 walk_entries: 2233 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { 2234 idx = 0; 2235 head = &tgt_net->dev_index_head[h]; 
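		/* Dump resume state lives in cb->args[]: args[0] is the
		 * hash bucket (h) and args[1] the index within that
		 * bucket (idx) reached by the previous partial dump.
		 */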
2236 hlist_for_each_entry(dev, head, index_hlist) { 2237 if (link_dump_filtered(dev, master_idx, kind_ops)) 2238 goto cont; 2239 if (idx < s_idx) 2240 goto cont; 2241 err = rtnl_fill_ifinfo(skb, dev, net, 2242 RTM_NEWLINK, 2243 NETLINK_CB(cb->skb).portid, 2244 nlh->nlmsg_seq, 0, flags, 2245 ext_filter_mask, 0, NULL, 0, 2246 netnsid, GFP_KERNEL); 2247 2248 if (err < 0) { 2249 if (likely(skb->len)) 2250 goto out; 2251 2252 goto out_err; 2253 } 2254 cont: 2255 idx++; 2256 } 2257 } 2258 out: 2259 err = skb->len; 2260 out_err: 2261 cb->args[1] = idx; 2262 cb->args[0] = h; 2263 cb->seq = tgt_net->dev_base_seq; 2264 nl_dump_check_consistent(cb, nlmsg_hdr(skb)); 2265 if (netnsid >= 0) 2266 put_net(tgt_net); 2267 2268 return err; 2269 } 2270 2271 int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len, 2272 struct netlink_ext_ack *exterr) 2273 { 2274 return nla_parse_deprecated(tb, IFLA_MAX, head, len, ifla_policy, 2275 exterr); 2276 } 2277 EXPORT_SYMBOL(rtnl_nla_parse_ifla); 2278 2279 struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]) 2280 { 2281 struct net *net; 2282 /* Examine the link attributes and figure out which 2283 * network namespace we are talking about. 2284 */ 2285 if (tb[IFLA_NET_NS_PID]) 2286 net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID])); 2287 else if (tb[IFLA_NET_NS_FD]) 2288 net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD])); 2289 else 2290 net = get_net(src_net); 2291 return net; 2292 } 2293 EXPORT_SYMBOL(rtnl_link_get_net); 2294 2295 /* Figure out which network namespace we are talking about by 2296 * examining the link attributes in the following order: 2297 * 2298 * 1. IFLA_NET_NS_PID 2299 * 2. IFLA_NET_NS_FD 2300 * 3. IFLA_TARGET_NETNSID 2301 */ 2302 static struct net *rtnl_link_get_net_by_nlattr(struct net *src_net, 2303 struct nlattr *tb[]) 2304 { 2305 struct net *net; 2306 2307 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]) 2308 return rtnl_link_get_net(src_net, tb); 2309 2310 if (!tb[IFLA_TARGET_NETNSID]) 2311 return get_net(src_net); 2312 2313 net = get_net_ns_by_id(src_net, nla_get_u32(tb[IFLA_TARGET_NETNSID])); 2314 if (!net) 2315 return ERR_PTR(-EINVAL); 2316 2317 return net; 2318 } 2319 2320 static struct net *rtnl_link_get_net_capable(const struct sk_buff *skb, 2321 struct net *src_net, 2322 struct nlattr *tb[], int cap) 2323 { 2324 struct net *net; 2325 2326 net = rtnl_link_get_net_by_nlattr(src_net, tb); 2327 if (IS_ERR(net)) 2328 return net; 2329 2330 if (!netlink_ns_capable(skb, net->user_ns, cap)) { 2331 put_net(net); 2332 return ERR_PTR(-EPERM); 2333 } 2334 2335 return net; 2336 } 2337 2338 /* Verify that rtnetlink requests do not pass additional properties 2339 * potentially referring to different network namespaces. 
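 *
 * For example, a request carrying both IFLA_NET_NS_PID and
 * IFLA_NET_NS_FD is rejected with -EINVAL even when both resolve to
 * the same namespace, since each attribute may name a different one.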
2340 */ 2341 static int rtnl_ensure_unique_netns(struct nlattr *tb[], 2342 struct netlink_ext_ack *extack, 2343 bool netns_id_only) 2344 { 2345 2346 if (netns_id_only) { 2347 if (!tb[IFLA_NET_NS_PID] && !tb[IFLA_NET_NS_FD]) 2348 return 0; 2349 2350 NL_SET_ERR_MSG(extack, "specified netns attribute not supported"); 2351 return -EOPNOTSUPP; 2352 } 2353 2354 if (tb[IFLA_TARGET_NETNSID] && (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD])) 2355 goto invalid_attr; 2356 2357 if (tb[IFLA_NET_NS_PID] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_FD])) 2358 goto invalid_attr; 2359 2360 if (tb[IFLA_NET_NS_FD] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_PID])) 2361 goto invalid_attr; 2362 2363 return 0; 2364 2365 invalid_attr: 2366 NL_SET_ERR_MSG(extack, "multiple netns identifying attributes specified"); 2367 return -EINVAL; 2368 } 2369 2370 static int rtnl_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, 2371 int max_tx_rate) 2372 { 2373 const struct net_device_ops *ops = dev->netdev_ops; 2374 2375 if (!ops->ndo_set_vf_rate) 2376 return -EOPNOTSUPP; 2377 if (max_tx_rate && max_tx_rate < min_tx_rate) 2378 return -EINVAL; 2379 2380 return ops->ndo_set_vf_rate(dev, vf, min_tx_rate, max_tx_rate); 2381 } 2382 2383 static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[], 2384 struct netlink_ext_ack *extack) 2385 { 2386 if (tb[IFLA_ADDRESS] && 2387 nla_len(tb[IFLA_ADDRESS]) < dev->addr_len) 2388 return -EINVAL; 2389 2390 if (tb[IFLA_BROADCAST] && 2391 nla_len(tb[IFLA_BROADCAST]) < dev->addr_len) 2392 return -EINVAL; 2393 2394 if (tb[IFLA_GSO_MAX_SIZE] && 2395 nla_get_u32(tb[IFLA_GSO_MAX_SIZE]) > dev->tso_max_size) { 2396 NL_SET_ERR_MSG(extack, "too big gso_max_size"); 2397 return -EINVAL; 2398 } 2399 2400 if (tb[IFLA_GSO_MAX_SEGS] && 2401 (nla_get_u32(tb[IFLA_GSO_MAX_SEGS]) > GSO_MAX_SEGS || 2402 nla_get_u32(tb[IFLA_GSO_MAX_SEGS]) > dev->tso_max_segs)) { 2403 NL_SET_ERR_MSG(extack, "too big gso_max_segs"); 2404 return -EINVAL; 2405 } 2406 2407 if (tb[IFLA_GRO_MAX_SIZE] && 2408 nla_get_u32(tb[IFLA_GRO_MAX_SIZE]) > GRO_MAX_SIZE) { 2409 NL_SET_ERR_MSG(extack, "too big gro_max_size"); 2410 return -EINVAL; 2411 } 2412 2413 if (tb[IFLA_GSO_IPV4_MAX_SIZE] && 2414 nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]) > dev->tso_max_size) { 2415 NL_SET_ERR_MSG(extack, "too big gso_ipv4_max_size"); 2416 return -EINVAL; 2417 } 2418 2419 if (tb[IFLA_GRO_IPV4_MAX_SIZE] && 2420 nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE]) > GRO_MAX_SIZE) { 2421 NL_SET_ERR_MSG(extack, "too big gro_ipv4_max_size"); 2422 return -EINVAL; 2423 } 2424 2425 if (tb[IFLA_AF_SPEC]) { 2426 struct nlattr *af; 2427 int rem, err; 2428 2429 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) { 2430 const struct rtnl_af_ops *af_ops; 2431 2432 af_ops = rtnl_af_lookup(nla_type(af)); 2433 if (!af_ops) 2434 return -EAFNOSUPPORT; 2435 2436 if (!af_ops->set_link_af) 2437 return -EOPNOTSUPP; 2438 2439 if (af_ops->validate_link_af) { 2440 err = af_ops->validate_link_af(dev, af, extack); 2441 if (err < 0) 2442 return err; 2443 } 2444 } 2445 } 2446 2447 return 0; 2448 } 2449 2450 static int handle_infiniband_guid(struct net_device *dev, struct ifla_vf_guid *ivt, 2451 int guid_type) 2452 { 2453 const struct net_device_ops *ops = dev->netdev_ops; 2454 2455 return ops->ndo_set_vf_guid(dev, ivt->vf, ivt->guid, guid_type); 2456 } 2457 2458 static int handle_vf_guid(struct net_device *dev, struct ifla_vf_guid *ivt, int guid_type) 2459 { 2460 if (dev->type != ARPHRD_INFINIBAND) 2461 return -EOPNOTSUPP; 2462 2463 return handle_infiniband_guid(dev, ivt, guid_type); 2464 
} 2465 2466 static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) 2467 { 2468 const struct net_device_ops *ops = dev->netdev_ops; 2469 int err = -EINVAL; 2470 2471 if (tb[IFLA_VF_MAC]) { 2472 struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]); 2473 2474 if (ivm->vf >= INT_MAX) 2475 return -EINVAL; 2476 err = -EOPNOTSUPP; 2477 if (ops->ndo_set_vf_mac) 2478 err = ops->ndo_set_vf_mac(dev, ivm->vf, 2479 ivm->mac); 2480 if (err < 0) 2481 return err; 2482 } 2483 2484 if (tb[IFLA_VF_VLAN]) { 2485 struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]); 2486 2487 if (ivv->vf >= INT_MAX) 2488 return -EINVAL; 2489 err = -EOPNOTSUPP; 2490 if (ops->ndo_set_vf_vlan) 2491 err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan, 2492 ivv->qos, 2493 htons(ETH_P_8021Q)); 2494 if (err < 0) 2495 return err; 2496 } 2497 2498 if (tb[IFLA_VF_VLAN_LIST]) { 2499 struct ifla_vf_vlan_info *ivvl[MAX_VLAN_LIST_LEN]; 2500 struct nlattr *attr; 2501 int rem, len = 0; 2502 2503 err = -EOPNOTSUPP; 2504 if (!ops->ndo_set_vf_vlan) 2505 return err; 2506 2507 nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) { 2508 if (nla_type(attr) != IFLA_VF_VLAN_INFO || 2509 nla_len(attr) < NLA_HDRLEN) { 2510 return -EINVAL; 2511 } 2512 if (len >= MAX_VLAN_LIST_LEN) 2513 return -EOPNOTSUPP; 2514 ivvl[len] = nla_data(attr); 2515 2516 len++; 2517 } 2518 if (len == 0) 2519 return -EINVAL; 2520 2521 if (ivvl[0]->vf >= INT_MAX) 2522 return -EINVAL; 2523 err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan, 2524 ivvl[0]->qos, ivvl[0]->vlan_proto); 2525 if (err < 0) 2526 return err; 2527 } 2528 2529 if (tb[IFLA_VF_TX_RATE]) { 2530 struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]); 2531 struct ifla_vf_info ivf; 2532 2533 if (ivt->vf >= INT_MAX) 2534 return -EINVAL; 2535 err = -EOPNOTSUPP; 2536 if (ops->ndo_get_vf_config) 2537 err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf); 2538 if (err < 0) 2539 return err; 2540 2541 err = rtnl_set_vf_rate(dev, ivt->vf, 2542 ivf.min_tx_rate, ivt->rate); 2543 if (err < 0) 2544 return err; 2545 } 2546 2547 if (tb[IFLA_VF_RATE]) { 2548 struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]); 2549 2550 if (ivt->vf >= INT_MAX) 2551 return -EINVAL; 2552 2553 err = rtnl_set_vf_rate(dev, ivt->vf, 2554 ivt->min_tx_rate, ivt->max_tx_rate); 2555 if (err < 0) 2556 return err; 2557 } 2558 2559 if (tb[IFLA_VF_SPOOFCHK]) { 2560 struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]); 2561 2562 if (ivs->vf >= INT_MAX) 2563 return -EINVAL; 2564 err = -EOPNOTSUPP; 2565 if (ops->ndo_set_vf_spoofchk) 2566 err = ops->ndo_set_vf_spoofchk(dev, ivs->vf, 2567 ivs->setting); 2568 if (err < 0) 2569 return err; 2570 } 2571 2572 if (tb[IFLA_VF_LINK_STATE]) { 2573 struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]); 2574 2575 if (ivl->vf >= INT_MAX) 2576 return -EINVAL; 2577 err = -EOPNOTSUPP; 2578 if (ops->ndo_set_vf_link_state) 2579 err = ops->ndo_set_vf_link_state(dev, ivl->vf, 2580 ivl->link_state); 2581 if (err < 0) 2582 return err; 2583 } 2584 2585 if (tb[IFLA_VF_RSS_QUERY_EN]) { 2586 struct ifla_vf_rss_query_en *ivrssq_en; 2587 2588 err = -EOPNOTSUPP; 2589 ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]); 2590 if (ivrssq_en->vf >= INT_MAX) 2591 return -EINVAL; 2592 if (ops->ndo_set_vf_rss_query_en) 2593 err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf, 2594 ivrssq_en->setting); 2595 if (err < 0) 2596 return err; 2597 } 2598 2599 if (tb[IFLA_VF_TRUST]) { 2600 struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]); 2601 2602 if (ivt->vf >= INT_MAX) 2603 return -EINVAL; 2604 err = 
-EOPNOTSUPP; 2605 if (ops->ndo_set_vf_trust) 2606 err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting); 2607 if (err < 0) 2608 return err; 2609 } 2610 2611 if (tb[IFLA_VF_IB_NODE_GUID]) { 2612 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]); 2613 2614 if (ivt->vf >= INT_MAX) 2615 return -EINVAL; 2616 if (!ops->ndo_set_vf_guid) 2617 return -EOPNOTSUPP; 2618 return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID); 2619 } 2620 2621 if (tb[IFLA_VF_IB_PORT_GUID]) { 2622 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]); 2623 2624 if (ivt->vf >= INT_MAX) 2625 return -EINVAL; 2626 if (!ops->ndo_set_vf_guid) 2627 return -EOPNOTSUPP; 2628 2629 return handle_vf_guid(dev, ivt, IFLA_VF_IB_PORT_GUID); 2630 } 2631 2632 return err; 2633 } 2634 2635 static int do_set_master(struct net_device *dev, int ifindex, 2636 struct netlink_ext_ack *extack) 2637 { 2638 struct net_device *upper_dev = netdev_master_upper_dev_get(dev); 2639 const struct net_device_ops *ops; 2640 int err; 2641 2642 if (upper_dev) { 2643 if (upper_dev->ifindex == ifindex) 2644 return 0; 2645 ops = upper_dev->netdev_ops; 2646 if (ops->ndo_del_slave) { 2647 err = ops->ndo_del_slave(upper_dev, dev); 2648 if (err) 2649 return err; 2650 } else { 2651 return -EOPNOTSUPP; 2652 } 2653 } 2654 2655 if (ifindex) { 2656 upper_dev = __dev_get_by_index(dev_net(dev), ifindex); 2657 if (!upper_dev) 2658 return -EINVAL; 2659 ops = upper_dev->netdev_ops; 2660 if (ops->ndo_add_slave) { 2661 err = ops->ndo_add_slave(upper_dev, dev, extack); 2662 if (err) 2663 return err; 2664 } else { 2665 return -EOPNOTSUPP; 2666 } 2667 } 2668 return 0; 2669 } 2670 2671 static const struct nla_policy ifla_proto_down_reason_policy[IFLA_PROTO_DOWN_REASON_VALUE + 1] = { 2672 [IFLA_PROTO_DOWN_REASON_MASK] = { .type = NLA_U32 }, 2673 [IFLA_PROTO_DOWN_REASON_VALUE] = { .type = NLA_U32 }, 2674 }; 2675 2676 static int do_set_proto_down(struct net_device *dev, 2677 struct nlattr *nl_proto_down, 2678 struct nlattr *nl_proto_down_reason, 2679 struct netlink_ext_ack *extack) 2680 { 2681 struct nlattr *pdreason[IFLA_PROTO_DOWN_REASON_MAX + 1]; 2682 unsigned long mask = 0; 2683 u32 value; 2684 bool proto_down; 2685 int err; 2686 2687 if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN)) { 2688 NL_SET_ERR_MSG(extack, "Protodown not supported by device"); 2689 return -EOPNOTSUPP; 2690 } 2691 2692 if (nl_proto_down_reason) { 2693 err = nla_parse_nested_deprecated(pdreason, 2694 IFLA_PROTO_DOWN_REASON_MAX, 2695 nl_proto_down_reason, 2696 ifla_proto_down_reason_policy, 2697 NULL); 2698 if (err < 0) 2699 return err; 2700 2701 if (!pdreason[IFLA_PROTO_DOWN_REASON_VALUE]) { 2702 NL_SET_ERR_MSG(extack, "Invalid protodown reason value"); 2703 return -EINVAL; 2704 } 2705 2706 value = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_VALUE]); 2707 2708 if (pdreason[IFLA_PROTO_DOWN_REASON_MASK]) 2709 mask = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_MASK]); 2710 2711 dev_change_proto_down_reason(dev, mask, value); 2712 } 2713 2714 if (nl_proto_down) { 2715 proto_down = nla_get_u8(nl_proto_down); 2716 2717 /* Don't turn off protodown if there are active reasons */ 2718 if (!proto_down && dev->proto_down_reason) { 2719 NL_SET_ERR_MSG(extack, "Cannot clear protodown, active reasons"); 2720 return -EBUSY; 2721 } 2722 err = dev_change_proto_down(dev, 2723 proto_down); 2724 if (err) 2725 return err; 2726 } 2727 2728 return 0; 2729 } 2730 2731 #define DO_SETLINK_MODIFIED 0x01 2732 /* notify flag means notify + modified. 
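 * Keeping DO_SETLINK_NOTIFY at 0x03 rather than 0x02 means setting it
 * also sets DO_SETLINK_MODIFIED, so the test
 * (status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY in do_setlink()
 * fires only when a notify-worthy change was actually made.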
*/ 2733 #define DO_SETLINK_NOTIFY 0x03 2734 static int do_setlink(const struct sk_buff *skb, 2735 struct net_device *dev, struct ifinfomsg *ifm, 2736 struct netlink_ext_ack *extack, 2737 struct nlattr **tb, int status) 2738 { 2739 const struct net_device_ops *ops = dev->netdev_ops; 2740 char ifname[IFNAMSIZ]; 2741 int err; 2742 2743 if (tb[IFLA_IFNAME]) 2744 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ); 2745 else 2746 ifname[0] = '\0'; 2747 2748 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_TARGET_NETNSID]) { 2749 const char *pat = ifname[0] ? ifname : NULL; 2750 struct net *net; 2751 int new_ifindex; 2752 2753 net = rtnl_link_get_net_capable(skb, dev_net(dev), 2754 tb, CAP_NET_ADMIN); 2755 if (IS_ERR(net)) { 2756 err = PTR_ERR(net); 2757 goto errout; 2758 } 2759 2760 if (tb[IFLA_NEW_IFINDEX]) 2761 new_ifindex = nla_get_s32(tb[IFLA_NEW_IFINDEX]); 2762 else 2763 new_ifindex = 0; 2764 2765 err = __dev_change_net_namespace(dev, net, pat, new_ifindex); 2766 put_net(net); 2767 if (err) 2768 goto errout; 2769 status |= DO_SETLINK_MODIFIED; 2770 } 2771 2772 if (tb[IFLA_MAP]) { 2773 struct rtnl_link_ifmap *u_map; 2774 struct ifmap k_map; 2775 2776 if (!ops->ndo_set_config) { 2777 err = -EOPNOTSUPP; 2778 goto errout; 2779 } 2780 2781 if (!netif_device_present(dev)) { 2782 err = -ENODEV; 2783 goto errout; 2784 } 2785 2786 u_map = nla_data(tb[IFLA_MAP]); 2787 k_map.mem_start = (unsigned long) u_map->mem_start; 2788 k_map.mem_end = (unsigned long) u_map->mem_end; 2789 k_map.base_addr = (unsigned short) u_map->base_addr; 2790 k_map.irq = (unsigned char) u_map->irq; 2791 k_map.dma = (unsigned char) u_map->dma; 2792 k_map.port = (unsigned char) u_map->port; 2793 2794 err = ops->ndo_set_config(dev, &k_map); 2795 if (err < 0) 2796 goto errout; 2797 2798 status |= DO_SETLINK_NOTIFY; 2799 } 2800 2801 if (tb[IFLA_ADDRESS]) { 2802 struct sockaddr *sa; 2803 int len; 2804 2805 len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len, 2806 sizeof(*sa)); 2807 sa = kmalloc(len, GFP_KERNEL); 2808 if (!sa) { 2809 err = -ENOMEM; 2810 goto errout; 2811 } 2812 sa->sa_family = dev->type; 2813 memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]), 2814 dev->addr_len); 2815 err = dev_set_mac_address_user(dev, sa, extack); 2816 kfree(sa); 2817 if (err) 2818 goto errout; 2819 status |= DO_SETLINK_MODIFIED; 2820 } 2821 2822 if (tb[IFLA_MTU]) { 2823 err = dev_set_mtu_ext(dev, nla_get_u32(tb[IFLA_MTU]), extack); 2824 if (err < 0) 2825 goto errout; 2826 status |= DO_SETLINK_MODIFIED; 2827 } 2828 2829 if (tb[IFLA_GROUP]) { 2830 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP])); 2831 status |= DO_SETLINK_NOTIFY; 2832 } 2833 2834 /* 2835 * Interface selected by interface index but interface 2836 * name provided implies that a name change has been 2837 * requested. 
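	 * (i.e. an RTM_SETLINK message carrying both ifi_index > 0 and
	 * IFLA_IFNAME reaches dev_change_name() below).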
2838 */ 2839 if (ifm->ifi_index > 0 && ifname[0]) { 2840 err = dev_change_name(dev, ifname); 2841 if (err < 0) 2842 goto errout; 2843 status |= DO_SETLINK_MODIFIED; 2844 } 2845 2846 if (tb[IFLA_IFALIAS]) { 2847 err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]), 2848 nla_len(tb[IFLA_IFALIAS])); 2849 if (err < 0) 2850 goto errout; 2851 status |= DO_SETLINK_NOTIFY; 2852 } 2853 2854 if (tb[IFLA_BROADCAST]) { 2855 nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len); 2856 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); 2857 } 2858 2859 if (tb[IFLA_MASTER]) { 2860 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack); 2861 if (err) 2862 goto errout; 2863 status |= DO_SETLINK_MODIFIED; 2864 } 2865 2866 if (ifm->ifi_flags || ifm->ifi_change) { 2867 err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm), 2868 extack); 2869 if (err < 0) 2870 goto errout; 2871 } 2872 2873 if (tb[IFLA_CARRIER]) { 2874 err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER])); 2875 if (err) 2876 goto errout; 2877 status |= DO_SETLINK_MODIFIED; 2878 } 2879 2880 if (tb[IFLA_TXQLEN]) { 2881 unsigned int value = nla_get_u32(tb[IFLA_TXQLEN]); 2882 2883 err = dev_change_tx_queue_len(dev, value); 2884 if (err) 2885 goto errout; 2886 status |= DO_SETLINK_MODIFIED; 2887 } 2888 2889 if (tb[IFLA_GSO_MAX_SIZE]) { 2890 u32 max_size = nla_get_u32(tb[IFLA_GSO_MAX_SIZE]); 2891 2892 if (dev->gso_max_size ^ max_size) { 2893 netif_set_gso_max_size(dev, max_size); 2894 status |= DO_SETLINK_MODIFIED; 2895 } 2896 } 2897 2898 if (tb[IFLA_GSO_MAX_SEGS]) { 2899 u32 max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]); 2900 2901 if (dev->gso_max_segs ^ max_segs) { 2902 netif_set_gso_max_segs(dev, max_segs); 2903 status |= DO_SETLINK_MODIFIED; 2904 } 2905 } 2906 2907 if (tb[IFLA_GRO_MAX_SIZE]) { 2908 u32 gro_max_size = nla_get_u32(tb[IFLA_GRO_MAX_SIZE]); 2909 2910 if (dev->gro_max_size ^ gro_max_size) { 2911 netif_set_gro_max_size(dev, gro_max_size); 2912 status |= DO_SETLINK_MODIFIED; 2913 } 2914 } 2915 2916 if (tb[IFLA_GSO_IPV4_MAX_SIZE]) { 2917 u32 max_size = nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]); 2918 2919 if (dev->gso_ipv4_max_size ^ max_size) { 2920 netif_set_gso_ipv4_max_size(dev, max_size); 2921 status |= DO_SETLINK_MODIFIED; 2922 } 2923 } 2924 2925 if (tb[IFLA_GRO_IPV4_MAX_SIZE]) { 2926 u32 gro_max_size = nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE]); 2927 2928 if (dev->gro_ipv4_max_size ^ gro_max_size) { 2929 netif_set_gro_ipv4_max_size(dev, gro_max_size); 2930 status |= DO_SETLINK_MODIFIED; 2931 } 2932 } 2933 2934 if (tb[IFLA_OPERSTATE]) 2935 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE])); 2936 2937 if (tb[IFLA_LINKMODE]) { 2938 unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]); 2939 2940 write_lock(&dev_base_lock); 2941 if (dev->link_mode ^ value) 2942 status |= DO_SETLINK_NOTIFY; 2943 dev->link_mode = value; 2944 write_unlock(&dev_base_lock); 2945 } 2946 2947 if (tb[IFLA_VFINFO_LIST]) { 2948 struct nlattr *vfinfo[IFLA_VF_MAX + 1]; 2949 struct nlattr *attr; 2950 int rem; 2951 2952 nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) { 2953 if (nla_type(attr) != IFLA_VF_INFO || 2954 nla_len(attr) < NLA_HDRLEN) { 2955 err = -EINVAL; 2956 goto errout; 2957 } 2958 err = nla_parse_nested_deprecated(vfinfo, IFLA_VF_MAX, 2959 attr, 2960 ifla_vf_policy, 2961 NULL); 2962 if (err < 0) 2963 goto errout; 2964 err = do_setvfinfo(dev, vfinfo); 2965 if (err < 0) 2966 goto errout; 2967 status |= DO_SETLINK_NOTIFY; 2968 } 2969 } 2970 err = 0; 2971 2972 if (tb[IFLA_VF_PORTS]) { 2973 struct nlattr *port[IFLA_PORT_MAX+1]; 2974 
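		/* Each IFLA_VF_PORT nest below is re-parsed against
		 * ifla_port_policy and handed to ndo_set_vf_port() for
		 * the VF named by its mandatory IFLA_PORT_VF attribute.
		 */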
struct nlattr *attr; 2975 int vf; 2976 int rem; 2977 2978 err = -EOPNOTSUPP; 2979 if (!ops->ndo_set_vf_port) 2980 goto errout; 2981 2982 nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) { 2983 if (nla_type(attr) != IFLA_VF_PORT || 2984 nla_len(attr) < NLA_HDRLEN) { 2985 err = -EINVAL; 2986 goto errout; 2987 } 2988 err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX, 2989 attr, 2990 ifla_port_policy, 2991 NULL); 2992 if (err < 0) 2993 goto errout; 2994 if (!port[IFLA_PORT_VF]) { 2995 err = -EOPNOTSUPP; 2996 goto errout; 2997 } 2998 vf = nla_get_u32(port[IFLA_PORT_VF]); 2999 err = ops->ndo_set_vf_port(dev, vf, port); 3000 if (err < 0) 3001 goto errout; 3002 status |= DO_SETLINK_NOTIFY; 3003 } 3004 } 3005 err = 0; 3006 3007 if (tb[IFLA_PORT_SELF]) { 3008 struct nlattr *port[IFLA_PORT_MAX+1]; 3009 3010 err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX, 3011 tb[IFLA_PORT_SELF], 3012 ifla_port_policy, NULL); 3013 if (err < 0) 3014 goto errout; 3015 3016 err = -EOPNOTSUPP; 3017 if (ops->ndo_set_vf_port) 3018 err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port); 3019 if (err < 0) 3020 goto errout; 3021 status |= DO_SETLINK_NOTIFY; 3022 } 3023 3024 if (tb[IFLA_AF_SPEC]) { 3025 struct nlattr *af; 3026 int rem; 3027 3028 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) { 3029 const struct rtnl_af_ops *af_ops; 3030 3031 BUG_ON(!(af_ops = rtnl_af_lookup(nla_type(af)))); 3032 3033 err = af_ops->set_link_af(dev, af, extack); 3034 if (err < 0) 3035 goto errout; 3036 3037 status |= DO_SETLINK_NOTIFY; 3038 } 3039 } 3040 err = 0; 3041 3042 if (tb[IFLA_PROTO_DOWN] || tb[IFLA_PROTO_DOWN_REASON]) { 3043 err = do_set_proto_down(dev, tb[IFLA_PROTO_DOWN], 3044 tb[IFLA_PROTO_DOWN_REASON], extack); 3045 if (err) 3046 goto errout; 3047 status |= DO_SETLINK_NOTIFY; 3048 } 3049 3050 if (tb[IFLA_XDP]) { 3051 struct nlattr *xdp[IFLA_XDP_MAX + 1]; 3052 u32 xdp_flags = 0; 3053 3054 err = nla_parse_nested_deprecated(xdp, IFLA_XDP_MAX, 3055 tb[IFLA_XDP], 3056 ifla_xdp_policy, NULL); 3057 if (err < 0) 3058 goto errout; 3059 3060 if (xdp[IFLA_XDP_ATTACHED] || xdp[IFLA_XDP_PROG_ID]) { 3061 err = -EINVAL; 3062 goto errout; 3063 } 3064 3065 if (xdp[IFLA_XDP_FLAGS]) { 3066 xdp_flags = nla_get_u32(xdp[IFLA_XDP_FLAGS]); 3067 if (xdp_flags & ~XDP_FLAGS_MASK) { 3068 err = -EINVAL; 3069 goto errout; 3070 } 3071 if (hweight32(xdp_flags & XDP_FLAGS_MODES) > 1) { 3072 err = -EINVAL; 3073 goto errout; 3074 } 3075 } 3076 3077 if (xdp[IFLA_XDP_FD]) { 3078 int expected_fd = -1; 3079 3080 if (xdp_flags & XDP_FLAGS_REPLACE) { 3081 if (!xdp[IFLA_XDP_EXPECTED_FD]) { 3082 err = -EINVAL; 3083 goto errout; 3084 } 3085 expected_fd = 3086 nla_get_s32(xdp[IFLA_XDP_EXPECTED_FD]); 3087 } 3088 3089 err = dev_change_xdp_fd(dev, extack, 3090 nla_get_s32(xdp[IFLA_XDP_FD]), 3091 expected_fd, 3092 xdp_flags); 3093 if (err) 3094 goto errout; 3095 status |= DO_SETLINK_NOTIFY; 3096 } 3097 } 3098 3099 errout: 3100 if (status & DO_SETLINK_MODIFIED) { 3101 if ((status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY) 3102 netdev_state_change(dev); 3103 3104 if (err < 0) 3105 net_warn_ratelimited("A link change request failed with some changes committed already. 
Interface %s may have been left with an inconsistent configuration, please check.\n", 3106 dev->name); 3107 } 3108 3109 return err; 3110 } 3111 3112 static struct net_device *rtnl_dev_get(struct net *net, 3113 struct nlattr *tb[]) 3114 { 3115 char ifname[ALTIFNAMSIZ]; 3116 3117 if (tb[IFLA_IFNAME]) 3118 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ); 3119 else if (tb[IFLA_ALT_IFNAME]) 3120 nla_strscpy(ifname, tb[IFLA_ALT_IFNAME], ALTIFNAMSIZ); 3121 else 3122 return NULL; 3123 3124 return __dev_get_by_name(net, ifname); 3125 } 3126 3127 static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, 3128 struct netlink_ext_ack *extack) 3129 { 3130 struct net *net = sock_net(skb->sk); 3131 struct ifinfomsg *ifm; 3132 struct net_device *dev; 3133 int err; 3134 struct nlattr *tb[IFLA_MAX+1]; 3135 3136 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX, 3137 ifla_policy, extack); 3138 if (err < 0) 3139 goto errout; 3140 3141 err = rtnl_ensure_unique_netns(tb, extack, false); 3142 if (err < 0) 3143 goto errout; 3144 3145 err = -EINVAL; 3146 ifm = nlmsg_data(nlh); 3147 if (ifm->ifi_index > 0) 3148 dev = __dev_get_by_index(net, ifm->ifi_index); 3149 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) 3150 dev = rtnl_dev_get(net, tb); 3151 else 3152 goto errout; 3153 3154 if (dev == NULL) { 3155 err = -ENODEV; 3156 goto errout; 3157 } 3158 3159 err = validate_linkmsg(dev, tb, extack); 3160 if (err < 0) 3161 goto errout; 3162 3163 err = do_setlink(skb, dev, ifm, extack, tb, 0); 3164 errout: 3165 return err; 3166 } 3167 3168 static int rtnl_group_dellink(const struct net *net, int group) 3169 { 3170 struct net_device *dev, *aux; 3171 LIST_HEAD(list_kill); 3172 bool found = false; 3173 3174 if (!group) 3175 return -EPERM; 3176 3177 for_each_netdev(net, dev) { 3178 if (dev->group == group) { 3179 const struct rtnl_link_ops *ops; 3180 3181 found = true; 3182 ops = dev->rtnl_link_ops; 3183 if (!ops || !ops->dellink) 3184 return -EOPNOTSUPP; 3185 } 3186 } 3187 3188 if (!found) 3189 return -ENODEV; 3190 3191 for_each_netdev_safe(net, dev, aux) { 3192 if (dev->group == group) { 3193 const struct rtnl_link_ops *ops; 3194 3195 ops = dev->rtnl_link_ops; 3196 ops->dellink(dev, &list_kill); 3197 } 3198 } 3199 unregister_netdevice_many(&list_kill); 3200 3201 return 0; 3202 } 3203 3204 int rtnl_delete_link(struct net_device *dev, u32 portid, const struct nlmsghdr *nlh) 3205 { 3206 const struct rtnl_link_ops *ops; 3207 LIST_HEAD(list_kill); 3208 3209 ops = dev->rtnl_link_ops; 3210 if (!ops || !ops->dellink) 3211 return -EOPNOTSUPP; 3212 3213 ops->dellink(dev, &list_kill); 3214 unregister_netdevice_many_notify(&list_kill, portid, nlh); 3215 3216 return 0; 3217 } 3218 EXPORT_SYMBOL_GPL(rtnl_delete_link); 3219 3220 static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, 3221 struct netlink_ext_ack *extack) 3222 { 3223 struct net *net = sock_net(skb->sk); 3224 u32 portid = NETLINK_CB(skb).portid; 3225 struct net *tgt_net = net; 3226 struct net_device *dev = NULL; 3227 struct ifinfomsg *ifm; 3228 struct nlattr *tb[IFLA_MAX+1]; 3229 int err; 3230 int netnsid = -1; 3231 3232 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX, 3233 ifla_policy, extack); 3234 if (err < 0) 3235 return err; 3236 3237 err = rtnl_ensure_unique_netns(tb, extack, true); 3238 if (err < 0) 3239 return err; 3240 3241 if (tb[IFLA_TARGET_NETNSID]) { 3242 netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]); 3243 tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid); 3244 if (IS_ERR(tgt_net)) 3245 return 
PTR_ERR(tgt_net); 3246 } 3247 3248 err = -EINVAL; 3249 ifm = nlmsg_data(nlh); 3250 if (ifm->ifi_index > 0) 3251 dev = __dev_get_by_index(tgt_net, ifm->ifi_index); 3252 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) 3253 dev = rtnl_dev_get(net, tb); 3254 else if (tb[IFLA_GROUP]) 3255 err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP])); 3256 else 3257 goto out; 3258 3259 if (!dev) { 3260 if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME] || ifm->ifi_index > 0) 3261 err = -ENODEV; 3262 3263 goto out; 3264 } 3265 3266 err = rtnl_delete_link(dev, portid, nlh); 3267 3268 out: 3269 if (netnsid >= 0) 3270 put_net(tgt_net); 3271 3272 return err; 3273 } 3274 3275 int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm, 3276 u32 portid, const struct nlmsghdr *nlh) 3277 { 3278 unsigned int old_flags; 3279 int err; 3280 3281 old_flags = dev->flags; 3282 if (ifm && (ifm->ifi_flags || ifm->ifi_change)) { 3283 err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm), 3284 NULL); 3285 if (err < 0) 3286 return err; 3287 } 3288 3289 if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) { 3290 __dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags), portid, nlh); 3291 } else { 3292 dev->rtnl_link_state = RTNL_LINK_INITIALIZED; 3293 __dev_notify_flags(dev, old_flags, ~0U, portid, nlh); 3294 } 3295 return 0; 3296 } 3297 EXPORT_SYMBOL(rtnl_configure_link); 3298 3299 struct net_device *rtnl_create_link(struct net *net, const char *ifname, 3300 unsigned char name_assign_type, 3301 const struct rtnl_link_ops *ops, 3302 struct nlattr *tb[], 3303 struct netlink_ext_ack *extack) 3304 { 3305 struct net_device *dev; 3306 unsigned int num_tx_queues = 1; 3307 unsigned int num_rx_queues = 1; 3308 int err; 3309 3310 if (tb[IFLA_NUM_TX_QUEUES]) 3311 num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]); 3312 else if (ops->get_num_tx_queues) 3313 num_tx_queues = ops->get_num_tx_queues(); 3314 3315 if (tb[IFLA_NUM_RX_QUEUES]) 3316 num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]); 3317 else if (ops->get_num_rx_queues) 3318 num_rx_queues = ops->get_num_rx_queues(); 3319 3320 if (num_tx_queues < 1 || num_tx_queues > 4096) { 3321 NL_SET_ERR_MSG(extack, "Invalid number of transmit queues"); 3322 return ERR_PTR(-EINVAL); 3323 } 3324 3325 if (num_rx_queues < 1 || num_rx_queues > 4096) { 3326 NL_SET_ERR_MSG(extack, "Invalid number of receive queues"); 3327 return ERR_PTR(-EINVAL); 3328 } 3329 3330 if (ops->alloc) { 3331 dev = ops->alloc(tb, ifname, name_assign_type, 3332 num_tx_queues, num_rx_queues); 3333 if (IS_ERR(dev)) 3334 return dev; 3335 } else { 3336 dev = alloc_netdev_mqs(ops->priv_size, ifname, 3337 name_assign_type, ops->setup, 3338 num_tx_queues, num_rx_queues); 3339 } 3340 3341 if (!dev) 3342 return ERR_PTR(-ENOMEM); 3343 3344 err = validate_linkmsg(dev, tb, extack); 3345 if (err < 0) { 3346 free_netdev(dev); 3347 return ERR_PTR(err); 3348 } 3349 3350 dev_net_set(dev, net); 3351 dev->rtnl_link_ops = ops; 3352 dev->rtnl_link_state = RTNL_LINK_INITIALIZING; 3353 3354 if (tb[IFLA_MTU]) { 3355 u32 mtu = nla_get_u32(tb[IFLA_MTU]); 3356 3357 err = dev_validate_mtu(dev, mtu, extack); 3358 if (err) { 3359 free_netdev(dev); 3360 return ERR_PTR(err); 3361 } 3362 dev->mtu = mtu; 3363 } 3364 if (tb[IFLA_ADDRESS]) { 3365 __dev_addr_set(dev, nla_data(tb[IFLA_ADDRESS]), 3366 nla_len(tb[IFLA_ADDRESS])); 3367 dev->addr_assign_type = NET_ADDR_SET; 3368 } 3369 if (tb[IFLA_BROADCAST]) 3370 memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]), 3371 nla_len(tb[IFLA_BROADCAST])); 3372 if (tb[IFLA_TXQLEN]) 3373 
dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]); 3374 if (tb[IFLA_OPERSTATE]) 3375 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE])); 3376 if (tb[IFLA_LINKMODE]) 3377 dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]); 3378 if (tb[IFLA_GROUP]) 3379 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP])); 3380 if (tb[IFLA_GSO_MAX_SIZE]) 3381 netif_set_gso_max_size(dev, nla_get_u32(tb[IFLA_GSO_MAX_SIZE])); 3382 if (tb[IFLA_GSO_MAX_SEGS]) 3383 netif_set_gso_max_segs(dev, nla_get_u32(tb[IFLA_GSO_MAX_SEGS])); 3384 if (tb[IFLA_GRO_MAX_SIZE]) 3385 netif_set_gro_max_size(dev, nla_get_u32(tb[IFLA_GRO_MAX_SIZE])); 3386 if (tb[IFLA_GSO_IPV4_MAX_SIZE]) 3387 netif_set_gso_ipv4_max_size(dev, nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE])); 3388 if (tb[IFLA_GRO_IPV4_MAX_SIZE]) 3389 netif_set_gro_ipv4_max_size(dev, nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE])); 3390 3391 return dev; 3392 } 3393 EXPORT_SYMBOL(rtnl_create_link); 3394 3395 static int rtnl_group_changelink(const struct sk_buff *skb, 3396 struct net *net, int group, 3397 struct ifinfomsg *ifm, 3398 struct netlink_ext_ack *extack, 3399 struct nlattr **tb) 3400 { 3401 struct net_device *dev, *aux; 3402 int err; 3403 3404 for_each_netdev_safe(net, dev, aux) { 3405 if (dev->group == group) { 3406 err = validate_linkmsg(dev, tb, extack); 3407 if (err < 0) 3408 return err; 3409 err = do_setlink(skb, dev, ifm, extack, tb, 0); 3410 if (err < 0) 3411 return err; 3412 } 3413 } 3414 3415 return 0; 3416 } 3417 3418 static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm, 3419 const struct rtnl_link_ops *ops, 3420 const struct nlmsghdr *nlh, 3421 struct nlattr **tb, struct nlattr **data, 3422 struct netlink_ext_ack *extack) 3423 { 3424 unsigned char name_assign_type = NET_NAME_USER; 3425 struct net *net = sock_net(skb->sk); 3426 u32 portid = NETLINK_CB(skb).portid; 3427 struct net *dest_net, *link_net; 3428 struct net_device *dev; 3429 char ifname[IFNAMSIZ]; 3430 int err; 3431 3432 if (!ops->alloc && !ops->setup) 3433 return -EOPNOTSUPP; 3434 3435 if (tb[IFLA_IFNAME]) { 3436 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ); 3437 } else { 3438 snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind); 3439 name_assign_type = NET_NAME_ENUM; 3440 } 3441 3442 dest_net = rtnl_link_get_net_capable(skb, net, tb, CAP_NET_ADMIN); 3443 if (IS_ERR(dest_net)) 3444 return PTR_ERR(dest_net); 3445 3446 if (tb[IFLA_LINK_NETNSID]) { 3447 int id = nla_get_s32(tb[IFLA_LINK_NETNSID]); 3448 3449 link_net = get_net_ns_by_id(dest_net, id); 3450 if (!link_net) { 3451 NL_SET_ERR_MSG(extack, "Unknown network namespace id"); 3452 err = -EINVAL; 3453 goto out; 3454 } 3455 err = -EPERM; 3456 if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN)) 3457 goto out; 3458 } else { 3459 link_net = NULL; 3460 } 3461 3462 dev = rtnl_create_link(link_net ? : dest_net, ifname, 3463 name_assign_type, ops, tb, extack); 3464 if (IS_ERR(dev)) { 3465 err = PTR_ERR(dev); 3466 goto out; 3467 } 3468 3469 dev->ifindex = ifm->ifi_index; 3470 3471 if (ops->newlink) 3472 err = ops->newlink(link_net ? 
: net, dev, tb, data, extack); 3473 else 3474 err = register_netdevice(dev); 3475 if (err < 0) { 3476 free_netdev(dev); 3477 goto out; 3478 } 3479 3480 err = rtnl_configure_link(dev, ifm, portid, nlh); 3481 if (err < 0) 3482 goto out_unregister; 3483 if (link_net) { 3484 err = dev_change_net_namespace(dev, dest_net, ifname); 3485 if (err < 0) 3486 goto out_unregister; 3487 } 3488 if (tb[IFLA_MASTER]) { 3489 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack); 3490 if (err) 3491 goto out_unregister; 3492 } 3493 out: 3494 if (link_net) 3495 put_net(link_net); 3496 put_net(dest_net); 3497 return err; 3498 out_unregister: 3499 if (ops->newlink) { 3500 LIST_HEAD(list_kill); 3501 3502 ops->dellink(dev, &list_kill); 3503 unregister_netdevice_many(&list_kill); 3504 } else { 3505 unregister_netdevice(dev); 3506 } 3507 goto out; 3508 } 3509 3510 struct rtnl_newlink_tbs { 3511 struct nlattr *tb[IFLA_MAX + 1]; 3512 struct nlattr *attr[RTNL_MAX_TYPE + 1]; 3513 struct nlattr *slave_attr[RTNL_SLAVE_MAX_TYPE + 1]; 3514 }; 3515 3516 static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, 3517 struct rtnl_newlink_tbs *tbs, 3518 struct netlink_ext_ack *extack) 3519 { 3520 struct nlattr *linkinfo[IFLA_INFO_MAX + 1]; 3521 struct nlattr ** const tb = tbs->tb; 3522 const struct rtnl_link_ops *m_ops; 3523 struct net_device *master_dev; 3524 struct net *net = sock_net(skb->sk); 3525 const struct rtnl_link_ops *ops; 3526 struct nlattr **slave_data; 3527 char kind[MODULE_NAME_LEN]; 3528 struct net_device *dev; 3529 struct ifinfomsg *ifm; 3530 struct nlattr **data; 3531 bool link_specified; 3532 int err; 3533 3534 #ifdef CONFIG_MODULES 3535 replay: 3536 #endif 3537 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX, 3538 ifla_policy, extack); 3539 if (err < 0) 3540 return err; 3541 3542 err = rtnl_ensure_unique_netns(tb, extack, false); 3543 if (err < 0) 3544 return err; 3545 3546 ifm = nlmsg_data(nlh); 3547 if (ifm->ifi_index > 0) { 3548 link_specified = true; 3549 dev = __dev_get_by_index(net, ifm->ifi_index); 3550 } else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) { 3551 link_specified = true; 3552 dev = rtnl_dev_get(net, tb); 3553 } else { 3554 link_specified = false; 3555 dev = NULL; 3556 } 3557 3558 master_dev = NULL; 3559 m_ops = NULL; 3560 if (dev) { 3561 master_dev = netdev_master_upper_dev_get(dev); 3562 if (master_dev) 3563 m_ops = master_dev->rtnl_link_ops; 3564 } 3565 3566 if (tb[IFLA_LINKINFO]) { 3567 err = nla_parse_nested_deprecated(linkinfo, IFLA_INFO_MAX, 3568 tb[IFLA_LINKINFO], 3569 ifla_info_policy, NULL); 3570 if (err < 0) 3571 return err; 3572 } else 3573 memset(linkinfo, 0, sizeof(linkinfo)); 3574 3575 if (linkinfo[IFLA_INFO_KIND]) { 3576 nla_strscpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind)); 3577 ops = rtnl_link_ops_get(kind); 3578 } else { 3579 kind[0] = '\0'; 3580 ops = NULL; 3581 } 3582 3583 data = NULL; 3584 if (ops) { 3585 if (ops->maxtype > RTNL_MAX_TYPE) 3586 return -EINVAL; 3587 3588 if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) { 3589 err = nla_parse_nested_deprecated(tbs->attr, ops->maxtype, 3590 linkinfo[IFLA_INFO_DATA], 3591 ops->policy, extack); 3592 if (err < 0) 3593 return err; 3594 data = tbs->attr; 3595 } 3596 if (ops->validate) { 3597 err = ops->validate(tb, data, extack); 3598 if (err < 0) 3599 return err; 3600 } 3601 } 3602 3603 slave_data = NULL; 3604 if (m_ops) { 3605 if (m_ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE) 3606 return -EINVAL; 3607 3608 if (m_ops->slave_maxtype && 3609 linkinfo[IFLA_INFO_SLAVE_DATA]) { 3610 err = 
nla_parse_nested_deprecated(tbs->slave_attr, 3611 m_ops->slave_maxtype, 3612 linkinfo[IFLA_INFO_SLAVE_DATA], 3613 m_ops->slave_policy, 3614 extack); 3615 if (err < 0) 3616 return err; 3617 slave_data = tbs->slave_attr; 3618 } 3619 } 3620 3621 if (dev) { 3622 int status = 0; 3623 3624 if (nlh->nlmsg_flags & NLM_F_EXCL) 3625 return -EEXIST; 3626 if (nlh->nlmsg_flags & NLM_F_REPLACE) 3627 return -EOPNOTSUPP; 3628 3629 err = validate_linkmsg(dev, tb, extack); 3630 if (err < 0) 3631 return err; 3632 3633 if (linkinfo[IFLA_INFO_DATA]) { 3634 if (!ops || ops != dev->rtnl_link_ops || 3635 !ops->changelink) 3636 return -EOPNOTSUPP; 3637 3638 err = ops->changelink(dev, tb, data, extack); 3639 if (err < 0) 3640 return err; 3641 status |= DO_SETLINK_NOTIFY; 3642 } 3643 3644 if (linkinfo[IFLA_INFO_SLAVE_DATA]) { 3645 if (!m_ops || !m_ops->slave_changelink) 3646 return -EOPNOTSUPP; 3647 3648 err = m_ops->slave_changelink(master_dev, dev, tb, 3649 slave_data, extack); 3650 if (err < 0) 3651 return err; 3652 status |= DO_SETLINK_NOTIFY; 3653 } 3654 3655 return do_setlink(skb, dev, ifm, extack, tb, status); 3656 } 3657 3658 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) { 3659 /* No dev found and NLM_F_CREATE not set. Requested dev does not exist, 3660 * or it's for a group 3661 */ 3662 if (link_specified) 3663 return -ENODEV; 3664 if (tb[IFLA_GROUP]) 3665 return rtnl_group_changelink(skb, net, 3666 nla_get_u32(tb[IFLA_GROUP]), 3667 ifm, extack, tb); 3668 return -ENODEV; 3669 } 3670 3671 if (tb[IFLA_MAP] || tb[IFLA_PROTINFO]) 3672 return -EOPNOTSUPP; 3673 3674 if (!ops) { 3675 #ifdef CONFIG_MODULES 3676 if (kind[0]) { 3677 __rtnl_unlock(); 3678 request_module("rtnl-link-%s", kind); 3679 rtnl_lock(); 3680 ops = rtnl_link_ops_get(kind); 3681 if (ops) 3682 goto replay; 3683 } 3684 #endif 3685 NL_SET_ERR_MSG(extack, "Unknown device type"); 3686 return -EOPNOTSUPP; 3687 } 3688 3689 return rtnl_newlink_create(skb, ifm, ops, nlh, tb, data, extack); 3690 } 3691 3692 static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, 3693 struct netlink_ext_ack *extack) 3694 { 3695 struct rtnl_newlink_tbs *tbs; 3696 int ret; 3697 3698 tbs = kmalloc(sizeof(*tbs), GFP_KERNEL); 3699 if (!tbs) 3700 return -ENOMEM; 3701 3702 ret = __rtnl_newlink(skb, nlh, tbs, extack); 3703 kfree(tbs); 3704 return ret; 3705 } 3706 3707 static int rtnl_valid_getlink_req(struct sk_buff *skb, 3708 const struct nlmsghdr *nlh, 3709 struct nlattr **tb, 3710 struct netlink_ext_ack *extack) 3711 { 3712 struct ifinfomsg *ifm; 3713 int i, err; 3714 3715 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) { 3716 NL_SET_ERR_MSG(extack, "Invalid header for get link"); 3717 return -EINVAL; 3718 } 3719 3720 if (!netlink_strict_get_check(skb)) 3721 return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX, 3722 ifla_policy, extack); 3723 3724 ifm = nlmsg_data(nlh); 3725 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags || 3726 ifm->ifi_change) { 3727 NL_SET_ERR_MSG(extack, "Invalid values in header for get link request"); 3728 return -EINVAL; 3729 } 3730 3731 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFLA_MAX, 3732 ifla_policy, extack); 3733 if (err) 3734 return err; 3735 3736 for (i = 0; i <= IFLA_MAX; i++) { 3737 if (!tb[i]) 3738 continue; 3739 3740 switch (i) { 3741 case IFLA_IFNAME: 3742 case IFLA_ALT_IFNAME: 3743 case IFLA_EXT_MASK: 3744 case IFLA_TARGET_NETNSID: 3745 break; 3746 default: 3747 NL_SET_ERR_MSG(extack, "Unsupported attribute in get link request"); 3748 return -EINVAL; 3749 } 3750 } 3751 3752 return 0; 3753 } 3754 3755 
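/* For illustration only (not kernel code): a minimal user-space sketch
 * of a strict RTM_GETLINK request that passes rtnl_valid_getlink_req()
 * above, assuming "fd" is a bound NETLINK_ROUTE socket and the example
 * ifindex of 2 is hypothetical:
 *
 *	struct {
 *		struct nlmsghdr nlh;
 *		struct ifinfomsg ifm;
 *	} req = {
 *		.nlh.nlmsg_len	 = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
 *		.nlh.nlmsg_type	 = RTM_GETLINK,
 *		.nlh.nlmsg_flags = NLM_F_REQUEST,
 *		.ifm.ifi_family	 = AF_UNSPEC,
 *		.ifm.ifi_index	 = 2,
 *	};
 *	send(fd, &req, req.nlh.nlmsg_len, 0);
 *
 * Under strict checking (netlink_strict_get_check()) all other ifinfomsg
 * fields must be zero, and only IFLA_IFNAME, IFLA_ALT_IFNAME,
 * IFLA_EXT_MASK and IFLA_TARGET_NETNSID attributes may follow the header.
 */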
static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh, 3756 struct netlink_ext_ack *extack) 3757 { 3758 struct net *net = sock_net(skb->sk); 3759 struct net *tgt_net = net; 3760 struct ifinfomsg *ifm; 3761 struct nlattr *tb[IFLA_MAX+1]; 3762 struct net_device *dev = NULL; 3763 struct sk_buff *nskb; 3764 int netnsid = -1; 3765 int err; 3766 u32 ext_filter_mask = 0; 3767 3768 err = rtnl_valid_getlink_req(skb, nlh, tb, extack); 3769 if (err < 0) 3770 return err; 3771 3772 err = rtnl_ensure_unique_netns(tb, extack, true); 3773 if (err < 0) 3774 return err; 3775 3776 if (tb[IFLA_TARGET_NETNSID]) { 3777 netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]); 3778 tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid); 3779 if (IS_ERR(tgt_net)) 3780 return PTR_ERR(tgt_net); 3781 } 3782 3783 if (tb[IFLA_EXT_MASK]) 3784 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); 3785 3786 err = -EINVAL; 3787 ifm = nlmsg_data(nlh); 3788 if (ifm->ifi_index > 0) 3789 dev = __dev_get_by_index(tgt_net, ifm->ifi_index); 3790 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) 3791 dev = rtnl_dev_get(tgt_net, tb); 3792 else 3793 goto out; 3794 3795 err = -ENODEV; 3796 if (dev == NULL) 3797 goto out; 3798 3799 err = -ENOBUFS; 3800 nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL); 3801 if (nskb == NULL) 3802 goto out; 3803 3804 err = rtnl_fill_ifinfo(nskb, dev, net, 3805 RTM_NEWLINK, NETLINK_CB(skb).portid, 3806 nlh->nlmsg_seq, 0, 0, ext_filter_mask, 3807 0, NULL, 0, netnsid, GFP_KERNEL); 3808 if (err < 0) { 3809 /* -EMSGSIZE implies BUG in if_nlmsg_size */ 3810 WARN_ON(err == -EMSGSIZE); 3811 kfree_skb(nskb); 3812 } else 3813 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid); 3814 out: 3815 if (netnsid >= 0) 3816 put_net(tgt_net); 3817 3818 return err; 3819 } 3820 3821 static int rtnl_alt_ifname(int cmd, struct net_device *dev, struct nlattr *attr, 3822 bool *changed, struct netlink_ext_ack *extack) 3823 { 3824 char *alt_ifname; 3825 size_t size; 3826 int err; 3827 3828 err = nla_validate(attr, attr->nla_len, IFLA_MAX, ifla_policy, extack); 3829 if (err) 3830 return err; 3831 3832 if (cmd == RTM_NEWLINKPROP) { 3833 size = rtnl_prop_list_size(dev); 3834 size += nla_total_size(ALTIFNAMSIZ); 3835 if (size >= U16_MAX) { 3836 NL_SET_ERR_MSG(extack, 3837 "effective property list too long"); 3838 return -EINVAL; 3839 } 3840 } 3841 3842 alt_ifname = nla_strdup(attr, GFP_KERNEL_ACCOUNT); 3843 if (!alt_ifname) 3844 return -ENOMEM; 3845 3846 if (cmd == RTM_NEWLINKPROP) { 3847 err = netdev_name_node_alt_create(dev, alt_ifname); 3848 if (!err) 3849 alt_ifname = NULL; 3850 } else if (cmd == RTM_DELLINKPROP) { 3851 err = netdev_name_node_alt_destroy(dev, alt_ifname); 3852 } else { 3853 WARN_ON_ONCE(1); 3854 err = -EINVAL; 3855 } 3856 3857 kfree(alt_ifname); 3858 if (!err) 3859 *changed = true; 3860 return err; 3861 } 3862 3863 static int rtnl_linkprop(int cmd, struct sk_buff *skb, struct nlmsghdr *nlh, 3864 struct netlink_ext_ack *extack) 3865 { 3866 struct net *net = sock_net(skb->sk); 3867 struct nlattr *tb[IFLA_MAX + 1]; 3868 struct net_device *dev; 3869 struct ifinfomsg *ifm; 3870 bool changed = false; 3871 struct nlattr *attr; 3872 int err, rem; 3873 3874 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack); 3875 if (err) 3876 return err; 3877 3878 err = rtnl_ensure_unique_netns(tb, extack, true); 3879 if (err) 3880 return err; 3881 3882 ifm = nlmsg_data(nlh); 3883 if (ifm->ifi_index > 0) 3884 dev = __dev_get_by_index(net, ifm->ifi_index); 3885 else if (tb[IFLA_IFNAME] || 
tb[IFLA_ALT_IFNAME]) 3886 dev = rtnl_dev_get(net, tb); 3887 else 3888 return -EINVAL; 3889 3890 if (!dev) 3891 return -ENODEV; 3892 3893 if (!tb[IFLA_PROP_LIST]) 3894 return 0; 3895 3896 nla_for_each_nested(attr, tb[IFLA_PROP_LIST], rem) { 3897 switch (nla_type(attr)) { 3898 case IFLA_ALT_IFNAME: 3899 err = rtnl_alt_ifname(cmd, dev, attr, &changed, extack); 3900 if (err) 3901 return err; 3902 break; 3903 } 3904 } 3905 3906 if (changed) 3907 netdev_state_change(dev); 3908 return 0; 3909 } 3910 3911 static int rtnl_newlinkprop(struct sk_buff *skb, struct nlmsghdr *nlh, 3912 struct netlink_ext_ack *extack) 3913 { 3914 return rtnl_linkprop(RTM_NEWLINKPROP, skb, nlh, extack); 3915 } 3916 3917 static int rtnl_dellinkprop(struct sk_buff *skb, struct nlmsghdr *nlh, 3918 struct netlink_ext_ack *extack) 3919 { 3920 return rtnl_linkprop(RTM_DELLINKPROP, skb, nlh, extack); 3921 } 3922 3923 static u32 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh) 3924 { 3925 struct net *net = sock_net(skb->sk); 3926 size_t min_ifinfo_dump_size = 0; 3927 struct nlattr *tb[IFLA_MAX+1]; 3928 u32 ext_filter_mask = 0; 3929 struct net_device *dev; 3930 int hdrlen; 3931 3932 /* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */ 3933 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ? 3934 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg); 3935 3936 if (nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, NULL) >= 0) { 3937 if (tb[IFLA_EXT_MASK]) 3938 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); 3939 } 3940 3941 if (!ext_filter_mask) 3942 return NLMSG_GOODSIZE; 3943 /* 3944 * traverse the list of net devices and compute the minimum 3945 * buffer size based upon the filter mask. 3946 */ 3947 rcu_read_lock(); 3948 for_each_netdev_rcu(net, dev) { 3949 min_ifinfo_dump_size = max(min_ifinfo_dump_size, 3950 if_nlmsg_size(dev, ext_filter_mask)); 3951 } 3952 rcu_read_unlock(); 3953 3954 return nlmsg_total_size(min_ifinfo_dump_size); 3955 } 3956 3957 static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb) 3958 { 3959 int idx; 3960 int s_idx = cb->family; 3961 int type = cb->nlh->nlmsg_type - RTM_BASE; 3962 int ret = 0; 3963 3964 if (s_idx == 0) 3965 s_idx = 1; 3966 3967 for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) { 3968 struct rtnl_link __rcu **tab; 3969 struct rtnl_link *link; 3970 rtnl_dumpit_func dumpit; 3971 3972 if (idx < s_idx || idx == PF_PACKET) 3973 continue; 3974 3975 if (type < 0 || type >= RTM_NR_MSGTYPES) 3976 continue; 3977 3978 tab = rcu_dereference_rtnl(rtnl_msg_handlers[idx]); 3979 if (!tab) 3980 continue; 3981 3982 link = rcu_dereference_rtnl(tab[type]); 3983 if (!link) 3984 continue; 3985 3986 dumpit = link->dumpit; 3987 if (!dumpit) 3988 continue; 3989 3990 if (idx > s_idx) { 3991 memset(&cb->args[0], 0, sizeof(cb->args)); 3992 cb->prev_seq = 0; 3993 cb->seq = 0; 3994 } 3995 ret = dumpit(skb, cb); 3996 if (ret) 3997 break; 3998 } 3999 cb->family = idx; 4000 4001 return skb->len ? 
: ret; 4002 } 4003 4004 struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev, 4005 unsigned int change, 4006 u32 event, gfp_t flags, int *new_nsid, 4007 int new_ifindex, u32 portid, 4008 const struct nlmsghdr *nlh) 4009 { 4010 struct net *net = dev_net(dev); 4011 struct sk_buff *skb; 4012 int err = -ENOBUFS; 4013 u32 seq = 0; 4014 4015 skb = nlmsg_new(if_nlmsg_size(dev, 0), flags); 4016 if (skb == NULL) 4017 goto errout; 4018 4019 if (nlmsg_report(nlh)) 4020 seq = nlmsg_seq(nlh); 4021 else 4022 portid = 0; 4023 4024 err = rtnl_fill_ifinfo(skb, dev, dev_net(dev), 4025 type, portid, seq, change, 0, 0, event, 4026 new_nsid, new_ifindex, -1, flags); 4027 if (err < 0) { 4028 /* -EMSGSIZE implies BUG in if_nlmsg_size() */ 4029 WARN_ON(err == -EMSGSIZE); 4030 kfree_skb(skb); 4031 goto errout; 4032 } 4033 return skb; 4034 errout: 4035 if (err < 0) 4036 rtnl_set_sk_err(net, RTNLGRP_LINK, err); 4037 return NULL; 4038 } 4039 4040 void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags, 4041 u32 portid, const struct nlmsghdr *nlh) 4042 { 4043 struct net *net = dev_net(dev); 4044 4045 rtnl_notify(skb, net, portid, RTNLGRP_LINK, nlh, flags); 4046 } 4047 4048 static void rtmsg_ifinfo_event(int type, struct net_device *dev, 4049 unsigned int change, u32 event, 4050 gfp_t flags, int *new_nsid, int new_ifindex, 4051 u32 portid, const struct nlmsghdr *nlh) 4052 { 4053 struct sk_buff *skb; 4054 4055 if (dev->reg_state != NETREG_REGISTERED) 4056 return; 4057 4058 skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid, 4059 new_ifindex, portid, nlh); 4060 if (skb) 4061 rtmsg_ifinfo_send(skb, dev, flags, portid, nlh); 4062 } 4063 4064 void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change, 4065 gfp_t flags, u32 portid, const struct nlmsghdr *nlh) 4066 { 4067 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags, 4068 NULL, 0, portid, nlh); 4069 } 4070 4071 void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change, 4072 gfp_t flags, int *new_nsid, int new_ifindex) 4073 { 4074 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags, 4075 new_nsid, new_ifindex, 0, NULL); 4076 } 4077 4078 static int nlmsg_populate_fdb_fill(struct sk_buff *skb, 4079 struct net_device *dev, 4080 u8 *addr, u16 vid, u32 pid, u32 seq, 4081 int type, unsigned int flags, 4082 int nlflags, u16 ndm_state) 4083 { 4084 struct nlmsghdr *nlh; 4085 struct ndmsg *ndm; 4086 4087 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags); 4088 if (!nlh) 4089 return -EMSGSIZE; 4090 4091 ndm = nlmsg_data(nlh); 4092 ndm->ndm_family = AF_BRIDGE; 4093 ndm->ndm_pad1 = 0; 4094 ndm->ndm_pad2 = 0; 4095 ndm->ndm_flags = flags; 4096 ndm->ndm_type = 0; 4097 ndm->ndm_ifindex = dev->ifindex; 4098 ndm->ndm_state = ndm_state; 4099 4100 if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr)) 4101 goto nla_put_failure; 4102 if (vid) 4103 if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid)) 4104 goto nla_put_failure; 4105 4106 nlmsg_end(skb, nlh); 4107 return 0; 4108 4109 nla_put_failure: 4110 nlmsg_cancel(skb, nlh); 4111 return -EMSGSIZE; 4112 } 4113 4114 static inline size_t rtnl_fdb_nlmsg_size(void) 4115 { 4116 return NLMSG_ALIGN(sizeof(struct ndmsg)) + 4117 nla_total_size(ETH_ALEN) + /* NDA_LLADDR */ 4118 nla_total_size(sizeof(u16)) + /* NDA_VLAN */ 4119 0; 4120 } 4121 4122 static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type, 4123 u16 ndm_state) 4124 { 4125 struct net *net = dev_net(dev); 4126 struct sk_buff *skb; 4127 int err = -ENOBUFS; 
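	/* Note: this path returns void; an allocation or fill failure is
	 * reported to listeners via rtnl_set_sk_err() below rather than
	 * propagated to the caller.
	 */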
4128 4129 skb = nlmsg_new(rtnl_fdb_nlmsg_size(), GFP_ATOMIC); 4130 if (!skb) 4131 goto errout; 4132 4133 err = nlmsg_populate_fdb_fill(skb, dev, addr, vid, 4134 0, 0, type, NTF_SELF, 0, ndm_state); 4135 if (err < 0) { 4136 kfree_skb(skb); 4137 goto errout; 4138 } 4139 4140 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC); 4141 return; 4142 errout: 4143 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err); 4144 } 4145 4146 /* 4147 * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry 4148 */ 4149 int ndo_dflt_fdb_add(struct ndmsg *ndm, 4150 struct nlattr *tb[], 4151 struct net_device *dev, 4152 const unsigned char *addr, u16 vid, 4153 u16 flags) 4154 { 4155 int err = -EINVAL; 4156 4157 /* If aging addresses are supported device will need to 4158 * implement its own handler for this. 4159 */ 4160 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { 4161 netdev_info(dev, "default FDB implementation only supports local addresses\n"); 4162 return err; 4163 } 4164 4165 if (tb[NDA_FLAGS_EXT]) { 4166 netdev_info(dev, "invalid flags given to default FDB implementation\n"); 4167 return err; 4168 } 4169 4170 if (vid) { 4171 netdev_info(dev, "vlans aren't supported yet for dev_uc|mc_add()\n"); 4172 return err; 4173 } 4174 4175 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) 4176 err = dev_uc_add_excl(dev, addr); 4177 else if (is_multicast_ether_addr(addr)) 4178 err = dev_mc_add_excl(dev, addr); 4179 4180 /* Only return duplicate errors if NLM_F_EXCL is set */ 4181 if (err == -EEXIST && !(flags & NLM_F_EXCL)) 4182 err = 0; 4183 4184 return err; 4185 } 4186 EXPORT_SYMBOL(ndo_dflt_fdb_add); 4187 4188 static int fdb_vid_parse(struct nlattr *vlan_attr, u16 *p_vid, 4189 struct netlink_ext_ack *extack) 4190 { 4191 u16 vid = 0; 4192 4193 if (vlan_attr) { 4194 if (nla_len(vlan_attr) != sizeof(u16)) { 4195 NL_SET_ERR_MSG(extack, "invalid vlan attribute size"); 4196 return -EINVAL; 4197 } 4198 4199 vid = nla_get_u16(vlan_attr); 4200 4201 if (!vid || vid >= VLAN_VID_MASK) { 4202 NL_SET_ERR_MSG(extack, "invalid vlan id"); 4203 return -EINVAL; 4204 } 4205 } 4206 *p_vid = vid; 4207 return 0; 4208 } 4209 4210 static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, 4211 struct netlink_ext_ack *extack) 4212 { 4213 struct net *net = sock_net(skb->sk); 4214 struct ndmsg *ndm; 4215 struct nlattr *tb[NDA_MAX+1]; 4216 struct net_device *dev; 4217 u8 *addr; 4218 u16 vid; 4219 int err; 4220 4221 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, 4222 extack); 4223 if (err < 0) 4224 return err; 4225 4226 ndm = nlmsg_data(nlh); 4227 if (ndm->ndm_ifindex == 0) { 4228 NL_SET_ERR_MSG(extack, "invalid ifindex"); 4229 return -EINVAL; 4230 } 4231 4232 dev = __dev_get_by_index(net, ndm->ndm_ifindex); 4233 if (dev == NULL) { 4234 NL_SET_ERR_MSG(extack, "unknown ifindex"); 4235 return -ENODEV; 4236 } 4237 4238 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) { 4239 NL_SET_ERR_MSG(extack, "invalid address"); 4240 return -EINVAL; 4241 } 4242 4243 if (dev->type != ARPHRD_ETHER) { 4244 NL_SET_ERR_MSG(extack, "FDB add only supported for Ethernet devices"); 4245 return -EINVAL; 4246 } 4247 4248 addr = nla_data(tb[NDA_LLADDR]); 4249 4250 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack); 4251 if (err) 4252 return err; 4253 4254 err = -EOPNOTSUPP; 4255 4256 /* Support fdb on master device the net/bridge default case */ 4257 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) && 4258 netif_is_bridge_port(dev)) { 4259 struct net_device *br_dev = 
netdev_master_upper_dev_get(dev); 4260 const struct net_device_ops *ops = br_dev->netdev_ops; 4261 4262 err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid, 4263 nlh->nlmsg_flags, extack); 4264 if (err) 4265 goto out; 4266 else 4267 ndm->ndm_flags &= ~NTF_MASTER; 4268 } 4269 4270 /* Embedded bridge, macvlan, and any other device support */ 4271 if ((ndm->ndm_flags & NTF_SELF)) { 4272 if (dev->netdev_ops->ndo_fdb_add) 4273 err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr, 4274 vid, 4275 nlh->nlmsg_flags, 4276 extack); 4277 else 4278 err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, 4279 nlh->nlmsg_flags); 4280 4281 if (!err) { 4282 rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH, 4283 ndm->ndm_state); 4284 ndm->ndm_flags &= ~NTF_SELF; 4285 } 4286 } 4287 out: 4288 return err; 4289 } 4290 4291 /* 4292 * ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry 4293 */ 4294 int ndo_dflt_fdb_del(struct ndmsg *ndm, 4295 struct nlattr *tb[], 4296 struct net_device *dev, 4297 const unsigned char *addr, u16 vid) 4298 { 4299 int err = -EINVAL; 4300 4301 /* If aging addresses are supported device will need to 4302 * implement its own handler for this. 4303 */ 4304 if (!(ndm->ndm_state & NUD_PERMANENT)) { 4305 netdev_info(dev, "default FDB implementation only supports local addresses\n"); 4306 return err; 4307 } 4308 4309 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) 4310 err = dev_uc_del(dev, addr); 4311 else if (is_multicast_ether_addr(addr)) 4312 err = dev_mc_del(dev, addr); 4313 4314 return err; 4315 } 4316 EXPORT_SYMBOL(ndo_dflt_fdb_del); 4317 4318 static const struct nla_policy fdb_del_bulk_policy[NDA_MAX + 1] = { 4319 [NDA_VLAN] = { .type = NLA_U16 }, 4320 [NDA_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 1), 4321 [NDA_NDM_STATE_MASK] = { .type = NLA_U16 }, 4322 [NDA_NDM_FLAGS_MASK] = { .type = NLA_U8 }, 4323 }; 4324 4325 static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, 4326 struct netlink_ext_ack *extack) 4327 { 4328 bool del_bulk = !!(nlh->nlmsg_flags & NLM_F_BULK); 4329 struct net *net = sock_net(skb->sk); 4330 const struct net_device_ops *ops; 4331 struct ndmsg *ndm; 4332 struct nlattr *tb[NDA_MAX+1]; 4333 struct net_device *dev; 4334 __u8 *addr = NULL; 4335 int err; 4336 u16 vid; 4337 4338 if (!netlink_capable(skb, CAP_NET_ADMIN)) 4339 return -EPERM; 4340 4341 if (!del_bulk) { 4342 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, 4343 NULL, extack); 4344 } else { 4345 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, 4346 fdb_del_bulk_policy, extack); 4347 } 4348 if (err < 0) 4349 return err; 4350 4351 ndm = nlmsg_data(nlh); 4352 if (ndm->ndm_ifindex == 0) { 4353 NL_SET_ERR_MSG(extack, "invalid ifindex"); 4354 return -EINVAL; 4355 } 4356 4357 dev = __dev_get_by_index(net, ndm->ndm_ifindex); 4358 if (dev == NULL) { 4359 NL_SET_ERR_MSG(extack, "unknown ifindex"); 4360 return -ENODEV; 4361 } 4362 4363 if (!del_bulk) { 4364 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) { 4365 NL_SET_ERR_MSG(extack, "invalid address"); 4366 return -EINVAL; 4367 } 4368 addr = nla_data(tb[NDA_LLADDR]); 4369 } 4370 4371 if (dev->type != ARPHRD_ETHER) { 4372 NL_SET_ERR_MSG(extack, "FDB delete only supported for Ethernet devices"); 4373 return -EINVAL; 4374 } 4375 4376 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack); 4377 if (err) 4378 return err; 4379 4380 err = -EOPNOTSUPP; 4381 4382 /* Support fdb on master device the net/bridge default case */ 4383 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) && 4384 netif_is_bridge_port(dev)) { 4385 struct 
net_device *br_dev = netdev_master_upper_dev_get(dev); 4386 4387 ops = br_dev->netdev_ops; 4388 if (!del_bulk) { 4389 if (ops->ndo_fdb_del) 4390 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack); 4391 } else { 4392 if (ops->ndo_fdb_del_bulk) 4393 err = ops->ndo_fdb_del_bulk(ndm, tb, dev, vid, 4394 extack); 4395 } 4396 4397 if (err) 4398 goto out; 4399 else 4400 ndm->ndm_flags &= ~NTF_MASTER; 4401 } 4402 4403 /* Embedded bridge, macvlan, and any other device support */ 4404 if (ndm->ndm_flags & NTF_SELF) { 4405 ops = dev->netdev_ops; 4406 if (!del_bulk) { 4407 if (ops->ndo_fdb_del) 4408 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack); 4409 else 4410 err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid); 4411 } else { 4412 /* in case err was cleared by NTF_MASTER call */ 4413 err = -EOPNOTSUPP; 4414 if (ops->ndo_fdb_del_bulk) 4415 err = ops->ndo_fdb_del_bulk(ndm, tb, dev, vid, 4416 extack); 4417 } 4418 4419 if (!err) { 4420 if (!del_bulk) 4421 rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH, 4422 ndm->ndm_state); 4423 ndm->ndm_flags &= ~NTF_SELF; 4424 } 4425 } 4426 out: 4427 return err; 4428 } 4429 4430 static int nlmsg_populate_fdb(struct sk_buff *skb, 4431 struct netlink_callback *cb, 4432 struct net_device *dev, 4433 int *idx, 4434 struct netdev_hw_addr_list *list) 4435 { 4436 struct netdev_hw_addr *ha; 4437 int err; 4438 u32 portid, seq; 4439 4440 portid = NETLINK_CB(cb->skb).portid; 4441 seq = cb->nlh->nlmsg_seq; 4442 4443 list_for_each_entry(ha, &list->list, list) { 4444 if (*idx < cb->args[2]) 4445 goto skip; 4446 4447 err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0, 4448 portid, seq, 4449 RTM_NEWNEIGH, NTF_SELF, 4450 NLM_F_MULTI, NUD_PERMANENT); 4451 if (err < 0) 4452 return err; 4453 skip: 4454 *idx += 1; 4455 } 4456 return 0; 4457 } 4458 4459 /** 4460 * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table. 4461 * @skb: socket buffer to store message in 4462 * @cb: netlink callback 4463 * @dev: netdevice 4464 * @filter_dev: ignored 4465 * @idx: the number of FDB table entries dumped is added to *@idx 4466 * 4467 * Default netdevice operation to dump the existing unicast address list. 4468 * Returns number of addresses from list put in skb. 
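 *
 * The walk is resumable: nlmsg_populate_fdb() compares *idx against
 * cb->args[2], so entries already emitted in an earlier dump round are
 * skipped rather than duplicated once the skb fills up.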
4469 */ 4470 int ndo_dflt_fdb_dump(struct sk_buff *skb, 4471 struct netlink_callback *cb, 4472 struct net_device *dev, 4473 struct net_device *filter_dev, 4474 int *idx) 4475 { 4476 int err; 4477 4478 if (dev->type != ARPHRD_ETHER) 4479 return -EINVAL; 4480 4481 netif_addr_lock_bh(dev); 4482 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc); 4483 if (err) 4484 goto out; 4485 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->mc); 4486 out: 4487 netif_addr_unlock_bh(dev); 4488 return err; 4489 } 4490 EXPORT_SYMBOL(ndo_dflt_fdb_dump); 4491 4492 static int valid_fdb_dump_strict(const struct nlmsghdr *nlh, 4493 int *br_idx, int *brport_idx, 4494 struct netlink_ext_ack *extack) 4495 { 4496 struct nlattr *tb[NDA_MAX + 1]; 4497 struct ndmsg *ndm; 4498 int err, i; 4499 4500 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) { 4501 NL_SET_ERR_MSG(extack, "Invalid header for fdb dump request"); 4502 return -EINVAL; 4503 } 4504 4505 ndm = nlmsg_data(nlh); 4506 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state || 4507 ndm->ndm_flags || ndm->ndm_type) { 4508 NL_SET_ERR_MSG(extack, "Invalid values in header for fdb dump request"); 4509 return -EINVAL; 4510 } 4511 4512 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb, 4513 NDA_MAX, NULL, extack); 4514 if (err < 0) 4515 return err; 4516 4517 *brport_idx = ndm->ndm_ifindex; 4518 for (i = 0; i <= NDA_MAX; ++i) { 4519 if (!tb[i]) 4520 continue; 4521 4522 switch (i) { 4523 case NDA_IFINDEX: 4524 if (nla_len(tb[i]) != sizeof(u32)) { 4525 NL_SET_ERR_MSG(extack, "Invalid IFINDEX attribute in fdb dump request"); 4526 return -EINVAL; 4527 } 4528 *brport_idx = nla_get_u32(tb[NDA_IFINDEX]); 4529 break; 4530 case NDA_MASTER: 4531 if (nla_len(tb[i]) != sizeof(u32)) { 4532 NL_SET_ERR_MSG(extack, "Invalid MASTER attribute in fdb dump request"); 4533 return -EINVAL; 4534 } 4535 *br_idx = nla_get_u32(tb[NDA_MASTER]); 4536 break; 4537 default: 4538 NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb dump request"); 4539 return -EINVAL; 4540 } 4541 } 4542 4543 return 0; 4544 } 4545 4546 static int valid_fdb_dump_legacy(const struct nlmsghdr *nlh, 4547 int *br_idx, int *brport_idx, 4548 struct netlink_ext_ack *extack) 4549 { 4550 struct nlattr *tb[IFLA_MAX+1]; 4551 int err; 4552 4553 /* A hack to preserve kernel<->userspace interface. 4554 * Before Linux v4.12 this code accepted ndmsg since iproute2 v3.3.0. 4555 * However, ndmsg is shorter than ifinfomsg thus nlmsg_parse() bails. 4556 * So, check for ndmsg with an optional u32 attribute (not used here). 4557 * Fortunately these sizes don't conflict with the size of ifinfomsg 4558 * with an optional attribute. 
4559 */ 4560 if (nlmsg_len(nlh) != sizeof(struct ndmsg) && 4561 (nlmsg_len(nlh) != sizeof(struct ndmsg) + 4562 nla_attr_size(sizeof(u32)))) { 4563 struct ifinfomsg *ifm; 4564 4565 err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg), 4566 tb, IFLA_MAX, ifla_policy, 4567 extack); 4568 if (err < 0) { 4569 return -EINVAL; 4570 } else if (err == 0) { 4571 if (tb[IFLA_MASTER]) 4572 *br_idx = nla_get_u32(tb[IFLA_MASTER]); 4573 } 4574 4575 ifm = nlmsg_data(nlh); 4576 *brport_idx = ifm->ifi_index; 4577 } 4578 return 0; 4579 } 4580 4581 static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb) 4582 { 4583 struct net_device *dev; 4584 struct net_device *br_dev = NULL; 4585 const struct net_device_ops *ops = NULL; 4586 const struct net_device_ops *cops = NULL; 4587 struct net *net = sock_net(skb->sk); 4588 struct hlist_head *head; 4589 int brport_idx = 0; 4590 int br_idx = 0; 4591 int h, s_h; 4592 int idx = 0, s_idx; 4593 int err = 0; 4594 int fidx = 0; 4595 4596 if (cb->strict_check) 4597 err = valid_fdb_dump_strict(cb->nlh, &br_idx, &brport_idx, 4598 cb->extack); 4599 else 4600 err = valid_fdb_dump_legacy(cb->nlh, &br_idx, &brport_idx, 4601 cb->extack); 4602 if (err < 0) 4603 return err; 4604 4605 if (br_idx) { 4606 br_dev = __dev_get_by_index(net, br_idx); 4607 if (!br_dev) 4608 return -ENODEV; 4609 4610 ops = br_dev->netdev_ops; 4611 } 4612 4613 s_h = cb->args[0]; 4614 s_idx = cb->args[1]; 4615 4616 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { 4617 idx = 0; 4618 head = &net->dev_index_head[h]; 4619 hlist_for_each_entry(dev, head, index_hlist) { 4620 4621 if (brport_idx && (dev->ifindex != brport_idx)) 4622 continue; 4623 4624 if (!br_idx) { /* user did not specify a specific bridge */ 4625 if (netif_is_bridge_port(dev)) { 4626 br_dev = netdev_master_upper_dev_get(dev); 4627 cops = br_dev->netdev_ops; 4628 } 4629 } else { 4630 if (dev != br_dev && 4631 !netif_is_bridge_port(dev)) 4632 continue; 4633 4634 if (br_dev != netdev_master_upper_dev_get(dev) && 4635 !netif_is_bridge_master(dev)) 4636 continue; 4637 cops = ops; 4638 } 4639 4640 if (idx < s_idx) 4641 goto cont; 4642 4643 if (netif_is_bridge_port(dev)) { 4644 if (cops && cops->ndo_fdb_dump) { 4645 err = cops->ndo_fdb_dump(skb, cb, 4646 br_dev, dev, 4647 &fidx); 4648 if (err == -EMSGSIZE) 4649 goto out; 4650 } 4651 } 4652 4653 if (dev->netdev_ops->ndo_fdb_dump) 4654 err = dev->netdev_ops->ndo_fdb_dump(skb, cb, 4655 dev, NULL, 4656 &fidx); 4657 else 4658 err = ndo_dflt_fdb_dump(skb, cb, dev, NULL, 4659 &fidx); 4660 if (err == -EMSGSIZE) 4661 goto out; 4662 4663 cops = NULL; 4664 4665 /* reset fdb offset to 0 for rest of the interfaces */ 4666 cb->args[2] = 0; 4667 fidx = 0; 4668 cont: 4669 idx++; 4670 } 4671 } 4672 4673 out: 4674 cb->args[0] = h; 4675 cb->args[1] = idx; 4676 cb->args[2] = fidx; 4677 4678 return skb->len; 4679 } 4680 4681 static int valid_fdb_get_strict(const struct nlmsghdr *nlh, 4682 struct nlattr **tb, u8 *ndm_flags, 4683 int *br_idx, int *brport_idx, u8 **addr, 4684 u16 *vid, struct netlink_ext_ack *extack) 4685 { 4686 struct ndmsg *ndm; 4687 int err, i; 4688 4689 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) { 4690 NL_SET_ERR_MSG(extack, "Invalid header for fdb get request"); 4691 return -EINVAL; 4692 } 4693 4694 ndm = nlmsg_data(nlh); 4695 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state || 4696 ndm->ndm_type) { 4697 NL_SET_ERR_MSG(extack, "Invalid values in header for fdb get request"); 4698 return -EINVAL; 4699 } 4700 4701 if (ndm->ndm_flags & ~(NTF_MASTER | NTF_SELF)) { 4702 
NL_SET_ERR_MSG(extack, "Invalid flags in header for fdb get request"); 4703 return -EINVAL; 4704 } 4705 4706 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb, 4707 NDA_MAX, nda_policy, extack); 4708 if (err < 0) 4709 return err; 4710 4711 *ndm_flags = ndm->ndm_flags; 4712 *brport_idx = ndm->ndm_ifindex; 4713 for (i = 0; i <= NDA_MAX; ++i) { 4714 if (!tb[i]) 4715 continue; 4716 4717 switch (i) { 4718 case NDA_MASTER: 4719 *br_idx = nla_get_u32(tb[i]); 4720 break; 4721 case NDA_LLADDR: 4722 if (nla_len(tb[i]) != ETH_ALEN) { 4723 NL_SET_ERR_MSG(extack, "Invalid address in fdb get request"); 4724 return -EINVAL; 4725 } 4726 *addr = nla_data(tb[i]); 4727 break; 4728 case NDA_VLAN: 4729 err = fdb_vid_parse(tb[i], vid, extack); 4730 if (err) 4731 return err; 4732 break; 4733 case NDA_VNI: 4734 break; 4735 default: 4736 NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb get request"); 4737 return -EINVAL; 4738 } 4739 } 4740 4741 return 0; 4742 } 4743 4744 static int rtnl_fdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh, 4745 struct netlink_ext_ack *extack) 4746 { 4747 struct net_device *dev = NULL, *br_dev = NULL; 4748 const struct net_device_ops *ops = NULL; 4749 struct net *net = sock_net(in_skb->sk); 4750 struct nlattr *tb[NDA_MAX + 1]; 4751 struct sk_buff *skb; 4752 int brport_idx = 0; 4753 u8 ndm_flags = 0; 4754 int br_idx = 0; 4755 u8 *addr = NULL; 4756 u16 vid = 0; 4757 int err; 4758 4759 err = valid_fdb_get_strict(nlh, tb, &ndm_flags, &br_idx, 4760 &brport_idx, &addr, &vid, extack); 4761 if (err < 0) 4762 return err; 4763 4764 if (!addr) { 4765 NL_SET_ERR_MSG(extack, "Missing lookup address for fdb get request"); 4766 return -EINVAL; 4767 } 4768 4769 if (brport_idx) { 4770 dev = __dev_get_by_index(net, brport_idx); 4771 if (!dev) { 4772 NL_SET_ERR_MSG(extack, "Unknown device ifindex"); 4773 return -ENODEV; 4774 } 4775 } 4776 4777 if (br_idx) { 4778 if (dev) { 4779 NL_SET_ERR_MSG(extack, "Master and device are mutually exclusive"); 4780 return -EINVAL; 4781 } 4782 4783 br_dev = __dev_get_by_index(net, br_idx); 4784 if (!br_dev) { 4785 NL_SET_ERR_MSG(extack, "Invalid master ifindex"); 4786 return -EINVAL; 4787 } 4788 ops = br_dev->netdev_ops; 4789 } 4790 4791 if (dev) { 4792 if (!ndm_flags || (ndm_flags & NTF_MASTER)) { 4793 if (!netif_is_bridge_port(dev)) { 4794 NL_SET_ERR_MSG(extack, "Device is not a bridge port"); 4795 return -EINVAL; 4796 } 4797 br_dev = netdev_master_upper_dev_get(dev); 4798 if (!br_dev) { 4799 NL_SET_ERR_MSG(extack, "Master of device not found"); 4800 return -EINVAL; 4801 } 4802 ops = br_dev->netdev_ops; 4803 } else { 4804 if (!(ndm_flags & NTF_SELF)) { 4805 NL_SET_ERR_MSG(extack, "Missing NTF_SELF"); 4806 return -EINVAL; 4807 } 4808 ops = dev->netdev_ops; 4809 } 4810 } 4811 4812 if (!br_dev && !dev) { 4813 NL_SET_ERR_MSG(extack, "No device specified"); 4814 return -ENODEV; 4815 } 4816 4817 if (!ops || !ops->ndo_fdb_get) { 4818 NL_SET_ERR_MSG(extack, "Fdb get operation not supported by device"); 4819 return -EOPNOTSUPP; 4820 } 4821 4822 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 4823 if (!skb) 4824 return -ENOBUFS; 4825 4826 if (br_dev) 4827 dev = br_dev; 4828 err = ops->ndo_fdb_get(skb, tb, dev, addr, vid, 4829 NETLINK_CB(in_skb).portid, 4830 nlh->nlmsg_seq, extack); 4831 if (err) 4832 goto out; 4833 4834 return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid); 4835 out: 4836 kfree_skb(skb); 4837 return err; 4838 } 4839 4840 static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask, 4841 unsigned int attrnum, unsigned int flag) 
4842 { 4843 if (mask & flag) 4844 return nla_put_u8(skb, attrnum, !!(flags & flag)); 4845 return 0; 4846 } 4847 4848 int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 4849 struct net_device *dev, u16 mode, 4850 u32 flags, u32 mask, int nlflags, 4851 u32 filter_mask, 4852 int (*vlan_fill)(struct sk_buff *skb, 4853 struct net_device *dev, 4854 u32 filter_mask)) 4855 { 4856 struct nlmsghdr *nlh; 4857 struct ifinfomsg *ifm; 4858 struct nlattr *br_afspec; 4859 struct nlattr *protinfo; 4860 u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN; 4861 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 4862 int err = 0; 4863 4864 nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags); 4865 if (nlh == NULL) 4866 return -EMSGSIZE; 4867 4868 ifm = nlmsg_data(nlh); 4869 ifm->ifi_family = AF_BRIDGE; 4870 ifm->__ifi_pad = 0; 4871 ifm->ifi_type = dev->type; 4872 ifm->ifi_index = dev->ifindex; 4873 ifm->ifi_flags = dev_get_flags(dev); 4874 ifm->ifi_change = 0; 4875 4876 4877 if (nla_put_string(skb, IFLA_IFNAME, dev->name) || 4878 nla_put_u32(skb, IFLA_MTU, dev->mtu) || 4879 nla_put_u8(skb, IFLA_OPERSTATE, operstate) || 4880 (br_dev && 4881 nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) || 4882 (dev->addr_len && 4883 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) || 4884 (dev->ifindex != dev_get_iflink(dev) && 4885 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev)))) 4886 goto nla_put_failure; 4887 4888 br_afspec = nla_nest_start_noflag(skb, IFLA_AF_SPEC); 4889 if (!br_afspec) 4890 goto nla_put_failure; 4891 4892 if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF)) { 4893 nla_nest_cancel(skb, br_afspec); 4894 goto nla_put_failure; 4895 } 4896 4897 if (mode != BRIDGE_MODE_UNDEF) { 4898 if (nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) { 4899 nla_nest_cancel(skb, br_afspec); 4900 goto nla_put_failure; 4901 } 4902 } 4903 if (vlan_fill) { 4904 err = vlan_fill(skb, dev, filter_mask); 4905 if (err) { 4906 nla_nest_cancel(skb, br_afspec); 4907 goto nla_put_failure; 4908 } 4909 } 4910 nla_nest_end(skb, br_afspec); 4911 4912 protinfo = nla_nest_start(skb, IFLA_PROTINFO); 4913 if (!protinfo) 4914 goto nla_put_failure; 4915 4916 if (brport_nla_put_flag(skb, flags, mask, 4917 IFLA_BRPORT_MODE, BR_HAIRPIN_MODE) || 4918 brport_nla_put_flag(skb, flags, mask, 4919 IFLA_BRPORT_GUARD, BR_BPDU_GUARD) || 4920 brport_nla_put_flag(skb, flags, mask, 4921 IFLA_BRPORT_FAST_LEAVE, 4922 BR_MULTICAST_FAST_LEAVE) || 4923 brport_nla_put_flag(skb, flags, mask, 4924 IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK) || 4925 brport_nla_put_flag(skb, flags, mask, 4926 IFLA_BRPORT_LEARNING, BR_LEARNING) || 4927 brport_nla_put_flag(skb, flags, mask, 4928 IFLA_BRPORT_LEARNING_SYNC, BR_LEARNING_SYNC) || 4929 brport_nla_put_flag(skb, flags, mask, 4930 IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD) || 4931 brport_nla_put_flag(skb, flags, mask, 4932 IFLA_BRPORT_PROXYARP, BR_PROXYARP) || 4933 brport_nla_put_flag(skb, flags, mask, 4934 IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD) || 4935 brport_nla_put_flag(skb, flags, mask, 4936 IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD)) { 4937 nla_nest_cancel(skb, protinfo); 4938 goto nla_put_failure; 4939 } 4940 4941 nla_nest_end(skb, protinfo); 4942 4943 nlmsg_end(skb, nlh); 4944 return 0; 4945 nla_put_failure: 4946 nlmsg_cancel(skb, nlh); 4947 return err ? 
err : -EMSGSIZE; 4948 } 4949 EXPORT_SYMBOL_GPL(ndo_dflt_bridge_getlink); 4950 4951 static int valid_bridge_getlink_req(const struct nlmsghdr *nlh, 4952 bool strict_check, u32 *filter_mask, 4953 struct netlink_ext_ack *extack) 4954 { 4955 struct nlattr *tb[IFLA_MAX+1]; 4956 int err, i; 4957 4958 if (strict_check) { 4959 struct ifinfomsg *ifm; 4960 4961 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) { 4962 NL_SET_ERR_MSG(extack, "Invalid header for bridge link dump"); 4963 return -EINVAL; 4964 } 4965 4966 ifm = nlmsg_data(nlh); 4967 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags || 4968 ifm->ifi_change || ifm->ifi_index) { 4969 NL_SET_ERR_MSG(extack, "Invalid values in header for bridge link dump request"); 4970 return -EINVAL; 4971 } 4972 4973 err = nlmsg_parse_deprecated_strict(nlh, 4974 sizeof(struct ifinfomsg), 4975 tb, IFLA_MAX, ifla_policy, 4976 extack); 4977 } else { 4978 err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg), 4979 tb, IFLA_MAX, ifla_policy, 4980 extack); 4981 } 4982 if (err < 0) 4983 return err; 4984 4985 /* new attributes should only be added with strict checking */ 4986 for (i = 0; i <= IFLA_MAX; ++i) { 4987 if (!tb[i]) 4988 continue; 4989 4990 switch (i) { 4991 case IFLA_EXT_MASK: 4992 *filter_mask = nla_get_u32(tb[i]); 4993 break; 4994 default: 4995 if (strict_check) { 4996 NL_SET_ERR_MSG(extack, "Unsupported attribute in bridge link dump request"); 4997 return -EINVAL; 4998 } 4999 } 5000 } 5001 5002 return 0; 5003 } 5004 5005 static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb) 5006 { 5007 const struct nlmsghdr *nlh = cb->nlh; 5008 struct net *net = sock_net(skb->sk); 5009 struct net_device *dev; 5010 int idx = 0; 5011 u32 portid = NETLINK_CB(cb->skb).portid; 5012 u32 seq = nlh->nlmsg_seq; 5013 u32 filter_mask = 0; 5014 int err; 5015 5016 err = valid_bridge_getlink_req(nlh, cb->strict_check, &filter_mask, 5017 cb->extack); 5018 if (err < 0 && cb->strict_check) 5019 return err; 5020 5021 rcu_read_lock(); 5022 for_each_netdev_rcu(net, dev) { 5023 const struct net_device_ops *ops = dev->netdev_ops; 5024 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 5025 5026 if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) { 5027 if (idx >= cb->args[0]) { 5028 err = br_dev->netdev_ops->ndo_bridge_getlink( 5029 skb, portid, seq, dev, 5030 filter_mask, NLM_F_MULTI); 5031 if (err < 0 && err != -EOPNOTSUPP) { 5032 if (likely(skb->len)) 5033 break; 5034 5035 goto out_err; 5036 } 5037 } 5038 idx++; 5039 } 5040 5041 if (ops->ndo_bridge_getlink) { 5042 if (idx >= cb->args[0]) { 5043 err = ops->ndo_bridge_getlink(skb, portid, 5044 seq, dev, 5045 filter_mask, 5046 NLM_F_MULTI); 5047 if (err < 0 && err != -EOPNOTSUPP) { 5048 if (likely(skb->len)) 5049 break; 5050 5051 goto out_err; 5052 } 5053 } 5054 idx++; 5055 } 5056 } 5057 err = skb->len; 5058 out_err: 5059 rcu_read_unlock(); 5060 cb->args[0] = idx; 5061 5062 return err; 5063 } 5064 5065 static inline size_t bridge_nlmsg_size(void) 5066 { 5067 return NLMSG_ALIGN(sizeof(struct ifinfomsg)) 5068 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */ 5069 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */ 5070 + nla_total_size(sizeof(u32)) /* IFLA_MASTER */ 5071 + nla_total_size(sizeof(u32)) /* IFLA_MTU */ 5072 + nla_total_size(sizeof(u32)) /* IFLA_LINK */ 5073 + nla_total_size(sizeof(u32)) /* IFLA_OPERSTATE */ 5074 + nla_total_size(sizeof(u8)) /* IFLA_PROTINFO */ 5075 + nla_total_size(sizeof(struct nlattr)) /* IFLA_AF_SPEC */ 5076 + nla_total_size(sizeof(u16)) /* IFLA_BRIDGE_FLAGS */ 5077 + 
nla_total_size(sizeof(u16)); /* IFLA_BRIDGE_MODE */ 5078 } 5079 5080 static int rtnl_bridge_notify(struct net_device *dev) 5081 { 5082 struct net *net = dev_net(dev); 5083 struct sk_buff *skb; 5084 int err = -EOPNOTSUPP; 5085 5086 if (!dev->netdev_ops->ndo_bridge_getlink) 5087 return 0; 5088 5089 skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC); 5090 if (!skb) { 5091 err = -ENOMEM; 5092 goto errout; 5093 } 5094 5095 err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0); 5096 if (err < 0) 5097 goto errout; 5098 5099 /* Notification info is only filled for bridge ports, not the bridge 5100 * device itself. Therefore, a zero notification length is valid and 5101 * should not result in an error. 5102 */ 5103 if (!skb->len) 5104 goto errout; 5105 5106 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC); 5107 return 0; 5108 errout: 5109 WARN_ON(err == -EMSGSIZE); 5110 kfree_skb(skb); 5111 if (err) 5112 rtnl_set_sk_err(net, RTNLGRP_LINK, err); 5113 return err; 5114 } 5115 5116 static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, 5117 struct netlink_ext_ack *extack) 5118 { 5119 struct net *net = sock_net(skb->sk); 5120 struct ifinfomsg *ifm; 5121 struct net_device *dev; 5122 struct nlattr *br_spec, *attr = NULL; 5123 int rem, err = -EOPNOTSUPP; 5124 u16 flags = 0; 5125 bool have_flags = false; 5126 5127 if (nlmsg_len(nlh) < sizeof(*ifm)) 5128 return -EINVAL; 5129 5130 ifm = nlmsg_data(nlh); 5131 if (ifm->ifi_family != AF_BRIDGE) 5132 return -EPFNOSUPPORT; 5133 5134 dev = __dev_get_by_index(net, ifm->ifi_index); 5135 if (!dev) { 5136 NL_SET_ERR_MSG(extack, "unknown ifindex"); 5137 return -ENODEV; 5138 } 5139 5140 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 5141 if (br_spec) { 5142 nla_for_each_nested(attr, br_spec, rem) { 5143 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) { 5144 if (nla_len(attr) < sizeof(flags)) 5145 return -EINVAL; 5146 5147 have_flags = true; 5148 flags = nla_get_u16(attr); 5149 break; 5150 } 5151 } 5152 } 5153 5154 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) { 5155 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 5156 5157 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_setlink) { 5158 err = -EOPNOTSUPP; 5159 goto out; 5160 } 5161 5162 err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags, 5163 extack); 5164 if (err) 5165 goto out; 5166 5167 flags &= ~BRIDGE_FLAGS_MASTER; 5168 } 5169 5170 if ((flags & BRIDGE_FLAGS_SELF)) { 5171 if (!dev->netdev_ops->ndo_bridge_setlink) 5172 err = -EOPNOTSUPP; 5173 else 5174 err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh, 5175 flags, 5176 extack); 5177 if (!err) { 5178 flags &= ~BRIDGE_FLAGS_SELF; 5179 5180 /* Generate event to notify upper layer of bridge 5181 * change 5182 */ 5183 err = rtnl_bridge_notify(dev); 5184 } 5185 } 5186 5187 if (have_flags) 5188 memcpy(nla_data(attr), &flags, sizeof(flags)); 5189 out: 5190 return err; 5191 } 5192 5193 static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, 5194 struct netlink_ext_ack *extack) 5195 { 5196 struct net *net = sock_net(skb->sk); 5197 struct ifinfomsg *ifm; 5198 struct net_device *dev; 5199 struct nlattr *br_spec, *attr = NULL; 5200 int rem, err = -EOPNOTSUPP; 5201 u16 flags = 0; 5202 bool have_flags = false; 5203 5204 if (nlmsg_len(nlh) < sizeof(*ifm)) 5205 return -EINVAL; 5206 5207 ifm = nlmsg_data(nlh); 5208 if (ifm->ifi_family != AF_BRIDGE) 5209 return -EPFNOSUPPORT; 5210 5211 dev = __dev_get_by_index(net, ifm->ifi_index); 5212 if (!dev) { 5213 NL_SET_ERR_MSG(extack, "unknown ifindex"); 
5214 return -ENODEV; 5215 } 5216 5217 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 5218 if (br_spec) { 5219 nla_for_each_nested(attr, br_spec, rem) { 5220 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) { 5221 if (nla_len(attr) < sizeof(flags)) 5222 return -EINVAL; 5223 5224 have_flags = true; 5225 flags = nla_get_u16(attr); 5226 break; 5227 } 5228 } 5229 } 5230 5231 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) { 5232 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 5233 5234 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) { 5235 err = -EOPNOTSUPP; 5236 goto out; 5237 } 5238 5239 err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh, flags); 5240 if (err) 5241 goto out; 5242 5243 flags &= ~BRIDGE_FLAGS_MASTER; 5244 } 5245 5246 if ((flags & BRIDGE_FLAGS_SELF)) { 5247 if (!dev->netdev_ops->ndo_bridge_dellink) 5248 err = -EOPNOTSUPP; 5249 else 5250 err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh, 5251 flags); 5252 5253 if (!err) { 5254 flags &= ~BRIDGE_FLAGS_SELF; 5255 5256 /* Generate event to notify upper layer of bridge 5257 * change 5258 */ 5259 err = rtnl_bridge_notify(dev); 5260 } 5261 } 5262 5263 if (have_flags) 5264 memcpy(nla_data(attr), &flags, sizeof(flags)); 5265 out: 5266 return err; 5267 } 5268 5269 static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr) 5270 { 5271 return (mask & IFLA_STATS_FILTER_BIT(attrid)) && 5272 (!idxattr || idxattr == attrid); 5273 } 5274 5275 static bool 5276 rtnl_offload_xstats_have_ndo(const struct net_device *dev, int attr_id) 5277 { 5278 return dev->netdev_ops && 5279 dev->netdev_ops->ndo_has_offload_stats && 5280 dev->netdev_ops->ndo_get_offload_stats && 5281 dev->netdev_ops->ndo_has_offload_stats(dev, attr_id); 5282 } 5283 5284 static unsigned int 5285 rtnl_offload_xstats_get_size_ndo(const struct net_device *dev, int attr_id) 5286 { 5287 return rtnl_offload_xstats_have_ndo(dev, attr_id) ? 5288 sizeof(struct rtnl_link_stats64) : 0; 5289 } 5290 5291 static int 5292 rtnl_offload_xstats_fill_ndo(struct net_device *dev, int attr_id, 5293 struct sk_buff *skb) 5294 { 5295 unsigned int size = rtnl_offload_xstats_get_size_ndo(dev, attr_id); 5296 struct nlattr *attr = NULL; 5297 void *attr_data; 5298 int err; 5299 5300 if (!size) 5301 return -ENODATA; 5302 5303 attr = nla_reserve_64bit(skb, attr_id, size, 5304 IFLA_OFFLOAD_XSTATS_UNSPEC); 5305 if (!attr) 5306 return -EMSGSIZE; 5307 5308 attr_data = nla_data(attr); 5309 memset(attr_data, 0, size); 5310 5311 err = dev->netdev_ops->ndo_get_offload_stats(attr_id, dev, attr_data); 5312 if (err) 5313 return err; 5314 5315 return 0; 5316 } 5317 5318 static unsigned int 5319 rtnl_offload_xstats_get_size_stats(const struct net_device *dev, 5320 enum netdev_offload_xstats_type type) 5321 { 5322 bool enabled = netdev_offload_xstats_enabled(dev, type); 5323 5324 return enabled ? 
sizeof(struct rtnl_hw_stats64) : 0; 5325 } 5326 5327 struct rtnl_offload_xstats_request_used { 5328 bool request; 5329 bool used; 5330 }; 5331 5332 static int 5333 rtnl_offload_xstats_get_stats(struct net_device *dev, 5334 enum netdev_offload_xstats_type type, 5335 struct rtnl_offload_xstats_request_used *ru, 5336 struct rtnl_hw_stats64 *stats, 5337 struct netlink_ext_ack *extack) 5338 { 5339 bool request; 5340 bool used; 5341 int err; 5342 5343 request = netdev_offload_xstats_enabled(dev, type); 5344 if (!request) { 5345 used = false; 5346 goto out; 5347 } 5348 5349 err = netdev_offload_xstats_get(dev, type, stats, &used, extack); 5350 if (err) 5351 return err; 5352 5353 out: 5354 if (ru) { 5355 ru->request = request; 5356 ru->used = used; 5357 } 5358 return 0; 5359 } 5360 5361 static int 5362 rtnl_offload_xstats_fill_hw_s_info_one(struct sk_buff *skb, int attr_id, 5363 struct rtnl_offload_xstats_request_used *ru) 5364 { 5365 struct nlattr *nest; 5366 5367 nest = nla_nest_start(skb, attr_id); 5368 if (!nest) 5369 return -EMSGSIZE; 5370 5371 if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST, ru->request)) 5372 goto nla_put_failure; 5373 5374 if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED, ru->used)) 5375 goto nla_put_failure; 5376 5377 nla_nest_end(skb, nest); 5378 return 0; 5379 5380 nla_put_failure: 5381 nla_nest_cancel(skb, nest); 5382 return -EMSGSIZE; 5383 } 5384 5385 static int 5386 rtnl_offload_xstats_fill_hw_s_info(struct sk_buff *skb, struct net_device *dev, 5387 struct netlink_ext_ack *extack) 5388 { 5389 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; 5390 struct rtnl_offload_xstats_request_used ru_l3; 5391 struct nlattr *nest; 5392 int err; 5393 5394 err = rtnl_offload_xstats_get_stats(dev, t_l3, &ru_l3, NULL, extack); 5395 if (err) 5396 return err; 5397 5398 nest = nla_nest_start(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO); 5399 if (!nest) 5400 return -EMSGSIZE; 5401 5402 if (rtnl_offload_xstats_fill_hw_s_info_one(skb, 5403 IFLA_OFFLOAD_XSTATS_L3_STATS, 5404 &ru_l3)) 5405 goto nla_put_failure; 5406 5407 nla_nest_end(skb, nest); 5408 return 0; 5409 5410 nla_put_failure: 5411 nla_nest_cancel(skb, nest); 5412 return -EMSGSIZE; 5413 } 5414 5415 static int rtnl_offload_xstats_fill(struct sk_buff *skb, struct net_device *dev, 5416 int *prividx, u32 off_filter_mask, 5417 struct netlink_ext_ack *extack) 5418 { 5419 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; 5420 int attr_id_hw_s_info = IFLA_OFFLOAD_XSTATS_HW_S_INFO; 5421 int attr_id_l3_stats = IFLA_OFFLOAD_XSTATS_L3_STATS; 5422 int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT; 5423 bool have_data = false; 5424 int err; 5425 5426 if (*prividx <= attr_id_cpu_hit && 5427 (off_filter_mask & 5428 IFLA_STATS_FILTER_BIT(attr_id_cpu_hit))) { 5429 err = rtnl_offload_xstats_fill_ndo(dev, attr_id_cpu_hit, skb); 5430 if (!err) { 5431 have_data = true; 5432 } else if (err != -ENODATA) { 5433 *prividx = attr_id_cpu_hit; 5434 return err; 5435 } 5436 } 5437 5438 if (*prividx <= attr_id_hw_s_info && 5439 (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_hw_s_info))) { 5440 *prividx = attr_id_hw_s_info; 5441 5442 err = rtnl_offload_xstats_fill_hw_s_info(skb, dev, extack); 5443 if (err) 5444 return err; 5445 5446 have_data = true; 5447 *prividx = 0; 5448 } 5449 5450 if (*prividx <= attr_id_l3_stats && 5451 (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_l3_stats))) { 5452 unsigned int size_l3; 5453 struct nlattr *attr; 5454 5455 *prividx = attr_id_l3_stats; 5456 5457 size_l3 = 
rtnl_offload_xstats_get_size_stats(dev, t_l3); 5458 if (!size_l3) 5459 goto skip_l3_stats; 5460 attr = nla_reserve_64bit(skb, attr_id_l3_stats, size_l3, 5461 IFLA_OFFLOAD_XSTATS_UNSPEC); 5462 if (!attr) 5463 return -EMSGSIZE; 5464 5465 err = rtnl_offload_xstats_get_stats(dev, t_l3, NULL, 5466 nla_data(attr), extack); 5467 if (err) 5468 return err; 5469 5470 have_data = true; 5471 skip_l3_stats: 5472 *prividx = 0; 5473 } 5474 5475 if (!have_data) 5476 return -ENODATA; 5477 5478 *prividx = 0; 5479 return 0; 5480 } 5481 5482 static unsigned int 5483 rtnl_offload_xstats_get_size_hw_s_info_one(const struct net_device *dev, 5484 enum netdev_offload_xstats_type type) 5485 { 5486 bool enabled = netdev_offload_xstats_enabled(dev, type); 5487 5488 return nla_total_size(0) + 5489 /* IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST */ 5490 nla_total_size(sizeof(u8)) + 5491 /* IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED */ 5492 (enabled ? nla_total_size(sizeof(u8)) : 0) + 5493 0; 5494 } 5495 5496 static unsigned int 5497 rtnl_offload_xstats_get_size_hw_s_info(const struct net_device *dev) 5498 { 5499 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; 5500 5501 return nla_total_size(0) + 5502 /* IFLA_OFFLOAD_XSTATS_L3_STATS */ 5503 rtnl_offload_xstats_get_size_hw_s_info_one(dev, t_l3) + 5504 0; 5505 } 5506 5507 static int rtnl_offload_xstats_get_size(const struct net_device *dev, 5508 u32 off_filter_mask) 5509 { 5510 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; 5511 int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT; 5512 int nla_size = 0; 5513 int size; 5514 5515 if (off_filter_mask & 5516 IFLA_STATS_FILTER_BIT(attr_id_cpu_hit)) { 5517 size = rtnl_offload_xstats_get_size_ndo(dev, attr_id_cpu_hit); 5518 nla_size += nla_total_size_64bit(size); 5519 } 5520 5521 if (off_filter_mask & 5522 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO)) 5523 nla_size += rtnl_offload_xstats_get_size_hw_s_info(dev); 5524 5525 if (off_filter_mask & 5526 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_L3_STATS)) { 5527 size = rtnl_offload_xstats_get_size_stats(dev, t_l3); 5528 nla_size += nla_total_size_64bit(size); 5529 } 5530 5531 if (nla_size != 0) 5532 nla_size += nla_total_size(0); 5533 5534 return nla_size; 5535 } 5536 5537 struct rtnl_stats_dump_filters { 5538 /* mask[0] filters outer attributes. Then individual nests have their 5539 * filtering mask at the index of the nested attribute. 
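	 *
	 * For example, to request only the CPU-hit counters inside the
	 * offload nest (mirroring how rtnl_offload_xstats_notify() selects
	 * HW_S_INFO):
	 *
	 *	mask[0] = IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS);
	 *	mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] =
	 *		IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_CPU_HIT);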
5540 */ 5541 u32 mask[IFLA_STATS_MAX + 1]; 5542 }; 5543 5544 static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev, 5545 int type, u32 pid, u32 seq, u32 change, 5546 unsigned int flags, 5547 const struct rtnl_stats_dump_filters *filters, 5548 int *idxattr, int *prividx, 5549 struct netlink_ext_ack *extack) 5550 { 5551 unsigned int filter_mask = filters->mask[0]; 5552 struct if_stats_msg *ifsm; 5553 struct nlmsghdr *nlh; 5554 struct nlattr *attr; 5555 int s_prividx = *prividx; 5556 int err; 5557 5558 ASSERT_RTNL(); 5559 5560 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifsm), flags); 5561 if (!nlh) 5562 return -EMSGSIZE; 5563 5564 ifsm = nlmsg_data(nlh); 5565 ifsm->family = PF_UNSPEC; 5566 ifsm->pad1 = 0; 5567 ifsm->pad2 = 0; 5568 ifsm->ifindex = dev->ifindex; 5569 ifsm->filter_mask = filter_mask; 5570 5571 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, *idxattr)) { 5572 struct rtnl_link_stats64 *sp; 5573 5574 attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64, 5575 sizeof(struct rtnl_link_stats64), 5576 IFLA_STATS_UNSPEC); 5577 if (!attr) { 5578 err = -EMSGSIZE; 5579 goto nla_put_failure; 5580 } 5581 5582 sp = nla_data(attr); 5583 dev_get_stats(dev, sp); 5584 } 5585 5586 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, *idxattr)) { 5587 const struct rtnl_link_ops *ops = dev->rtnl_link_ops; 5588 5589 if (ops && ops->fill_linkxstats) { 5590 *idxattr = IFLA_STATS_LINK_XSTATS; 5591 attr = nla_nest_start_noflag(skb, 5592 IFLA_STATS_LINK_XSTATS); 5593 if (!attr) { 5594 err = -EMSGSIZE; 5595 goto nla_put_failure; 5596 } 5597 5598 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr); 5599 nla_nest_end(skb, attr); 5600 if (err) 5601 goto nla_put_failure; 5602 *idxattr = 0; 5603 } 5604 } 5605 5606 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 5607 *idxattr)) { 5608 const struct rtnl_link_ops *ops = NULL; 5609 const struct net_device *master; 5610 5611 master = netdev_master_upper_dev_get(dev); 5612 if (master) 5613 ops = master->rtnl_link_ops; 5614 if (ops && ops->fill_linkxstats) { 5615 *idxattr = IFLA_STATS_LINK_XSTATS_SLAVE; 5616 attr = nla_nest_start_noflag(skb, 5617 IFLA_STATS_LINK_XSTATS_SLAVE); 5618 if (!attr) { 5619 err = -EMSGSIZE; 5620 goto nla_put_failure; 5621 } 5622 5623 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr); 5624 nla_nest_end(skb, attr); 5625 if (err) 5626 goto nla_put_failure; 5627 *idxattr = 0; 5628 } 5629 } 5630 5631 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 5632 *idxattr)) { 5633 u32 off_filter_mask; 5634 5635 off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS]; 5636 *idxattr = IFLA_STATS_LINK_OFFLOAD_XSTATS; 5637 attr = nla_nest_start_noflag(skb, 5638 IFLA_STATS_LINK_OFFLOAD_XSTATS); 5639 if (!attr) { 5640 err = -EMSGSIZE; 5641 goto nla_put_failure; 5642 } 5643 5644 err = rtnl_offload_xstats_fill(skb, dev, prividx, 5645 off_filter_mask, extack); 5646 if (err == -ENODATA) 5647 nla_nest_cancel(skb, attr); 5648 else 5649 nla_nest_end(skb, attr); 5650 5651 if (err && err != -ENODATA) 5652 goto nla_put_failure; 5653 *idxattr = 0; 5654 } 5655 5656 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, *idxattr)) { 5657 struct rtnl_af_ops *af_ops; 5658 5659 *idxattr = IFLA_STATS_AF_SPEC; 5660 attr = nla_nest_start_noflag(skb, IFLA_STATS_AF_SPEC); 5661 if (!attr) { 5662 err = -EMSGSIZE; 5663 goto nla_put_failure; 5664 } 5665 5666 rcu_read_lock(); 5667 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) { 5668 if (af_ops->fill_stats_af) { 5669 struct nlattr *af; 5670 5671 af = 
nla_nest_start_noflag(skb, 5672 af_ops->family); 5673 if (!af) { 5674 rcu_read_unlock(); 5675 err = -EMSGSIZE; 5676 goto nla_put_failure; 5677 } 5678 err = af_ops->fill_stats_af(skb, dev); 5679 5680 if (err == -ENODATA) { 5681 nla_nest_cancel(skb, af); 5682 } else if (err < 0) { 5683 rcu_read_unlock(); 5684 goto nla_put_failure; 5685 } 5686 5687 nla_nest_end(skb, af); 5688 } 5689 } 5690 rcu_read_unlock(); 5691 5692 nla_nest_end(skb, attr); 5693 5694 *idxattr = 0; 5695 } 5696 5697 nlmsg_end(skb, nlh); 5698 5699 return 0; 5700 5701 nla_put_failure: 5702 /* not a multi message or no progress mean a real error */ 5703 if (!(flags & NLM_F_MULTI) || s_prividx == *prividx) 5704 nlmsg_cancel(skb, nlh); 5705 else 5706 nlmsg_end(skb, nlh); 5707 5708 return err; 5709 } 5710 5711 static size_t if_nlmsg_stats_size(const struct net_device *dev, 5712 const struct rtnl_stats_dump_filters *filters) 5713 { 5714 size_t size = NLMSG_ALIGN(sizeof(struct if_stats_msg)); 5715 unsigned int filter_mask = filters->mask[0]; 5716 5717 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0)) 5718 size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64)); 5719 5720 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, 0)) { 5721 const struct rtnl_link_ops *ops = dev->rtnl_link_ops; 5722 int attr = IFLA_STATS_LINK_XSTATS; 5723 5724 if (ops && ops->get_linkxstats_size) { 5725 size += nla_total_size(ops->get_linkxstats_size(dev, 5726 attr)); 5727 /* for IFLA_STATS_LINK_XSTATS */ 5728 size += nla_total_size(0); 5729 } 5730 } 5731 5732 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 0)) { 5733 struct net_device *_dev = (struct net_device *)dev; 5734 const struct rtnl_link_ops *ops = NULL; 5735 const struct net_device *master; 5736 5737 /* netdev_master_upper_dev_get can't take const */ 5738 master = netdev_master_upper_dev_get(_dev); 5739 if (master) 5740 ops = master->rtnl_link_ops; 5741 if (ops && ops->get_linkxstats_size) { 5742 int attr = IFLA_STATS_LINK_XSTATS_SLAVE; 5743 5744 size += nla_total_size(ops->get_linkxstats_size(dev, 5745 attr)); 5746 /* for IFLA_STATS_LINK_XSTATS_SLAVE */ 5747 size += nla_total_size(0); 5748 } 5749 } 5750 5751 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0)) { 5752 u32 off_filter_mask; 5753 5754 off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS]; 5755 size += rtnl_offload_xstats_get_size(dev, off_filter_mask); 5756 } 5757 5758 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, 0)) { 5759 struct rtnl_af_ops *af_ops; 5760 5761 /* for IFLA_STATS_AF_SPEC */ 5762 size += nla_total_size(0); 5763 5764 rcu_read_lock(); 5765 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) { 5766 if (af_ops->get_stats_af_size) { 5767 size += nla_total_size( 5768 af_ops->get_stats_af_size(dev)); 5769 5770 /* for AF_* */ 5771 size += nla_total_size(0); 5772 } 5773 } 5774 rcu_read_unlock(); 5775 } 5776 5777 return size; 5778 } 5779 5780 #define RTNL_STATS_OFFLOAD_XSTATS_VALID ((1 << __IFLA_OFFLOAD_XSTATS_MAX) - 1) 5781 5782 static const struct nla_policy 5783 rtnl_stats_get_policy_filters[IFLA_STATS_MAX + 1] = { 5784 [IFLA_STATS_LINK_OFFLOAD_XSTATS] = 5785 NLA_POLICY_MASK(NLA_U32, RTNL_STATS_OFFLOAD_XSTATS_VALID), 5786 }; 5787 5788 static const struct nla_policy 5789 rtnl_stats_get_policy[IFLA_STATS_GETSET_MAX + 1] = { 5790 [IFLA_STATS_GET_FILTERS] = 5791 NLA_POLICY_NESTED(rtnl_stats_get_policy_filters), 5792 }; 5793 5794 static const struct nla_policy 5795 ifla_stats_set_policy[IFLA_STATS_GETSET_MAX + 1] = { 5796 
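	/* A u8 boolean: enables or disables HW offload of L3 stats via
	 * netdev_offload_xstats_enable()/_disable() in rtnl_stats_set(). */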
[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS] = NLA_POLICY_MAX(NLA_U8, 1), 5797 }; 5798 5799 static int rtnl_stats_get_parse_filters(struct nlattr *ifla_filters, 5800 struct rtnl_stats_dump_filters *filters, 5801 struct netlink_ext_ack *extack) 5802 { 5803 struct nlattr *tb[IFLA_STATS_MAX + 1]; 5804 int err; 5805 int at; 5806 5807 err = nla_parse_nested(tb, IFLA_STATS_MAX, ifla_filters, 5808 rtnl_stats_get_policy_filters, extack); 5809 if (err < 0) 5810 return err; 5811 5812 for (at = 1; at <= IFLA_STATS_MAX; at++) { 5813 if (tb[at]) { 5814 if (!(filters->mask[0] & IFLA_STATS_FILTER_BIT(at))) { 5815 NL_SET_ERR_MSG(extack, "Filtered attribute not enabled in filter_mask"); 5816 return -EINVAL; 5817 } 5818 filters->mask[at] = nla_get_u32(tb[at]); 5819 } 5820 } 5821 5822 return 0; 5823 } 5824 5825 static int rtnl_stats_get_parse(const struct nlmsghdr *nlh, 5826 u32 filter_mask, 5827 struct rtnl_stats_dump_filters *filters, 5828 struct netlink_ext_ack *extack) 5829 { 5830 struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1]; 5831 int err; 5832 int i; 5833 5834 filters->mask[0] = filter_mask; 5835 for (i = 1; i < ARRAY_SIZE(filters->mask); i++) 5836 filters->mask[i] = -1U; 5837 5838 err = nlmsg_parse(nlh, sizeof(struct if_stats_msg), tb, 5839 IFLA_STATS_GETSET_MAX, rtnl_stats_get_policy, extack); 5840 if (err < 0) 5841 return err; 5842 5843 if (tb[IFLA_STATS_GET_FILTERS]) { 5844 err = rtnl_stats_get_parse_filters(tb[IFLA_STATS_GET_FILTERS], 5845 filters, extack); 5846 if (err) 5847 return err; 5848 } 5849 5850 return 0; 5851 } 5852 5853 static int rtnl_valid_stats_req(const struct nlmsghdr *nlh, bool strict_check, 5854 bool is_dump, struct netlink_ext_ack *extack) 5855 { 5856 struct if_stats_msg *ifsm; 5857 5858 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifsm))) { 5859 NL_SET_ERR_MSG(extack, "Invalid header for stats dump"); 5860 return -EINVAL; 5861 } 5862 5863 if (!strict_check) 5864 return 0; 5865 5866 ifsm = nlmsg_data(nlh); 5867 5868 /* only requests using strict checks can pass data to influence 5869 * the dump. The legacy exception is filter_mask. 
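	 * filter_mask predates strict checking and lives in the fixed
	 * if_stats_msg header, so it is honoured for legacy requests too;
	 * under strict checking every other header field must be zero
	 * (including ifindex for dumps).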
5870 */ 5871 if (ifsm->pad1 || ifsm->pad2 || (is_dump && ifsm->ifindex)) { 5872 NL_SET_ERR_MSG(extack, "Invalid values in header for stats dump request"); 5873 return -EINVAL; 5874 } 5875 if (ifsm->filter_mask >= IFLA_STATS_FILTER_BIT(IFLA_STATS_MAX + 1)) { 5876 NL_SET_ERR_MSG(extack, "Invalid stats requested through filter mask"); 5877 return -EINVAL; 5878 } 5879 5880 return 0; 5881 } 5882 5883 static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh, 5884 struct netlink_ext_ack *extack) 5885 { 5886 struct rtnl_stats_dump_filters filters; 5887 struct net *net = sock_net(skb->sk); 5888 struct net_device *dev = NULL; 5889 int idxattr = 0, prividx = 0; 5890 struct if_stats_msg *ifsm; 5891 struct sk_buff *nskb; 5892 int err; 5893 5894 err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb), 5895 false, extack); 5896 if (err) 5897 return err; 5898 5899 ifsm = nlmsg_data(nlh); 5900 if (ifsm->ifindex > 0) 5901 dev = __dev_get_by_index(net, ifsm->ifindex); 5902 else 5903 return -EINVAL; 5904 5905 if (!dev) 5906 return -ENODEV; 5907 5908 if (!ifsm->filter_mask) { 5909 NL_SET_ERR_MSG(extack, "Filter mask must be set for stats get"); 5910 return -EINVAL; 5911 } 5912 5913 err = rtnl_stats_get_parse(nlh, ifsm->filter_mask, &filters, extack); 5914 if (err) 5915 return err; 5916 5917 nskb = nlmsg_new(if_nlmsg_stats_size(dev, &filters), GFP_KERNEL); 5918 if (!nskb) 5919 return -ENOBUFS; 5920 5921 err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS, 5922 NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0, 5923 0, &filters, &idxattr, &prividx, extack); 5924 if (err < 0) { 5925 /* -EMSGSIZE implies BUG in if_nlmsg_stats_size */ 5926 WARN_ON(err == -EMSGSIZE); 5927 kfree_skb(nskb); 5928 } else { 5929 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid); 5930 } 5931 5932 return err; 5933 } 5934 5935 static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb) 5936 { 5937 struct netlink_ext_ack *extack = cb->extack; 5938 int h, s_h, err, s_idx, s_idxattr, s_prividx; 5939 struct rtnl_stats_dump_filters filters; 5940 struct net *net = sock_net(skb->sk); 5941 unsigned int flags = NLM_F_MULTI; 5942 struct if_stats_msg *ifsm; 5943 struct hlist_head *head; 5944 struct net_device *dev; 5945 int idx = 0; 5946 5947 s_h = cb->args[0]; 5948 s_idx = cb->args[1]; 5949 s_idxattr = cb->args[2]; 5950 s_prividx = cb->args[3]; 5951 5952 cb->seq = net->dev_base_seq; 5953 5954 err = rtnl_valid_stats_req(cb->nlh, cb->strict_check, true, extack); 5955 if (err) 5956 return err; 5957 5958 ifsm = nlmsg_data(cb->nlh); 5959 if (!ifsm->filter_mask) { 5960 NL_SET_ERR_MSG(extack, "Filter mask must be set for stats dump"); 5961 return -EINVAL; 5962 } 5963 5964 err = rtnl_stats_get_parse(cb->nlh, ifsm->filter_mask, &filters, 5965 extack); 5966 if (err) 5967 return err; 5968 5969 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { 5970 idx = 0; 5971 head = &net->dev_index_head[h]; 5972 hlist_for_each_entry(dev, head, index_hlist) { 5973 if (idx < s_idx) 5974 goto cont; 5975 err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS, 5976 NETLINK_CB(cb->skb).portid, 5977 cb->nlh->nlmsg_seq, 0, 5978 flags, &filters, 5979 &s_idxattr, &s_prividx, 5980 extack); 5981 /* If we ran out of room on the first message, 5982 * we're in trouble 5983 */ 5984 WARN_ON((err == -EMSGSIZE) && (skb->len == 0)); 5985 5986 if (err < 0) 5987 goto out; 5988 s_prividx = 0; 5989 s_idxattr = 0; 5990 nl_dump_check_consistent(cb, nlmsg_hdr(skb)); 5991 cont: 5992 idx++; 5993 } 5994 } 5995 out: 5996 cb->args[3] = s_prividx; 5997 cb->args[2] = s_idxattr; 
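	/* args[2]/args[3] (stored just above) resume attribute filling
	 * within a single device's stats message; args[0]/args[1] (stored
	 * below) resume the outer hash-bucket/device walk.
	 */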
5998 cb->args[1] = idx; 5999 cb->args[0] = h; 6000 6001 return skb->len; 6002 } 6003 6004 void rtnl_offload_xstats_notify(struct net_device *dev) 6005 { 6006 struct rtnl_stats_dump_filters response_filters = {}; 6007 struct net *net = dev_net(dev); 6008 int idxattr = 0, prividx = 0; 6009 struct sk_buff *skb; 6010 int err = -ENOBUFS; 6011 6012 ASSERT_RTNL(); 6013 6014 response_filters.mask[0] |= 6015 IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS); 6016 response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |= 6017 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO); 6018 6019 skb = nlmsg_new(if_nlmsg_stats_size(dev, &response_filters), 6020 GFP_KERNEL); 6021 if (!skb) 6022 goto errout; 6023 6024 err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS, 0, 0, 0, 0, 6025 &response_filters, &idxattr, &prividx, NULL); 6026 if (err < 0) { 6027 kfree_skb(skb); 6028 goto errout; 6029 } 6030 6031 rtnl_notify(skb, net, 0, RTNLGRP_STATS, NULL, GFP_KERNEL); 6032 return; 6033 6034 errout: 6035 rtnl_set_sk_err(net, RTNLGRP_STATS, err); 6036 } 6037 EXPORT_SYMBOL(rtnl_offload_xstats_notify); 6038 6039 static int rtnl_stats_set(struct sk_buff *skb, struct nlmsghdr *nlh, 6040 struct netlink_ext_ack *extack) 6041 { 6042 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; 6043 struct rtnl_stats_dump_filters response_filters = {}; 6044 struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1]; 6045 struct net *net = sock_net(skb->sk); 6046 struct net_device *dev = NULL; 6047 struct if_stats_msg *ifsm; 6048 bool notify = false; 6049 int err; 6050 6051 err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb), 6052 false, extack); 6053 if (err) 6054 return err; 6055 6056 ifsm = nlmsg_data(nlh); 6057 if (ifsm->family != AF_UNSPEC) { 6058 NL_SET_ERR_MSG(extack, "Address family should be AF_UNSPEC"); 6059 return -EINVAL; 6060 } 6061 6062 if (ifsm->ifindex > 0) 6063 dev = __dev_get_by_index(net, ifsm->ifindex); 6064 else 6065 return -EINVAL; 6066 6067 if (!dev) 6068 return -ENODEV; 6069 6070 if (ifsm->filter_mask) { 6071 NL_SET_ERR_MSG(extack, "Filter mask must be 0 for stats set"); 6072 return -EINVAL; 6073 } 6074 6075 err = nlmsg_parse(nlh, sizeof(*ifsm), tb, IFLA_STATS_GETSET_MAX, 6076 ifla_stats_set_policy, extack); 6077 if (err < 0) 6078 return err; 6079 6080 if (tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]) { 6081 u8 req = nla_get_u8(tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]); 6082 6083 if (req) 6084 err = netdev_offload_xstats_enable(dev, t_l3, extack); 6085 else 6086 err = netdev_offload_xstats_disable(dev, t_l3); 6087 6088 if (!err) 6089 notify = true; 6090 else if (err != -EALREADY) 6091 return err; 6092 6093 response_filters.mask[0] |= 6094 IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS); 6095 response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |= 6096 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO); 6097 } 6098 6099 if (notify) 6100 rtnl_offload_xstats_notify(dev); 6101 6102 return 0; 6103 } 6104 6105 static int rtnl_mdb_valid_dump_req(const struct nlmsghdr *nlh, 6106 struct netlink_ext_ack *extack) 6107 { 6108 struct br_port_msg *bpm; 6109 6110 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*bpm))) { 6111 NL_SET_ERR_MSG(extack, "Invalid header for mdb dump request"); 6112 return -EINVAL; 6113 } 6114 6115 bpm = nlmsg_data(nlh); 6116 if (bpm->ifindex) { 6117 NL_SET_ERR_MSG(extack, "Filtering by device index is not supported for mdb dump request"); 6118 return -EINVAL; 6119 } 6120 if (nlmsg_attrlen(nlh, sizeof(*bpm))) { 6121 NL_SET_ERR_MSG(extack, "Invalid data after header in mdb dump 
request"); 6122 return -EINVAL; 6123 } 6124 6125 return 0; 6126 } 6127 6128 struct rtnl_mdb_dump_ctx { 6129 long idx; 6130 }; 6131 6132 static int rtnl_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb) 6133 { 6134 struct rtnl_mdb_dump_ctx *ctx = (void *)cb->ctx; 6135 struct net *net = sock_net(skb->sk); 6136 struct net_device *dev; 6137 int idx, s_idx; 6138 int err; 6139 6140 NL_ASSERT_DUMP_CTX_FITS(struct rtnl_mdb_dump_ctx); 6141 6142 if (cb->strict_check) { 6143 err = rtnl_mdb_valid_dump_req(cb->nlh, cb->extack); 6144 if (err) 6145 return err; 6146 } 6147 6148 s_idx = ctx->idx; 6149 idx = 0; 6150 6151 for_each_netdev(net, dev) { 6152 if (idx < s_idx) 6153 goto skip; 6154 if (!dev->netdev_ops->ndo_mdb_dump) 6155 goto skip; 6156 6157 err = dev->netdev_ops->ndo_mdb_dump(dev, skb, cb); 6158 if (err == -EMSGSIZE) 6159 goto out; 6160 /* Moving on to next device, reset markers and sequence 6161 * counters since they are all maintained per-device. 6162 */ 6163 memset(cb->ctx, 0, sizeof(cb->ctx)); 6164 cb->prev_seq = 0; 6165 cb->seq = 0; 6166 skip: 6167 idx++; 6168 } 6169 6170 out: 6171 ctx->idx = idx; 6172 return skb->len; 6173 } 6174 6175 static int rtnl_validate_mdb_entry(const struct nlattr *attr, 6176 struct netlink_ext_ack *extack) 6177 { 6178 struct br_mdb_entry *entry = nla_data(attr); 6179 6180 if (nla_len(attr) != sizeof(struct br_mdb_entry)) { 6181 NL_SET_ERR_MSG_ATTR(extack, attr, "Invalid attribute length"); 6182 return -EINVAL; 6183 } 6184 6185 if (entry->ifindex == 0) { 6186 NL_SET_ERR_MSG(extack, "Zero entry ifindex is not allowed"); 6187 return -EINVAL; 6188 } 6189 6190 if (entry->addr.proto == htons(ETH_P_IP)) { 6191 if (!ipv4_is_multicast(entry->addr.u.ip4) && 6192 !ipv4_is_zeronet(entry->addr.u.ip4)) { 6193 NL_SET_ERR_MSG(extack, "IPv4 entry group address is not multicast or 0.0.0.0"); 6194 return -EINVAL; 6195 } 6196 if (ipv4_is_local_multicast(entry->addr.u.ip4)) { 6197 NL_SET_ERR_MSG(extack, "IPv4 entry group address is local multicast"); 6198 return -EINVAL; 6199 } 6200 #if IS_ENABLED(CONFIG_IPV6) 6201 } else if (entry->addr.proto == htons(ETH_P_IPV6)) { 6202 if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6)) { 6203 NL_SET_ERR_MSG(extack, "IPv6 entry group address is link-local all nodes"); 6204 return -EINVAL; 6205 } 6206 #endif 6207 } else if (entry->addr.proto == 0) { 6208 /* L2 mdb */ 6209 if (!is_multicast_ether_addr(entry->addr.u.mac_addr)) { 6210 NL_SET_ERR_MSG(extack, "L2 entry group is not multicast"); 6211 return -EINVAL; 6212 } 6213 } else { 6214 NL_SET_ERR_MSG(extack, "Unknown entry protocol"); 6215 return -EINVAL; 6216 } 6217 6218 if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) { 6219 NL_SET_ERR_MSG(extack, "Unknown entry state"); 6220 return -EINVAL; 6221 } 6222 if (entry->vid >= VLAN_VID_MASK) { 6223 NL_SET_ERR_MSG(extack, "Invalid entry VLAN id"); 6224 return -EINVAL; 6225 } 6226 6227 return 0; 6228 } 6229 6230 static const struct nla_policy mdba_policy[MDBA_SET_ENTRY_MAX + 1] = { 6231 [MDBA_SET_ENTRY_UNSPEC] = { .strict_start_type = MDBA_SET_ENTRY_ATTRS + 1 }, 6232 [MDBA_SET_ENTRY] = NLA_POLICY_VALIDATE_FN(NLA_BINARY, 6233 rtnl_validate_mdb_entry, 6234 sizeof(struct br_mdb_entry)), 6235 [MDBA_SET_ENTRY_ATTRS] = { .type = NLA_NESTED }, 6236 }; 6237 6238 static int rtnl_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, 6239 struct netlink_ext_ack *extack) 6240 { 6241 struct nlattr *tb[MDBA_SET_ENTRY_MAX + 1]; 6242 struct net *net = sock_net(skb->sk); 6243 struct br_port_msg *bpm; 6244 struct net_device *dev; 6245 int err; 6246 
6247 err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb, 6248 MDBA_SET_ENTRY_MAX, mdba_policy, extack); 6249 if (err) 6250 return err; 6251 6252 bpm = nlmsg_data(nlh); 6253 if (!bpm->ifindex) { 6254 NL_SET_ERR_MSG(extack, "Invalid ifindex"); 6255 return -EINVAL; 6256 } 6257 6258 dev = __dev_get_by_index(net, bpm->ifindex); 6259 if (!dev) { 6260 NL_SET_ERR_MSG(extack, "Device doesn't exist"); 6261 return -ENODEV; 6262 } 6263 6264 if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_SET_ENTRY)) { 6265 NL_SET_ERR_MSG(extack, "Missing MDBA_SET_ENTRY attribute"); 6266 return -EINVAL; 6267 } 6268 6269 if (!dev->netdev_ops->ndo_mdb_add) { 6270 NL_SET_ERR_MSG(extack, "Device does not support MDB operations"); 6271 return -EOPNOTSUPP; 6272 } 6273 6274 return dev->netdev_ops->ndo_mdb_add(dev, tb, nlh->nlmsg_flags, extack); 6275 } 6276 6277 static int rtnl_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, 6278 struct netlink_ext_ack *extack) 6279 { 6280 struct nlattr *tb[MDBA_SET_ENTRY_MAX + 1]; 6281 struct net *net = sock_net(skb->sk); 6282 struct br_port_msg *bpm; 6283 struct net_device *dev; 6284 int err; 6285 6286 err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb, 6287 MDBA_SET_ENTRY_MAX, mdba_policy, extack); 6288 if (err) 6289 return err; 6290 6291 bpm = nlmsg_data(nlh); 6292 if (!bpm->ifindex) { 6293 NL_SET_ERR_MSG(extack, "Invalid ifindex"); 6294 return -EINVAL; 6295 } 6296 6297 dev = __dev_get_by_index(net, bpm->ifindex); 6298 if (!dev) { 6299 NL_SET_ERR_MSG(extack, "Device doesn't exist"); 6300 return -ENODEV; 6301 } 6302 6303 if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_SET_ENTRY)) { 6304 NL_SET_ERR_MSG(extack, "Missing MDBA_SET_ENTRY attribute"); 6305 return -EINVAL; 6306 } 6307 6308 if (!dev->netdev_ops->ndo_mdb_del) { 6309 NL_SET_ERR_MSG(extack, "Device does not support MDB operations"); 6310 return -EOPNOTSUPP; 6311 } 6312 6313 return dev->netdev_ops->ndo_mdb_del(dev, tb, extack); 6314 } 6315 6316 /* Process one rtnetlink message. 
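 *
 * Dispatch is two-level: the family byte from the rtgenmsg payload and the
 * message type select a struct rtnl_link from rtnl_msg_handlers, with
 * PF_UNSPEC as the fallback for families that registered nothing. GET
 * requests carrying NLM_F_DUMP are handed to netlink_dump_start(); non-GET
 * kinds additionally require CAP_NET_ADMIN, and anything that is not a
 * dump runs the doit handler, either under RCU (RTNL_FLAG_DOIT_UNLOCKED)
 * or under the RTNL mutex.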
*/ 6317 6318 static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, 6319 struct netlink_ext_ack *extack) 6320 { 6321 struct net *net = sock_net(skb->sk); 6322 struct rtnl_link *link; 6323 enum rtnl_kinds kind; 6324 struct module *owner; 6325 int err = -EOPNOTSUPP; 6326 rtnl_doit_func doit; 6327 unsigned int flags; 6328 int family; 6329 int type; 6330 6331 type = nlh->nlmsg_type; 6332 if (type > RTM_MAX) 6333 return -EOPNOTSUPP; 6334 6335 type -= RTM_BASE; 6336 6337 /* All the messages must have at least 1 byte length */ 6338 if (nlmsg_len(nlh) < sizeof(struct rtgenmsg)) 6339 return 0; 6340 6341 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family; 6342 kind = rtnl_msgtype_kind(type); 6343 6344 if (kind != RTNL_KIND_GET && !netlink_net_capable(skb, CAP_NET_ADMIN)) 6345 return -EPERM; 6346 6347 rcu_read_lock(); 6348 if (kind == RTNL_KIND_GET && (nlh->nlmsg_flags & NLM_F_DUMP)) { 6349 struct sock *rtnl; 6350 rtnl_dumpit_func dumpit; 6351 u32 min_dump_alloc = 0; 6352 6353 link = rtnl_get_link(family, type); 6354 if (!link || !link->dumpit) { 6355 family = PF_UNSPEC; 6356 link = rtnl_get_link(family, type); 6357 if (!link || !link->dumpit) 6358 goto err_unlock; 6359 } 6360 owner = link->owner; 6361 dumpit = link->dumpit; 6362 6363 if (type == RTM_GETLINK - RTM_BASE) 6364 min_dump_alloc = rtnl_calcit(skb, nlh); 6365 6366 err = 0; 6367 /* need to do this before rcu_read_unlock() */ 6368 if (!try_module_get(owner)) 6369 err = -EPROTONOSUPPORT; 6370 6371 rcu_read_unlock(); 6372 6373 rtnl = net->rtnl; 6374 if (err == 0) { 6375 struct netlink_dump_control c = { 6376 .dump = dumpit, 6377 .min_dump_alloc = min_dump_alloc, 6378 .module = owner, 6379 }; 6380 err = netlink_dump_start(rtnl, skb, nlh, &c); 6381 /* netlink_dump_start() will keep a reference on 6382 * module if dump is still in progress. 
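			 * The try_module_get() reference above only had to
			 * pin the handler until netlink_dump_start() took
			 * its own via the .module field of the control
			 * block, so dropping it here is safe even for
			 * long-running dumps.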

static void rtnetlink_rcv(struct sk_buff *skb)
{
	netlink_rcv_skb(skb, &rtnetlink_rcv_msg);
}

static int rtnetlink_bind(struct net *net, int group)
{
	switch (group) {
	case RTNLGRP_IPV4_MROUTE_R:
	case RTNLGRP_IPV6_MROUTE_R:
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		break;
	}
	return 0;
}

static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REBOOT:
	case NETDEV_CHANGEMTU:
	case NETDEV_CHANGEADDR:
	case NETDEV_CHANGENAME:
	case NETDEV_FEAT_CHANGE:
	case NETDEV_BONDING_FAILOVER:
	case NETDEV_POST_TYPE_CHANGE:
	case NETDEV_NOTIFY_PEERS:
	case NETDEV_CHANGEUPPER:
	case NETDEV_RESEND_IGMP:
	case NETDEV_CHANGEINFODATA:
	case NETDEV_CHANGELOWERSTATE:
	case NETDEV_CHANGE_TX_QUEUE_LEN:
		rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),
				   GFP_KERNEL, NULL, 0, 0, NULL);
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block rtnetlink_dev_notifier = {
	.notifier_call = rtnetlink_event,
};

static int __net_init rtnetlink_net_init(struct net *net)
{
	struct sock *sk;
	struct netlink_kernel_cfg cfg = {
		.groups		= RTNLGRP_MAX,
		.input		= rtnetlink_rcv,
		.cb_mutex	= &rtnl_mutex,
		.flags		= NL_CFG_F_NONROOT_RECV,
		.bind		= rtnetlink_bind,
	};

	sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg);
	if (!sk)
		return -ENOMEM;
	net->rtnl = sk;
	return 0;
}

static void __net_exit rtnetlink_net_exit(struct net *net)
{
	netlink_kernel_release(net->rtnl);
	net->rtnl = NULL;
}

static struct pernet_operations rtnetlink_net_ops = {
	.init = rtnetlink_net_init,
	.exit = rtnetlink_net_exit,
};
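
/* rtnetlink_event() above fans device state changes out as RTM_NEWLINK
 * notifications, and rtnetlink_net_init() creates the per-namespace
 * NETLINK_ROUTE socket they are delivered through.  An illustrative
 * userspace listener (error handling elided; not taken from this file):
 *
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *	struct sockaddr_nl sa = {
 *		.nl_family = AF_NETLINK,
 *		.nl_groups = RTMGRP_LINK,	// legacy bitmask for RTNLGRP_LINK
 *	};
 *	char buf[8192];
 *
 *	bind(fd, (struct sockaddr *)&sa, sizeof(sa));
 *	for (;;)
 *		recv(fd, buf, sizeof(buf), 0);	// RTM_NEWLINK notifications
 */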

void __init rtnetlink_init(void)
{
	if (register_pernet_subsys(&rtnetlink_net_ops))
		panic("rtnetlink_init: cannot initialize rtnetlink\n");

	register_netdevice_notifier(&rtnetlink_dev_notifier);

	rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
		      rtnl_dump_ifinfo, 0);
	rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, 0);

	rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, 0);
	rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, 0);
	rtnl_register(PF_UNSPEC, RTM_GETNETCONF, NULL, rtnl_dump_all, 0);

	rtnl_register(PF_UNSPEC, RTM_NEWLINKPROP, rtnl_newlinkprop, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELLINKPROP, rtnl_dellinkprop, NULL, 0);

	rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, 0);
	rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL,
		      RTNL_FLAG_BULK_DEL_SUPPORTED);
	rtnl_register(PF_BRIDGE, RTM_GETNEIGH, rtnl_fdb_get, rtnl_fdb_dump, 0);

	rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, 0);
	rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, 0);
	rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, 0);

	rtnl_register(PF_UNSPEC, RTM_GETSTATS, rtnl_stats_get, rtnl_stats_dump,
		      0);
	rtnl_register(PF_UNSPEC, RTM_SETSTATS, rtnl_stats_set, NULL, 0);

	rtnl_register(PF_BRIDGE, RTM_GETMDB, NULL, rtnl_mdb_dump, 0);
	rtnl_register(PF_BRIDGE, RTM_NEWMDB, rtnl_mdb_add, NULL, 0);
	rtnl_register(PF_BRIDGE, RTM_DELMDB, rtnl_mdb_del, NULL, 0);
}
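
/* Built-in handlers register through rtnl_register() as above; modular
 * code uses rtnl_register_module() instead, so the handler table pins
 * the owning module.  A sketch for a hypothetical module (foo_mdb_dump
 * and the protocol/msgtype pair are chosen purely for illustration):
 *
 *	err = rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETMDB,
 *				   NULL, foo_mdb_dump, 0);
 *	if (err)
 *		return err;
 *
 * with a matching rtnl_unregister(PF_BRIDGE, RTM_GETMDB) in the
 * module's exit path.
 */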