1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * INET An implementation of the TCP/IP protocol suite for the LINUX 4 * operating system. INET is implemented using the BSD Socket 5 * interface as the means of communication with the user level. 6 * 7 * Routing netlink socket interface: protocol independent part. 8 * 9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> 10 * 11 * Fixes: 12 * Vitaly E. Lavrov RTA_OK arithmetic was wrong. 13 */ 14 15 #include <linux/bitops.h> 16 #include <linux/errno.h> 17 #include <linux/module.h> 18 #include <linux/types.h> 19 #include <linux/socket.h> 20 #include <linux/kernel.h> 21 #include <linux/timer.h> 22 #include <linux/string.h> 23 #include <linux/sockios.h> 24 #include <linux/net.h> 25 #include <linux/fcntl.h> 26 #include <linux/mm.h> 27 #include <linux/slab.h> 28 #include <linux/interrupt.h> 29 #include <linux/capability.h> 30 #include <linux/skbuff.h> 31 #include <linux/init.h> 32 #include <linux/security.h> 33 #include <linux/mutex.h> 34 #include <linux/if_addr.h> 35 #include <linux/if_bridge.h> 36 #include <linux/if_vlan.h> 37 #include <linux/pci.h> 38 #include <linux/etherdevice.h> 39 #include <linux/bpf.h> 40 41 #include <linux/uaccess.h> 42 43 #include <linux/inet.h> 44 #include <linux/netdevice.h> 45 #include <net/ip.h> 46 #include <net/protocol.h> 47 #include <net/arp.h> 48 #include <net/route.h> 49 #include <net/udp.h> 50 #include <net/tcp.h> 51 #include <net/sock.h> 52 #include <net/pkt_sched.h> 53 #include <net/fib_rules.h> 54 #include <net/rtnetlink.h> 55 #include <net/net_namespace.h> 56 #include <net/devlink.h> 57 #if IS_ENABLED(CONFIG_IPV6) 58 #include <net/addrconf.h> 59 #endif 60 61 #include "dev.h" 62 63 #define RTNL_MAX_TYPE 50 64 #define RTNL_SLAVE_MAX_TYPE 44 65 66 struct rtnl_link { 67 rtnl_doit_func doit; 68 rtnl_dumpit_func dumpit; 69 struct module *owner; 70 unsigned int flags; 71 struct rcu_head rcu; 72 }; 73 74 static DEFINE_MUTEX(rtnl_mutex); 75 76 void rtnl_lock(void) 77 { 78 mutex_lock(&rtnl_mutex); 79 } 80 EXPORT_SYMBOL(rtnl_lock); 81 82 int rtnl_lock_killable(void) 83 { 84 return mutex_lock_killable(&rtnl_mutex); 85 } 86 EXPORT_SYMBOL(rtnl_lock_killable); 87 88 static struct sk_buff *defer_kfree_skb_list; 89 void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail) 90 { 91 if (head && tail) { 92 tail->next = defer_kfree_skb_list; 93 defer_kfree_skb_list = head; 94 } 95 } 96 EXPORT_SYMBOL(rtnl_kfree_skbs); 97 98 void __rtnl_unlock(void) 99 { 100 struct sk_buff *head = defer_kfree_skb_list; 101 102 defer_kfree_skb_list = NULL; 103 104 /* Ensure that we didn't actually add any TODO item when __rtnl_unlock() 105 * is used. In some places, e.g. in cfg80211, we have code that will do 106 * something like 107 * rtnl_lock() 108 * wiphy_lock() 109 * ... 110 * rtnl_unlock() 111 * 112 * and because netdev_run_todo() acquires the RTNL for items on the list 113 * we could cause a situation such as this: 114 * Thread 1 Thread 2 115 * rtnl_lock() 116 * unregister_netdevice() 117 * __rtnl_unlock() 118 * rtnl_lock() 119 * wiphy_lock() 120 * rtnl_unlock() 121 * netdev_run_todo() 122 * __rtnl_unlock() 123 * 124 * // list not empty now 125 * // because of thread 2 126 * rtnl_lock() 127 * while (!list_empty(...)) 128 * rtnl_lock() 129 * wiphy_lock() 130 * **** DEADLOCK **** 131 * 132 * However, usage of __rtnl_unlock() is rare, and so we can ensure that 133 * it's not used in cases where something is added to do the list. 134 */ 135 WARN_ON(!list_empty(&net_todo_list)); 136 137 mutex_unlock(&rtnl_mutex); 138 139 while (head) { 140 struct sk_buff *next = head->next; 141 142 kfree_skb(head); 143 cond_resched(); 144 head = next; 145 } 146 } 147 148 void rtnl_unlock(void) 149 { 150 /* This fellow will unlock it for us. */ 151 netdev_run_todo(); 152 } 153 EXPORT_SYMBOL(rtnl_unlock); 154 155 int rtnl_trylock(void) 156 { 157 return mutex_trylock(&rtnl_mutex); 158 } 159 EXPORT_SYMBOL(rtnl_trylock); 160 161 int rtnl_is_locked(void) 162 { 163 return mutex_is_locked(&rtnl_mutex); 164 } 165 EXPORT_SYMBOL(rtnl_is_locked); 166 167 bool refcount_dec_and_rtnl_lock(refcount_t *r) 168 { 169 return refcount_dec_and_mutex_lock(r, &rtnl_mutex); 170 } 171 EXPORT_SYMBOL(refcount_dec_and_rtnl_lock); 172 173 #ifdef CONFIG_PROVE_LOCKING 174 bool lockdep_rtnl_is_held(void) 175 { 176 return lockdep_is_held(&rtnl_mutex); 177 } 178 EXPORT_SYMBOL(lockdep_rtnl_is_held); 179 #endif /* #ifdef CONFIG_PROVE_LOCKING */ 180 181 static struct rtnl_link __rcu *__rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1]; 182 183 static inline int rtm_msgindex(int msgtype) 184 { 185 int msgindex = msgtype - RTM_BASE; 186 187 /* 188 * msgindex < 0 implies someone tried to register a netlink 189 * control code. msgindex >= RTM_NR_MSGTYPES may indicate that 190 * the message type has not been added to linux/rtnetlink.h 191 */ 192 BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES); 193 194 return msgindex; 195 } 196 197 static struct rtnl_link *rtnl_get_link(int protocol, int msgtype) 198 { 199 struct rtnl_link __rcu **tab; 200 201 if (protocol >= ARRAY_SIZE(rtnl_msg_handlers)) 202 protocol = PF_UNSPEC; 203 204 tab = rcu_dereference_rtnl(rtnl_msg_handlers[protocol]); 205 if (!tab) 206 tab = rcu_dereference_rtnl(rtnl_msg_handlers[PF_UNSPEC]); 207 208 return rcu_dereference_rtnl(tab[msgtype]); 209 } 210 211 static int rtnl_register_internal(struct module *owner, 212 int protocol, int msgtype, 213 rtnl_doit_func doit, rtnl_dumpit_func dumpit, 214 unsigned int flags) 215 { 216 struct rtnl_link *link, *old; 217 struct rtnl_link __rcu **tab; 218 int msgindex; 219 int ret = -ENOBUFS; 220 221 BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX); 222 msgindex = rtm_msgindex(msgtype); 223 224 rtnl_lock(); 225 tab = rtnl_dereference(rtnl_msg_handlers[protocol]); 226 if (tab == NULL) { 227 tab = kcalloc(RTM_NR_MSGTYPES, sizeof(void *), GFP_KERNEL); 228 if (!tab) 229 goto unlock; 230 231 /* ensures we see the 0 stores */ 232 rcu_assign_pointer(rtnl_msg_handlers[protocol], tab); 233 } 234 235 old = rtnl_dereference(tab[msgindex]); 236 if (old) { 237 link = kmemdup(old, sizeof(*old), GFP_KERNEL); 238 if (!link) 239 goto unlock; 240 } else { 241 link = kzalloc(sizeof(*link), GFP_KERNEL); 242 if (!link) 243 goto unlock; 244 } 245 246 WARN_ON(link->owner && link->owner != owner); 247 link->owner = owner; 248 249 WARN_ON(doit && link->doit && link->doit != doit); 250 if (doit) 251 link->doit = doit; 252 WARN_ON(dumpit && link->dumpit && link->dumpit != dumpit); 253 if (dumpit) 254 link->dumpit = dumpit; 255 256 WARN_ON(rtnl_msgtype_kind(msgtype) != RTNL_KIND_DEL && 257 (flags & RTNL_FLAG_BULK_DEL_SUPPORTED)); 258 link->flags |= flags; 259 260 /* publish protocol:msgtype */ 261 rcu_assign_pointer(tab[msgindex], link); 262 ret = 0; 263 if (old) 264 kfree_rcu(old, rcu); 265 unlock: 266 rtnl_unlock(); 267 return ret; 268 } 269 270 /** 271 * rtnl_register_module - Register a rtnetlink message type 272 * 273 * @owner: module registering the hook (THIS_MODULE) 274 * @protocol: Protocol family or PF_UNSPEC 275 * @msgtype: rtnetlink message type 276 * @doit: Function pointer called for each request message 277 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message 278 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions 279 * 280 * Like rtnl_register, but for use by removable modules. 281 */ 282 int rtnl_register_module(struct module *owner, 283 int protocol, int msgtype, 284 rtnl_doit_func doit, rtnl_dumpit_func dumpit, 285 unsigned int flags) 286 { 287 return rtnl_register_internal(owner, protocol, msgtype, 288 doit, dumpit, flags); 289 } 290 EXPORT_SYMBOL_GPL(rtnl_register_module); 291 292 /** 293 * rtnl_register - Register a rtnetlink message type 294 * @protocol: Protocol family or PF_UNSPEC 295 * @msgtype: rtnetlink message type 296 * @doit: Function pointer called for each request message 297 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message 298 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions 299 * 300 * Registers the specified function pointers (at least one of them has 301 * to be non-NULL) to be called whenever a request message for the 302 * specified protocol family and message type is received. 303 * 304 * The special protocol family PF_UNSPEC may be used to define fallback 305 * function pointers for the case when no entry for the specific protocol 306 * family exists. 307 */ 308 void rtnl_register(int protocol, int msgtype, 309 rtnl_doit_func doit, rtnl_dumpit_func dumpit, 310 unsigned int flags) 311 { 312 int err; 313 314 err = rtnl_register_internal(NULL, protocol, msgtype, doit, dumpit, 315 flags); 316 if (err) 317 pr_err("Unable to register rtnetlink message handler, " 318 "protocol = %d, message type = %d\n", protocol, msgtype); 319 } 320 321 /** 322 * rtnl_unregister - Unregister a rtnetlink message type 323 * @protocol: Protocol family or PF_UNSPEC 324 * @msgtype: rtnetlink message type 325 * 326 * Returns 0 on success or a negative error code. 327 */ 328 int rtnl_unregister(int protocol, int msgtype) 329 { 330 struct rtnl_link __rcu **tab; 331 struct rtnl_link *link; 332 int msgindex; 333 334 BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX); 335 msgindex = rtm_msgindex(msgtype); 336 337 rtnl_lock(); 338 tab = rtnl_dereference(rtnl_msg_handlers[protocol]); 339 if (!tab) { 340 rtnl_unlock(); 341 return -ENOENT; 342 } 343 344 link = rtnl_dereference(tab[msgindex]); 345 RCU_INIT_POINTER(tab[msgindex], NULL); 346 rtnl_unlock(); 347 348 kfree_rcu(link, rcu); 349 350 return 0; 351 } 352 EXPORT_SYMBOL_GPL(rtnl_unregister); 353 354 /** 355 * rtnl_unregister_all - Unregister all rtnetlink message type of a protocol 356 * @protocol : Protocol family or PF_UNSPEC 357 * 358 * Identical to calling rtnl_unregster() for all registered message types 359 * of a certain protocol family. 360 */ 361 void rtnl_unregister_all(int protocol) 362 { 363 struct rtnl_link __rcu **tab; 364 struct rtnl_link *link; 365 int msgindex; 366 367 BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX); 368 369 rtnl_lock(); 370 tab = rtnl_dereference(rtnl_msg_handlers[protocol]); 371 if (!tab) { 372 rtnl_unlock(); 373 return; 374 } 375 RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL); 376 for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) { 377 link = rtnl_dereference(tab[msgindex]); 378 if (!link) 379 continue; 380 381 RCU_INIT_POINTER(tab[msgindex], NULL); 382 kfree_rcu(link, rcu); 383 } 384 rtnl_unlock(); 385 386 synchronize_net(); 387 388 kfree(tab); 389 } 390 EXPORT_SYMBOL_GPL(rtnl_unregister_all); 391 392 int __rtnl_register_many(const struct rtnl_msg_handler *handlers, int n) 393 { 394 const struct rtnl_msg_handler *handler; 395 int i, err; 396 397 for (i = 0, handler = handlers; i < n; i++, handler++) { 398 err = rtnl_register_internal(handler->owner, handler->protocol, 399 handler->msgtype, handler->doit, 400 handler->dumpit, handler->flags); 401 if (err) { 402 __rtnl_unregister_many(handlers, i); 403 break; 404 } 405 } 406 407 return err; 408 } 409 EXPORT_SYMBOL_GPL(__rtnl_register_many); 410 411 void __rtnl_unregister_many(const struct rtnl_msg_handler *handlers, int n) 412 { 413 const struct rtnl_msg_handler *handler; 414 int i; 415 416 for (i = n - 1, handler = handlers + n - 1; i >= 0; i--, handler--) 417 rtnl_unregister(handler->protocol, handler->msgtype); 418 } 419 EXPORT_SYMBOL_GPL(__rtnl_unregister_many); 420 421 static LIST_HEAD(link_ops); 422 423 static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind) 424 { 425 const struct rtnl_link_ops *ops; 426 427 list_for_each_entry(ops, &link_ops, list) { 428 if (!strcmp(ops->kind, kind)) 429 return ops; 430 } 431 return NULL; 432 } 433 434 /** 435 * __rtnl_link_register - Register rtnl_link_ops with rtnetlink. 436 * @ops: struct rtnl_link_ops * to register 437 * 438 * The caller must hold the rtnl_mutex. This function should be used 439 * by drivers that create devices during module initialization. It 440 * must be called before registering the devices. 441 * 442 * Returns 0 on success or a negative error code. 443 */ 444 int __rtnl_link_register(struct rtnl_link_ops *ops) 445 { 446 if (rtnl_link_ops_get(ops->kind)) 447 return -EEXIST; 448 449 /* The check for alloc/setup is here because if ops 450 * does not have that filled up, it is not possible 451 * to use the ops for creating device. So do not 452 * fill up dellink as well. That disables rtnl_dellink. 453 */ 454 if ((ops->alloc || ops->setup) && !ops->dellink) 455 ops->dellink = unregister_netdevice_queue; 456 457 list_add_tail(&ops->list, &link_ops); 458 return 0; 459 } 460 EXPORT_SYMBOL_GPL(__rtnl_link_register); 461 462 /** 463 * rtnl_link_register - Register rtnl_link_ops with rtnetlink. 464 * @ops: struct rtnl_link_ops * to register 465 * 466 * Returns 0 on success or a negative error code. 467 */ 468 int rtnl_link_register(struct rtnl_link_ops *ops) 469 { 470 int err; 471 472 /* Sanity-check max sizes to avoid stack buffer overflow. */ 473 if (WARN_ON(ops->maxtype > RTNL_MAX_TYPE || 474 ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE)) 475 return -EINVAL; 476 477 rtnl_lock(); 478 err = __rtnl_link_register(ops); 479 rtnl_unlock(); 480 return err; 481 } 482 EXPORT_SYMBOL_GPL(rtnl_link_register); 483 484 static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops) 485 { 486 struct net_device *dev; 487 LIST_HEAD(list_kill); 488 489 for_each_netdev(net, dev) { 490 if (dev->rtnl_link_ops == ops) 491 ops->dellink(dev, &list_kill); 492 } 493 unregister_netdevice_many(&list_kill); 494 } 495 496 /** 497 * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink. 498 * @ops: struct rtnl_link_ops * to unregister 499 * 500 * The caller must hold the rtnl_mutex and guarantee net_namespace_list 501 * integrity (hold pernet_ops_rwsem for writing to close the race 502 * with setup_net() and cleanup_net()). 503 */ 504 void __rtnl_link_unregister(struct rtnl_link_ops *ops) 505 { 506 struct net *net; 507 508 for_each_net(net) { 509 __rtnl_kill_links(net, ops); 510 } 511 list_del(&ops->list); 512 } 513 EXPORT_SYMBOL_GPL(__rtnl_link_unregister); 514 515 /* Return with the rtnl_lock held when there are no network 516 * devices unregistering in any network namespace. 517 */ 518 static void rtnl_lock_unregistering_all(void) 519 { 520 struct net *net; 521 bool unregistering; 522 DEFINE_WAIT_FUNC(wait, woken_wake_function); 523 524 add_wait_queue(&netdev_unregistering_wq, &wait); 525 for (;;) { 526 unregistering = false; 527 rtnl_lock(); 528 /* We held write locked pernet_ops_rwsem, and parallel 529 * setup_net() and cleanup_net() are not possible. 530 */ 531 for_each_net(net) { 532 if (atomic_read(&net->dev_unreg_count) > 0) { 533 unregistering = true; 534 break; 535 } 536 } 537 if (!unregistering) 538 break; 539 __rtnl_unlock(); 540 541 wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); 542 } 543 remove_wait_queue(&netdev_unregistering_wq, &wait); 544 } 545 546 /** 547 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink. 548 * @ops: struct rtnl_link_ops * to unregister 549 */ 550 void rtnl_link_unregister(struct rtnl_link_ops *ops) 551 { 552 /* Close the race with setup_net() and cleanup_net() */ 553 down_write(&pernet_ops_rwsem); 554 rtnl_lock_unregistering_all(); 555 __rtnl_link_unregister(ops); 556 rtnl_unlock(); 557 up_write(&pernet_ops_rwsem); 558 } 559 EXPORT_SYMBOL_GPL(rtnl_link_unregister); 560 561 static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev) 562 { 563 struct net_device *master_dev; 564 const struct rtnl_link_ops *ops; 565 size_t size = 0; 566 567 rcu_read_lock(); 568 569 master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev); 570 if (!master_dev) 571 goto out; 572 573 ops = master_dev->rtnl_link_ops; 574 if (!ops || !ops->get_slave_size) 575 goto out; 576 /* IFLA_INFO_SLAVE_DATA + nested data */ 577 size = nla_total_size(sizeof(struct nlattr)) + 578 ops->get_slave_size(master_dev, dev); 579 580 out: 581 rcu_read_unlock(); 582 return size; 583 } 584 585 static size_t rtnl_link_get_size(const struct net_device *dev) 586 { 587 const struct rtnl_link_ops *ops = dev->rtnl_link_ops; 588 size_t size; 589 590 if (!ops) 591 return 0; 592 593 size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */ 594 nla_total_size(strlen(ops->kind) + 1); /* IFLA_INFO_KIND */ 595 596 if (ops->get_size) 597 /* IFLA_INFO_DATA + nested data */ 598 size += nla_total_size(sizeof(struct nlattr)) + 599 ops->get_size(dev); 600 601 if (ops->get_xstats_size) 602 /* IFLA_INFO_XSTATS */ 603 size += nla_total_size(ops->get_xstats_size(dev)); 604 605 size += rtnl_link_get_slave_info_data_size(dev); 606 607 return size; 608 } 609 610 static LIST_HEAD(rtnl_af_ops); 611 612 static const struct rtnl_af_ops *rtnl_af_lookup(const int family) 613 { 614 const struct rtnl_af_ops *ops; 615 616 ASSERT_RTNL(); 617 618 list_for_each_entry(ops, &rtnl_af_ops, list) { 619 if (ops->family == family) 620 return ops; 621 } 622 623 return NULL; 624 } 625 626 /** 627 * rtnl_af_register - Register rtnl_af_ops with rtnetlink. 628 * @ops: struct rtnl_af_ops * to register 629 * 630 * Returns 0 on success or a negative error code. 631 */ 632 void rtnl_af_register(struct rtnl_af_ops *ops) 633 { 634 rtnl_lock(); 635 list_add_tail_rcu(&ops->list, &rtnl_af_ops); 636 rtnl_unlock(); 637 } 638 EXPORT_SYMBOL_GPL(rtnl_af_register); 639 640 /** 641 * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink. 642 * @ops: struct rtnl_af_ops * to unregister 643 */ 644 void rtnl_af_unregister(struct rtnl_af_ops *ops) 645 { 646 rtnl_lock(); 647 list_del_rcu(&ops->list); 648 rtnl_unlock(); 649 650 synchronize_rcu(); 651 } 652 EXPORT_SYMBOL_GPL(rtnl_af_unregister); 653 654 static size_t rtnl_link_get_af_size(const struct net_device *dev, 655 u32 ext_filter_mask) 656 { 657 struct rtnl_af_ops *af_ops; 658 size_t size; 659 660 /* IFLA_AF_SPEC */ 661 size = nla_total_size(sizeof(struct nlattr)); 662 663 rcu_read_lock(); 664 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) { 665 if (af_ops->get_link_af_size) { 666 /* AF_* + nested data */ 667 size += nla_total_size(sizeof(struct nlattr)) + 668 af_ops->get_link_af_size(dev, ext_filter_mask); 669 } 670 } 671 rcu_read_unlock(); 672 673 return size; 674 } 675 676 static bool rtnl_have_link_slave_info(const struct net_device *dev) 677 { 678 struct net_device *master_dev; 679 bool ret = false; 680 681 rcu_read_lock(); 682 683 master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev); 684 if (master_dev && master_dev->rtnl_link_ops) 685 ret = true; 686 rcu_read_unlock(); 687 return ret; 688 } 689 690 static int rtnl_link_slave_info_fill(struct sk_buff *skb, 691 const struct net_device *dev) 692 { 693 struct net_device *master_dev; 694 const struct rtnl_link_ops *ops; 695 struct nlattr *slave_data; 696 int err; 697 698 master_dev = netdev_master_upper_dev_get((struct net_device *) dev); 699 if (!master_dev) 700 return 0; 701 ops = master_dev->rtnl_link_ops; 702 if (!ops) 703 return 0; 704 if (nla_put_string(skb, IFLA_INFO_SLAVE_KIND, ops->kind) < 0) 705 return -EMSGSIZE; 706 if (ops->fill_slave_info) { 707 slave_data = nla_nest_start_noflag(skb, IFLA_INFO_SLAVE_DATA); 708 if (!slave_data) 709 return -EMSGSIZE; 710 err = ops->fill_slave_info(skb, master_dev, dev); 711 if (err < 0) 712 goto err_cancel_slave_data; 713 nla_nest_end(skb, slave_data); 714 } 715 return 0; 716 717 err_cancel_slave_data: 718 nla_nest_cancel(skb, slave_data); 719 return err; 720 } 721 722 static int rtnl_link_info_fill(struct sk_buff *skb, 723 const struct net_device *dev) 724 { 725 const struct rtnl_link_ops *ops = dev->rtnl_link_ops; 726 struct nlattr *data; 727 int err; 728 729 if (!ops) 730 return 0; 731 if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0) 732 return -EMSGSIZE; 733 if (ops->fill_xstats) { 734 err = ops->fill_xstats(skb, dev); 735 if (err < 0) 736 return err; 737 } 738 if (ops->fill_info) { 739 data = nla_nest_start_noflag(skb, IFLA_INFO_DATA); 740 if (data == NULL) 741 return -EMSGSIZE; 742 err = ops->fill_info(skb, dev); 743 if (err < 0) 744 goto err_cancel_data; 745 nla_nest_end(skb, data); 746 } 747 return 0; 748 749 err_cancel_data: 750 nla_nest_cancel(skb, data); 751 return err; 752 } 753 754 static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev) 755 { 756 struct nlattr *linkinfo; 757 int err = -EMSGSIZE; 758 759 linkinfo = nla_nest_start_noflag(skb, IFLA_LINKINFO); 760 if (linkinfo == NULL) 761 goto out; 762 763 err = rtnl_link_info_fill(skb, dev); 764 if (err < 0) 765 goto err_cancel_link; 766 767 err = rtnl_link_slave_info_fill(skb, dev); 768 if (err < 0) 769 goto err_cancel_link; 770 771 nla_nest_end(skb, linkinfo); 772 return 0; 773 774 err_cancel_link: 775 nla_nest_cancel(skb, linkinfo); 776 out: 777 return err; 778 } 779 780 int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo) 781 { 782 struct sock *rtnl = net->rtnl; 783 784 return nlmsg_notify(rtnl, skb, pid, group, echo, GFP_KERNEL); 785 } 786 787 int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid) 788 { 789 struct sock *rtnl = net->rtnl; 790 791 return nlmsg_unicast(rtnl, skb, pid); 792 } 793 EXPORT_SYMBOL(rtnl_unicast); 794 795 void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group, 796 const struct nlmsghdr *nlh, gfp_t flags) 797 { 798 struct sock *rtnl = net->rtnl; 799 800 nlmsg_notify(rtnl, skb, pid, group, nlmsg_report(nlh), flags); 801 } 802 EXPORT_SYMBOL(rtnl_notify); 803 804 void rtnl_set_sk_err(struct net *net, u32 group, int error) 805 { 806 struct sock *rtnl = net->rtnl; 807 808 netlink_set_err(rtnl, 0, group, error); 809 } 810 EXPORT_SYMBOL(rtnl_set_sk_err); 811 812 int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics) 813 { 814 struct nlattr *mx; 815 int i, valid = 0; 816 817 /* nothing is dumped for dst_default_metrics, so just skip the loop */ 818 if (metrics == dst_default_metrics.metrics) 819 return 0; 820 821 mx = nla_nest_start_noflag(skb, RTA_METRICS); 822 if (mx == NULL) 823 return -ENOBUFS; 824 825 for (i = 0; i < RTAX_MAX; i++) { 826 if (metrics[i]) { 827 if (i == RTAX_CC_ALGO - 1) { 828 char tmp[TCP_CA_NAME_MAX], *name; 829 830 name = tcp_ca_get_name_by_key(metrics[i], tmp); 831 if (!name) 832 continue; 833 if (nla_put_string(skb, i + 1, name)) 834 goto nla_put_failure; 835 } else if (i == RTAX_FEATURES - 1) { 836 u32 user_features = metrics[i] & RTAX_FEATURE_MASK; 837 838 if (!user_features) 839 continue; 840 BUILD_BUG_ON(RTAX_FEATURE_MASK & DST_FEATURE_MASK); 841 if (nla_put_u32(skb, i + 1, user_features)) 842 goto nla_put_failure; 843 } else { 844 if (nla_put_u32(skb, i + 1, metrics[i])) 845 goto nla_put_failure; 846 } 847 valid++; 848 } 849 } 850 851 if (!valid) { 852 nla_nest_cancel(skb, mx); 853 return 0; 854 } 855 856 return nla_nest_end(skb, mx); 857 858 nla_put_failure: 859 nla_nest_cancel(skb, mx); 860 return -EMSGSIZE; 861 } 862 EXPORT_SYMBOL(rtnetlink_put_metrics); 863 864 int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id, 865 long expires, u32 error) 866 { 867 struct rta_cacheinfo ci = { 868 .rta_error = error, 869 .rta_id = id, 870 }; 871 872 if (dst) { 873 ci.rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse); 874 ci.rta_used = dst->__use; 875 ci.rta_clntref = rcuref_read(&dst->__rcuref); 876 } 877 if (expires) { 878 unsigned long clock; 879 880 clock = jiffies_to_clock_t(abs(expires)); 881 clock = min_t(unsigned long, clock, INT_MAX); 882 ci.rta_expires = (expires > 0) ? clock : -clock; 883 } 884 return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci); 885 } 886 EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo); 887 888 static void set_operstate(struct net_device *dev, unsigned char transition) 889 { 890 unsigned char operstate = dev->operstate; 891 892 switch (transition) { 893 case IF_OPER_UP: 894 if ((operstate == IF_OPER_DORMANT || 895 operstate == IF_OPER_TESTING || 896 operstate == IF_OPER_UNKNOWN) && 897 !netif_dormant(dev) && !netif_testing(dev)) 898 operstate = IF_OPER_UP; 899 break; 900 901 case IF_OPER_TESTING: 902 if (netif_oper_up(dev)) 903 operstate = IF_OPER_TESTING; 904 break; 905 906 case IF_OPER_DORMANT: 907 if (netif_oper_up(dev)) 908 operstate = IF_OPER_DORMANT; 909 break; 910 } 911 912 if (READ_ONCE(dev->operstate) != operstate) { 913 write_lock(&dev_base_lock); 914 WRITE_ONCE(dev->operstate, operstate); 915 write_unlock(&dev_base_lock); 916 netdev_state_change(dev); 917 } 918 } 919 920 static unsigned int rtnl_dev_get_flags(const struct net_device *dev) 921 { 922 return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) | 923 (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI)); 924 } 925 926 static unsigned int rtnl_dev_combine_flags(const struct net_device *dev, 927 const struct ifinfomsg *ifm) 928 { 929 unsigned int flags = ifm->ifi_flags; 930 931 /* bugwards compatibility: ifi_change == 0 is treated as ~0 */ 932 if (ifm->ifi_change) 933 flags = (flags & ifm->ifi_change) | 934 (rtnl_dev_get_flags(dev) & ~ifm->ifi_change); 935 936 return flags; 937 } 938 939 static void copy_rtnl_link_stats(struct rtnl_link_stats *a, 940 const struct rtnl_link_stats64 *b) 941 { 942 a->rx_packets = b->rx_packets; 943 a->tx_packets = b->tx_packets; 944 a->rx_bytes = b->rx_bytes; 945 a->tx_bytes = b->tx_bytes; 946 a->rx_errors = b->rx_errors; 947 a->tx_errors = b->tx_errors; 948 a->rx_dropped = b->rx_dropped; 949 a->tx_dropped = b->tx_dropped; 950 951 a->multicast = b->multicast; 952 a->collisions = b->collisions; 953 954 a->rx_length_errors = b->rx_length_errors; 955 a->rx_over_errors = b->rx_over_errors; 956 a->rx_crc_errors = b->rx_crc_errors; 957 a->rx_frame_errors = b->rx_frame_errors; 958 a->rx_fifo_errors = b->rx_fifo_errors; 959 a->rx_missed_errors = b->rx_missed_errors; 960 961 a->tx_aborted_errors = b->tx_aborted_errors; 962 a->tx_carrier_errors = b->tx_carrier_errors; 963 a->tx_fifo_errors = b->tx_fifo_errors; 964 a->tx_heartbeat_errors = b->tx_heartbeat_errors; 965 a->tx_window_errors = b->tx_window_errors; 966 967 a->rx_compressed = b->rx_compressed; 968 a->tx_compressed = b->tx_compressed; 969 970 a->rx_nohandler = b->rx_nohandler; 971 } 972 973 /* All VF info */ 974 static inline int rtnl_vfinfo_size(const struct net_device *dev, 975 u32 ext_filter_mask) 976 { 977 if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) { 978 int num_vfs = dev_num_vf(dev->dev.parent); 979 size_t size = nla_total_size(0); 980 size += num_vfs * 981 (nla_total_size(0) + 982 nla_total_size(sizeof(struct ifla_vf_mac)) + 983 nla_total_size(sizeof(struct ifla_vf_broadcast)) + 984 nla_total_size(sizeof(struct ifla_vf_vlan)) + 985 nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */ 986 nla_total_size(MAX_VLAN_LIST_LEN * 987 sizeof(struct ifla_vf_vlan_info)) + 988 nla_total_size(sizeof(struct ifla_vf_spoofchk)) + 989 nla_total_size(sizeof(struct ifla_vf_tx_rate)) + 990 nla_total_size(sizeof(struct ifla_vf_rate)) + 991 nla_total_size(sizeof(struct ifla_vf_link_state)) + 992 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) + 993 nla_total_size(sizeof(struct ifla_vf_trust))); 994 if (~ext_filter_mask & RTEXT_FILTER_SKIP_STATS) { 995 size += num_vfs * 996 (nla_total_size(0) + /* nest IFLA_VF_STATS */ 997 /* IFLA_VF_STATS_RX_PACKETS */ 998 nla_total_size_64bit(sizeof(__u64)) + 999 /* IFLA_VF_STATS_TX_PACKETS */ 1000 nla_total_size_64bit(sizeof(__u64)) + 1001 /* IFLA_VF_STATS_RX_BYTES */ 1002 nla_total_size_64bit(sizeof(__u64)) + 1003 /* IFLA_VF_STATS_TX_BYTES */ 1004 nla_total_size_64bit(sizeof(__u64)) + 1005 /* IFLA_VF_STATS_BROADCAST */ 1006 nla_total_size_64bit(sizeof(__u64)) + 1007 /* IFLA_VF_STATS_MULTICAST */ 1008 nla_total_size_64bit(sizeof(__u64)) + 1009 /* IFLA_VF_STATS_RX_DROPPED */ 1010 nla_total_size_64bit(sizeof(__u64)) + 1011 /* IFLA_VF_STATS_TX_DROPPED */ 1012 nla_total_size_64bit(sizeof(__u64))); 1013 } 1014 return size; 1015 } else 1016 return 0; 1017 } 1018 1019 static size_t rtnl_port_size(const struct net_device *dev, 1020 u32 ext_filter_mask) 1021 { 1022 size_t port_size = nla_total_size(4) /* PORT_VF */ 1023 + nla_total_size(PORT_PROFILE_MAX) /* PORT_PROFILE */ 1024 + nla_total_size(PORT_UUID_MAX) /* PORT_INSTANCE_UUID */ 1025 + nla_total_size(PORT_UUID_MAX) /* PORT_HOST_UUID */ 1026 + nla_total_size(1) /* PROT_VDP_REQUEST */ 1027 + nla_total_size(2); /* PORT_VDP_RESPONSE */ 1028 size_t vf_ports_size = nla_total_size(sizeof(struct nlattr)); 1029 size_t vf_port_size = nla_total_size(sizeof(struct nlattr)) 1030 + port_size; 1031 size_t port_self_size = nla_total_size(sizeof(struct nlattr)) 1032 + port_size; 1033 1034 if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent || 1035 !(ext_filter_mask & RTEXT_FILTER_VF)) 1036 return 0; 1037 if (dev_num_vf(dev->dev.parent)) 1038 return port_self_size + vf_ports_size + 1039 vf_port_size * dev_num_vf(dev->dev.parent); 1040 else 1041 return port_self_size; 1042 } 1043 1044 static size_t rtnl_xdp_size(void) 1045 { 1046 size_t xdp_size = nla_total_size(0) + /* nest IFLA_XDP */ 1047 nla_total_size(1) + /* XDP_ATTACHED */ 1048 nla_total_size(4) + /* XDP_PROG_ID (or 1st mode) */ 1049 nla_total_size(4); /* XDP_<mode>_PROG_ID */ 1050 1051 return xdp_size; 1052 } 1053 1054 static size_t rtnl_prop_list_size(const struct net_device *dev) 1055 { 1056 struct netdev_name_node *name_node; 1057 size_t size; 1058 1059 if (list_empty(&dev->name_node->list)) 1060 return 0; 1061 size = nla_total_size(0); 1062 list_for_each_entry(name_node, &dev->name_node->list, list) 1063 size += nla_total_size(ALTIFNAMSIZ); 1064 return size; 1065 } 1066 1067 static size_t rtnl_proto_down_size(const struct net_device *dev) 1068 { 1069 size_t size = nla_total_size(1); 1070 1071 if (dev->proto_down_reason) 1072 size += nla_total_size(0) + nla_total_size(4); 1073 1074 return size; 1075 } 1076 1077 static size_t rtnl_devlink_port_size(const struct net_device *dev) 1078 { 1079 size_t size = nla_total_size(0); /* nest IFLA_DEVLINK_PORT */ 1080 1081 if (dev->devlink_port) 1082 size += devlink_nl_port_handle_size(dev->devlink_port); 1083 1084 return size; 1085 } 1086 1087 static noinline size_t if_nlmsg_size(const struct net_device *dev, 1088 u32 ext_filter_mask) 1089 { 1090 return NLMSG_ALIGN(sizeof(struct ifinfomsg)) 1091 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */ 1092 + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */ 1093 + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */ 1094 + nla_total_size_64bit(sizeof(struct rtnl_link_ifmap)) 1095 + nla_total_size(sizeof(struct rtnl_link_stats)) 1096 + nla_total_size_64bit(sizeof(struct rtnl_link_stats64)) 1097 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */ 1098 + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */ 1099 + nla_total_size(4) /* IFLA_TXQLEN */ 1100 + nla_total_size(4) /* IFLA_WEIGHT */ 1101 + nla_total_size(4) /* IFLA_MTU */ 1102 + nla_total_size(4) /* IFLA_LINK */ 1103 + nla_total_size(4) /* IFLA_MASTER */ 1104 + nla_total_size(1) /* IFLA_CARRIER */ 1105 + nla_total_size(4) /* IFLA_PROMISCUITY */ 1106 + nla_total_size(4) /* IFLA_ALLMULTI */ 1107 + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */ 1108 + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */ 1109 + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */ 1110 + nla_total_size(4) /* IFLA_GSO_MAX_SIZE */ 1111 + nla_total_size(4) /* IFLA_GRO_MAX_SIZE */ 1112 + nla_total_size(4) /* IFLA_GSO_IPV4_MAX_SIZE */ 1113 + nla_total_size(4) /* IFLA_GRO_IPV4_MAX_SIZE */ 1114 + nla_total_size(4) /* IFLA_TSO_MAX_SIZE */ 1115 + nla_total_size(4) /* IFLA_TSO_MAX_SEGS */ 1116 + nla_total_size(1) /* IFLA_OPERSTATE */ 1117 + nla_total_size(1) /* IFLA_LINKMODE */ 1118 + nla_total_size(4) /* IFLA_CARRIER_CHANGES */ 1119 + nla_total_size(4) /* IFLA_LINK_NETNSID */ 1120 + nla_total_size(4) /* IFLA_GROUP */ 1121 + nla_total_size(ext_filter_mask 1122 & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */ 1123 + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */ 1124 + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */ 1125 + rtnl_link_get_size(dev) /* IFLA_LINKINFO */ 1126 + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */ 1127 + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */ 1128 + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */ 1129 + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */ 1130 + rtnl_xdp_size() /* IFLA_XDP */ 1131 + nla_total_size(4) /* IFLA_EVENT */ 1132 + nla_total_size(4) /* IFLA_NEW_NETNSID */ 1133 + nla_total_size(4) /* IFLA_NEW_IFINDEX */ 1134 + rtnl_proto_down_size(dev) /* proto down */ 1135 + nla_total_size(4) /* IFLA_TARGET_NETNSID */ 1136 + nla_total_size(4) /* IFLA_CARRIER_UP_COUNT */ 1137 + nla_total_size(4) /* IFLA_CARRIER_DOWN_COUNT */ 1138 + nla_total_size(4) /* IFLA_MIN_MTU */ 1139 + nla_total_size(4) /* IFLA_MAX_MTU */ 1140 + rtnl_prop_list_size(dev) 1141 + nla_total_size(MAX_ADDR_LEN) /* IFLA_PERM_ADDRESS */ 1142 + rtnl_devlink_port_size(dev) 1143 + 0; 1144 } 1145 1146 static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev) 1147 { 1148 struct nlattr *vf_ports; 1149 struct nlattr *vf_port; 1150 int vf; 1151 int err; 1152 1153 vf_ports = nla_nest_start_noflag(skb, IFLA_VF_PORTS); 1154 if (!vf_ports) 1155 return -EMSGSIZE; 1156 1157 for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) { 1158 vf_port = nla_nest_start_noflag(skb, IFLA_VF_PORT); 1159 if (!vf_port) 1160 goto nla_put_failure; 1161 if (nla_put_u32(skb, IFLA_PORT_VF, vf)) 1162 goto nla_put_failure; 1163 err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb); 1164 if (err == -EMSGSIZE) 1165 goto nla_put_failure; 1166 if (err) { 1167 nla_nest_cancel(skb, vf_port); 1168 continue; 1169 } 1170 nla_nest_end(skb, vf_port); 1171 } 1172 1173 nla_nest_end(skb, vf_ports); 1174 1175 return 0; 1176 1177 nla_put_failure: 1178 nla_nest_cancel(skb, vf_ports); 1179 return -EMSGSIZE; 1180 } 1181 1182 static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev) 1183 { 1184 struct nlattr *port_self; 1185 int err; 1186 1187 port_self = nla_nest_start_noflag(skb, IFLA_PORT_SELF); 1188 if (!port_self) 1189 return -EMSGSIZE; 1190 1191 err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb); 1192 if (err) { 1193 nla_nest_cancel(skb, port_self); 1194 return (err == -EMSGSIZE) ? err : 0; 1195 } 1196 1197 nla_nest_end(skb, port_self); 1198 1199 return 0; 1200 } 1201 1202 static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev, 1203 u32 ext_filter_mask) 1204 { 1205 int err; 1206 1207 if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent || 1208 !(ext_filter_mask & RTEXT_FILTER_VF)) 1209 return 0; 1210 1211 err = rtnl_port_self_fill(skb, dev); 1212 if (err) 1213 return err; 1214 1215 if (dev_num_vf(dev->dev.parent)) { 1216 err = rtnl_vf_ports_fill(skb, dev); 1217 if (err) 1218 return err; 1219 } 1220 1221 return 0; 1222 } 1223 1224 static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev) 1225 { 1226 int err; 1227 struct netdev_phys_item_id ppid; 1228 1229 err = dev_get_phys_port_id(dev, &ppid); 1230 if (err) { 1231 if (err == -EOPNOTSUPP) 1232 return 0; 1233 return err; 1234 } 1235 1236 if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id)) 1237 return -EMSGSIZE; 1238 1239 return 0; 1240 } 1241 1242 static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev) 1243 { 1244 char name[IFNAMSIZ]; 1245 int err; 1246 1247 err = dev_get_phys_port_name(dev, name, sizeof(name)); 1248 if (err) { 1249 if (err == -EOPNOTSUPP) 1250 return 0; 1251 return err; 1252 } 1253 1254 if (nla_put_string(skb, IFLA_PHYS_PORT_NAME, name)) 1255 return -EMSGSIZE; 1256 1257 return 0; 1258 } 1259 1260 static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev) 1261 { 1262 struct netdev_phys_item_id ppid = { }; 1263 int err; 1264 1265 err = dev_get_port_parent_id(dev, &ppid, false); 1266 if (err) { 1267 if (err == -EOPNOTSUPP) 1268 return 0; 1269 return err; 1270 } 1271 1272 if (nla_put(skb, IFLA_PHYS_SWITCH_ID, ppid.id_len, ppid.id)) 1273 return -EMSGSIZE; 1274 1275 return 0; 1276 } 1277 1278 static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb, 1279 struct net_device *dev) 1280 { 1281 struct rtnl_link_stats64 *sp; 1282 struct nlattr *attr; 1283 1284 attr = nla_reserve_64bit(skb, IFLA_STATS64, 1285 sizeof(struct rtnl_link_stats64), IFLA_PAD); 1286 if (!attr) 1287 return -EMSGSIZE; 1288 1289 sp = nla_data(attr); 1290 dev_get_stats(dev, sp); 1291 1292 attr = nla_reserve(skb, IFLA_STATS, 1293 sizeof(struct rtnl_link_stats)); 1294 if (!attr) 1295 return -EMSGSIZE; 1296 1297 copy_rtnl_link_stats(nla_data(attr), sp); 1298 1299 return 0; 1300 } 1301 1302 static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb, 1303 struct net_device *dev, 1304 int vfs_num, 1305 u32 ext_filter_mask) 1306 { 1307 struct ifla_vf_rss_query_en vf_rss_query_en; 1308 struct nlattr *vf, *vfstats, *vfvlanlist; 1309 struct ifla_vf_link_state vf_linkstate; 1310 struct ifla_vf_vlan_info vf_vlan_info; 1311 struct ifla_vf_spoofchk vf_spoofchk; 1312 struct ifla_vf_tx_rate vf_tx_rate; 1313 struct ifla_vf_stats vf_stats; 1314 struct ifla_vf_trust vf_trust; 1315 struct ifla_vf_vlan vf_vlan; 1316 struct ifla_vf_rate vf_rate; 1317 struct ifla_vf_mac vf_mac; 1318 struct ifla_vf_broadcast vf_broadcast; 1319 struct ifla_vf_info ivi; 1320 struct ifla_vf_guid node_guid; 1321 struct ifla_vf_guid port_guid; 1322 1323 memset(&ivi, 0, sizeof(ivi)); 1324 1325 /* Not all SR-IOV capable drivers support the 1326 * spoofcheck and "RSS query enable" query. Preset to 1327 * -1 so the user space tool can detect that the driver 1328 * didn't report anything. 1329 */ 1330 ivi.spoofchk = -1; 1331 ivi.rss_query_en = -1; 1332 ivi.trusted = -1; 1333 /* The default value for VF link state is "auto" 1334 * IFLA_VF_LINK_STATE_AUTO which equals zero 1335 */ 1336 ivi.linkstate = 0; 1337 /* VLAN Protocol by default is 802.1Q */ 1338 ivi.vlan_proto = htons(ETH_P_8021Q); 1339 if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi)) 1340 return 0; 1341 1342 memset(&vf_vlan_info, 0, sizeof(vf_vlan_info)); 1343 memset(&node_guid, 0, sizeof(node_guid)); 1344 memset(&port_guid, 0, sizeof(port_guid)); 1345 1346 vf_mac.vf = 1347 vf_vlan.vf = 1348 vf_vlan_info.vf = 1349 vf_rate.vf = 1350 vf_tx_rate.vf = 1351 vf_spoofchk.vf = 1352 vf_linkstate.vf = 1353 vf_rss_query_en.vf = 1354 vf_trust.vf = 1355 node_guid.vf = 1356 port_guid.vf = ivi.vf; 1357 1358 memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac)); 1359 memcpy(vf_broadcast.broadcast, dev->broadcast, dev->addr_len); 1360 vf_vlan.vlan = ivi.vlan; 1361 vf_vlan.qos = ivi.qos; 1362 vf_vlan_info.vlan = ivi.vlan; 1363 vf_vlan_info.qos = ivi.qos; 1364 vf_vlan_info.vlan_proto = ivi.vlan_proto; 1365 vf_tx_rate.rate = ivi.max_tx_rate; 1366 vf_rate.min_tx_rate = ivi.min_tx_rate; 1367 vf_rate.max_tx_rate = ivi.max_tx_rate; 1368 vf_spoofchk.setting = ivi.spoofchk; 1369 vf_linkstate.link_state = ivi.linkstate; 1370 vf_rss_query_en.setting = ivi.rss_query_en; 1371 vf_trust.setting = ivi.trusted; 1372 vf = nla_nest_start_noflag(skb, IFLA_VF_INFO); 1373 if (!vf) 1374 return -EMSGSIZE; 1375 if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) || 1376 nla_put(skb, IFLA_VF_BROADCAST, sizeof(vf_broadcast), &vf_broadcast) || 1377 nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) || 1378 nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate), 1379 &vf_rate) || 1380 nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate), 1381 &vf_tx_rate) || 1382 nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk), 1383 &vf_spoofchk) || 1384 nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate), 1385 &vf_linkstate) || 1386 nla_put(skb, IFLA_VF_RSS_QUERY_EN, 1387 sizeof(vf_rss_query_en), 1388 &vf_rss_query_en) || 1389 nla_put(skb, IFLA_VF_TRUST, 1390 sizeof(vf_trust), &vf_trust)) 1391 goto nla_put_vf_failure; 1392 1393 if (dev->netdev_ops->ndo_get_vf_guid && 1394 !dev->netdev_ops->ndo_get_vf_guid(dev, vfs_num, &node_guid, 1395 &port_guid)) { 1396 if (nla_put(skb, IFLA_VF_IB_NODE_GUID, sizeof(node_guid), 1397 &node_guid) || 1398 nla_put(skb, IFLA_VF_IB_PORT_GUID, sizeof(port_guid), 1399 &port_guid)) 1400 goto nla_put_vf_failure; 1401 } 1402 vfvlanlist = nla_nest_start_noflag(skb, IFLA_VF_VLAN_LIST); 1403 if (!vfvlanlist) 1404 goto nla_put_vf_failure; 1405 if (nla_put(skb, IFLA_VF_VLAN_INFO, sizeof(vf_vlan_info), 1406 &vf_vlan_info)) { 1407 nla_nest_cancel(skb, vfvlanlist); 1408 goto nla_put_vf_failure; 1409 } 1410 nla_nest_end(skb, vfvlanlist); 1411 if (~ext_filter_mask & RTEXT_FILTER_SKIP_STATS) { 1412 memset(&vf_stats, 0, sizeof(vf_stats)); 1413 if (dev->netdev_ops->ndo_get_vf_stats) 1414 dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num, 1415 &vf_stats); 1416 vfstats = nla_nest_start_noflag(skb, IFLA_VF_STATS); 1417 if (!vfstats) 1418 goto nla_put_vf_failure; 1419 if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS, 1420 vf_stats.rx_packets, IFLA_VF_STATS_PAD) || 1421 nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS, 1422 vf_stats.tx_packets, IFLA_VF_STATS_PAD) || 1423 nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES, 1424 vf_stats.rx_bytes, IFLA_VF_STATS_PAD) || 1425 nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES, 1426 vf_stats.tx_bytes, IFLA_VF_STATS_PAD) || 1427 nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST, 1428 vf_stats.broadcast, IFLA_VF_STATS_PAD) || 1429 nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST, 1430 vf_stats.multicast, IFLA_VF_STATS_PAD) || 1431 nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED, 1432 vf_stats.rx_dropped, IFLA_VF_STATS_PAD) || 1433 nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED, 1434 vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) { 1435 nla_nest_cancel(skb, vfstats); 1436 goto nla_put_vf_failure; 1437 } 1438 nla_nest_end(skb, vfstats); 1439 } 1440 nla_nest_end(skb, vf); 1441 return 0; 1442 1443 nla_put_vf_failure: 1444 nla_nest_cancel(skb, vf); 1445 return -EMSGSIZE; 1446 } 1447 1448 static noinline_for_stack int rtnl_fill_vf(struct sk_buff *skb, 1449 struct net_device *dev, 1450 u32 ext_filter_mask) 1451 { 1452 struct nlattr *vfinfo; 1453 int i, num_vfs; 1454 1455 if (!dev->dev.parent || ((ext_filter_mask & RTEXT_FILTER_VF) == 0)) 1456 return 0; 1457 1458 num_vfs = dev_num_vf(dev->dev.parent); 1459 if (nla_put_u32(skb, IFLA_NUM_VF, num_vfs)) 1460 return -EMSGSIZE; 1461 1462 if (!dev->netdev_ops->ndo_get_vf_config) 1463 return 0; 1464 1465 vfinfo = nla_nest_start_noflag(skb, IFLA_VFINFO_LIST); 1466 if (!vfinfo) 1467 return -EMSGSIZE; 1468 1469 for (i = 0; i < num_vfs; i++) { 1470 if (rtnl_fill_vfinfo(skb, dev, i, ext_filter_mask)) { 1471 nla_nest_cancel(skb, vfinfo); 1472 return -EMSGSIZE; 1473 } 1474 } 1475 1476 nla_nest_end(skb, vfinfo); 1477 return 0; 1478 } 1479 1480 static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev) 1481 { 1482 struct rtnl_link_ifmap map; 1483 1484 memset(&map, 0, sizeof(map)); 1485 map.mem_start = dev->mem_start; 1486 map.mem_end = dev->mem_end; 1487 map.base_addr = dev->base_addr; 1488 map.irq = dev->irq; 1489 map.dma = dev->dma; 1490 map.port = dev->if_port; 1491 1492 if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD)) 1493 return -EMSGSIZE; 1494 1495 return 0; 1496 } 1497 1498 static u32 rtnl_xdp_prog_skb(struct net_device *dev) 1499 { 1500 const struct bpf_prog *generic_xdp_prog; 1501 1502 ASSERT_RTNL(); 1503 1504 generic_xdp_prog = rtnl_dereference(dev->xdp_prog); 1505 if (!generic_xdp_prog) 1506 return 0; 1507 return generic_xdp_prog->aux->id; 1508 } 1509 1510 static u32 rtnl_xdp_prog_drv(struct net_device *dev) 1511 { 1512 return dev_xdp_prog_id(dev, XDP_MODE_DRV); 1513 } 1514 1515 static u32 rtnl_xdp_prog_hw(struct net_device *dev) 1516 { 1517 return dev_xdp_prog_id(dev, XDP_MODE_HW); 1518 } 1519 1520 static int rtnl_xdp_report_one(struct sk_buff *skb, struct net_device *dev, 1521 u32 *prog_id, u8 *mode, u8 tgt_mode, u32 attr, 1522 u32 (*get_prog_id)(struct net_device *dev)) 1523 { 1524 u32 curr_id; 1525 int err; 1526 1527 curr_id = get_prog_id(dev); 1528 if (!curr_id) 1529 return 0; 1530 1531 *prog_id = curr_id; 1532 err = nla_put_u32(skb, attr, curr_id); 1533 if (err) 1534 return err; 1535 1536 if (*mode != XDP_ATTACHED_NONE) 1537 *mode = XDP_ATTACHED_MULTI; 1538 else 1539 *mode = tgt_mode; 1540 1541 return 0; 1542 } 1543 1544 static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev) 1545 { 1546 struct nlattr *xdp; 1547 u32 prog_id; 1548 int err; 1549 u8 mode; 1550 1551 xdp = nla_nest_start_noflag(skb, IFLA_XDP); 1552 if (!xdp) 1553 return -EMSGSIZE; 1554 1555 prog_id = 0; 1556 mode = XDP_ATTACHED_NONE; 1557 err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_SKB, 1558 IFLA_XDP_SKB_PROG_ID, rtnl_xdp_prog_skb); 1559 if (err) 1560 goto err_cancel; 1561 err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_DRV, 1562 IFLA_XDP_DRV_PROG_ID, rtnl_xdp_prog_drv); 1563 if (err) 1564 goto err_cancel; 1565 err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_HW, 1566 IFLA_XDP_HW_PROG_ID, rtnl_xdp_prog_hw); 1567 if (err) 1568 goto err_cancel; 1569 1570 err = nla_put_u8(skb, IFLA_XDP_ATTACHED, mode); 1571 if (err) 1572 goto err_cancel; 1573 1574 if (prog_id && mode != XDP_ATTACHED_MULTI) { 1575 err = nla_put_u32(skb, IFLA_XDP_PROG_ID, prog_id); 1576 if (err) 1577 goto err_cancel; 1578 } 1579 1580 nla_nest_end(skb, xdp); 1581 return 0; 1582 1583 err_cancel: 1584 nla_nest_cancel(skb, xdp); 1585 return err; 1586 } 1587 1588 static u32 rtnl_get_event(unsigned long event) 1589 { 1590 u32 rtnl_event_type = IFLA_EVENT_NONE; 1591 1592 switch (event) { 1593 case NETDEV_REBOOT: 1594 rtnl_event_type = IFLA_EVENT_REBOOT; 1595 break; 1596 case NETDEV_FEAT_CHANGE: 1597 rtnl_event_type = IFLA_EVENT_FEATURES; 1598 break; 1599 case NETDEV_BONDING_FAILOVER: 1600 rtnl_event_type = IFLA_EVENT_BONDING_FAILOVER; 1601 break; 1602 case NETDEV_NOTIFY_PEERS: 1603 rtnl_event_type = IFLA_EVENT_NOTIFY_PEERS; 1604 break; 1605 case NETDEV_RESEND_IGMP: 1606 rtnl_event_type = IFLA_EVENT_IGMP_RESEND; 1607 break; 1608 case NETDEV_CHANGEINFODATA: 1609 rtnl_event_type = IFLA_EVENT_BONDING_OPTIONS; 1610 break; 1611 default: 1612 break; 1613 } 1614 1615 return rtnl_event_type; 1616 } 1617 1618 static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev) 1619 { 1620 const struct net_device *upper_dev; 1621 int ret = 0; 1622 1623 rcu_read_lock(); 1624 1625 upper_dev = netdev_master_upper_dev_get_rcu(dev); 1626 if (upper_dev) 1627 ret = nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex); 1628 1629 rcu_read_unlock(); 1630 return ret; 1631 } 1632 1633 static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev, 1634 bool force) 1635 { 1636 int ifindex = dev_get_iflink(dev); 1637 1638 if (force || dev->ifindex != ifindex) 1639 return nla_put_u32(skb, IFLA_LINK, ifindex); 1640 1641 return 0; 1642 } 1643 1644 static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb, 1645 struct net_device *dev) 1646 { 1647 char buf[IFALIASZ]; 1648 int ret; 1649 1650 ret = dev_get_alias(dev, buf, sizeof(buf)); 1651 return ret > 0 ? nla_put_string(skb, IFLA_IFALIAS, buf) : 0; 1652 } 1653 1654 static int rtnl_fill_link_netnsid(struct sk_buff *skb, 1655 const struct net_device *dev, 1656 struct net *src_net, gfp_t gfp) 1657 { 1658 bool put_iflink = false; 1659 1660 if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net) { 1661 struct net *link_net = dev->rtnl_link_ops->get_link_net(dev); 1662 1663 if (!net_eq(dev_net(dev), link_net)) { 1664 int id = peernet2id_alloc(src_net, link_net, gfp); 1665 1666 if (nla_put_s32(skb, IFLA_LINK_NETNSID, id)) 1667 return -EMSGSIZE; 1668 1669 put_iflink = true; 1670 } 1671 } 1672 1673 return nla_put_iflink(skb, dev, put_iflink); 1674 } 1675 1676 static int rtnl_fill_link_af(struct sk_buff *skb, 1677 const struct net_device *dev, 1678 u32 ext_filter_mask) 1679 { 1680 const struct rtnl_af_ops *af_ops; 1681 struct nlattr *af_spec; 1682 1683 af_spec = nla_nest_start_noflag(skb, IFLA_AF_SPEC); 1684 if (!af_spec) 1685 return -EMSGSIZE; 1686 1687 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) { 1688 struct nlattr *af; 1689 int err; 1690 1691 if (!af_ops->fill_link_af) 1692 continue; 1693 1694 af = nla_nest_start_noflag(skb, af_ops->family); 1695 if (!af) 1696 return -EMSGSIZE; 1697 1698 err = af_ops->fill_link_af(skb, dev, ext_filter_mask); 1699 /* 1700 * Caller may return ENODATA to indicate that there 1701 * was no data to be dumped. This is not an error, it 1702 * means we should trim the attribute header and 1703 * continue. 1704 */ 1705 if (err == -ENODATA) 1706 nla_nest_cancel(skb, af); 1707 else if (err < 0) 1708 return -EMSGSIZE; 1709 1710 nla_nest_end(skb, af); 1711 } 1712 1713 nla_nest_end(skb, af_spec); 1714 return 0; 1715 } 1716 1717 static int rtnl_fill_alt_ifnames(struct sk_buff *skb, 1718 const struct net_device *dev) 1719 { 1720 struct netdev_name_node *name_node; 1721 int count = 0; 1722 1723 list_for_each_entry(name_node, &dev->name_node->list, list) { 1724 if (nla_put_string(skb, IFLA_ALT_IFNAME, name_node->name)) 1725 return -EMSGSIZE; 1726 count++; 1727 } 1728 return count; 1729 } 1730 1731 static int rtnl_fill_prop_list(struct sk_buff *skb, 1732 const struct net_device *dev) 1733 { 1734 struct nlattr *prop_list; 1735 int ret; 1736 1737 prop_list = nla_nest_start(skb, IFLA_PROP_LIST); 1738 if (!prop_list) 1739 return -EMSGSIZE; 1740 1741 ret = rtnl_fill_alt_ifnames(skb, dev); 1742 if (ret <= 0) 1743 goto nest_cancel; 1744 1745 nla_nest_end(skb, prop_list); 1746 return 0; 1747 1748 nest_cancel: 1749 nla_nest_cancel(skb, prop_list); 1750 return ret; 1751 } 1752 1753 static int rtnl_fill_proto_down(struct sk_buff *skb, 1754 const struct net_device *dev) 1755 { 1756 struct nlattr *pr; 1757 u32 preason; 1758 1759 if (nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down)) 1760 goto nla_put_failure; 1761 1762 preason = dev->proto_down_reason; 1763 if (!preason) 1764 return 0; 1765 1766 pr = nla_nest_start(skb, IFLA_PROTO_DOWN_REASON); 1767 if (!pr) 1768 return -EMSGSIZE; 1769 1770 if (nla_put_u32(skb, IFLA_PROTO_DOWN_REASON_VALUE, preason)) { 1771 nla_nest_cancel(skb, pr); 1772 goto nla_put_failure; 1773 } 1774 1775 nla_nest_end(skb, pr); 1776 return 0; 1777 1778 nla_put_failure: 1779 return -EMSGSIZE; 1780 } 1781 1782 static int rtnl_fill_devlink_port(struct sk_buff *skb, 1783 const struct net_device *dev) 1784 { 1785 struct nlattr *devlink_port_nest; 1786 int ret; 1787 1788 devlink_port_nest = nla_nest_start(skb, IFLA_DEVLINK_PORT); 1789 if (!devlink_port_nest) 1790 return -EMSGSIZE; 1791 1792 if (dev->devlink_port) { 1793 ret = devlink_nl_port_handle_fill(skb, dev->devlink_port); 1794 if (ret < 0) 1795 goto nest_cancel; 1796 } 1797 1798 nla_nest_end(skb, devlink_port_nest); 1799 return 0; 1800 1801 nest_cancel: 1802 nla_nest_cancel(skb, devlink_port_nest); 1803 return ret; 1804 } 1805 1806 static int rtnl_fill_ifinfo(struct sk_buff *skb, 1807 struct net_device *dev, struct net *src_net, 1808 int type, u32 pid, u32 seq, u32 change, 1809 unsigned int flags, u32 ext_filter_mask, 1810 u32 event, int *new_nsid, int new_ifindex, 1811 int tgt_netnsid, gfp_t gfp) 1812 { 1813 struct ifinfomsg *ifm; 1814 struct nlmsghdr *nlh; 1815 struct Qdisc *qdisc; 1816 1817 ASSERT_RTNL(); 1818 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags); 1819 if (nlh == NULL) 1820 return -EMSGSIZE; 1821 1822 ifm = nlmsg_data(nlh); 1823 ifm->ifi_family = AF_UNSPEC; 1824 ifm->__ifi_pad = 0; 1825 ifm->ifi_type = dev->type; 1826 ifm->ifi_index = dev->ifindex; 1827 ifm->ifi_flags = dev_get_flags(dev); 1828 ifm->ifi_change = change; 1829 1830 if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_TARGET_NETNSID, tgt_netnsid)) 1831 goto nla_put_failure; 1832 1833 qdisc = rtnl_dereference(dev->qdisc); 1834 if (nla_put_string(skb, IFLA_IFNAME, dev->name) || 1835 nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) || 1836 nla_put_u8(skb, IFLA_OPERSTATE, 1837 netif_running(dev) ? dev->operstate : IF_OPER_DOWN) || 1838 nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) || 1839 nla_put_u32(skb, IFLA_MTU, dev->mtu) || 1840 nla_put_u32(skb, IFLA_MIN_MTU, dev->min_mtu) || 1841 nla_put_u32(skb, IFLA_MAX_MTU, dev->max_mtu) || 1842 nla_put_u32(skb, IFLA_GROUP, dev->group) || 1843 nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) || 1844 nla_put_u32(skb, IFLA_ALLMULTI, dev->allmulti) || 1845 nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) || 1846 nla_put_u32(skb, IFLA_GSO_MAX_SEGS, dev->gso_max_segs) || 1847 nla_put_u32(skb, IFLA_GSO_MAX_SIZE, dev->gso_max_size) || 1848 nla_put_u32(skb, IFLA_GRO_MAX_SIZE, dev->gro_max_size) || 1849 nla_put_u32(skb, IFLA_GSO_IPV4_MAX_SIZE, dev->gso_ipv4_max_size) || 1850 nla_put_u32(skb, IFLA_GRO_IPV4_MAX_SIZE, dev->gro_ipv4_max_size) || 1851 nla_put_u32(skb, IFLA_TSO_MAX_SIZE, dev->tso_max_size) || 1852 nla_put_u32(skb, IFLA_TSO_MAX_SEGS, dev->tso_max_segs) || 1853 #ifdef CONFIG_RPS 1854 nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) || 1855 #endif 1856 put_master_ifindex(skb, dev) || 1857 nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) || 1858 (qdisc && 1859 nla_put_string(skb, IFLA_QDISC, qdisc->ops->id)) || 1860 nla_put_ifalias(skb, dev) || 1861 nla_put_u32(skb, IFLA_CARRIER_CHANGES, 1862 atomic_read(&dev->carrier_up_count) + 1863 atomic_read(&dev->carrier_down_count)) || 1864 nla_put_u32(skb, IFLA_CARRIER_UP_COUNT, 1865 atomic_read(&dev->carrier_up_count)) || 1866 nla_put_u32(skb, IFLA_CARRIER_DOWN_COUNT, 1867 atomic_read(&dev->carrier_down_count))) 1868 goto nla_put_failure; 1869 1870 if (rtnl_fill_proto_down(skb, dev)) 1871 goto nla_put_failure; 1872 1873 if (event != IFLA_EVENT_NONE) { 1874 if (nla_put_u32(skb, IFLA_EVENT, event)) 1875 goto nla_put_failure; 1876 } 1877 1878 if (rtnl_fill_link_ifmap(skb, dev)) 1879 goto nla_put_failure; 1880 1881 if (dev->addr_len) { 1882 if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) || 1883 nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast)) 1884 goto nla_put_failure; 1885 } 1886 1887 if (rtnl_phys_port_id_fill(skb, dev)) 1888 goto nla_put_failure; 1889 1890 if (rtnl_phys_port_name_fill(skb, dev)) 1891 goto nla_put_failure; 1892 1893 if (rtnl_phys_switch_id_fill(skb, dev)) 1894 goto nla_put_failure; 1895 1896 if (rtnl_fill_stats(skb, dev)) 1897 goto nla_put_failure; 1898 1899 if (rtnl_fill_vf(skb, dev, ext_filter_mask)) 1900 goto nla_put_failure; 1901 1902 if (rtnl_port_fill(skb, dev, ext_filter_mask)) 1903 goto nla_put_failure; 1904 1905 if (rtnl_xdp_fill(skb, dev)) 1906 goto nla_put_failure; 1907 1908 if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) { 1909 if (rtnl_link_fill(skb, dev) < 0) 1910 goto nla_put_failure; 1911 } 1912 1913 if (rtnl_fill_link_netnsid(skb, dev, src_net, gfp)) 1914 goto nla_put_failure; 1915 1916 if (new_nsid && 1917 nla_put_s32(skb, IFLA_NEW_NETNSID, *new_nsid) < 0) 1918 goto nla_put_failure; 1919 if (new_ifindex && 1920 nla_put_s32(skb, IFLA_NEW_IFINDEX, new_ifindex) < 0) 1921 goto nla_put_failure; 1922 1923 if (memchr_inv(dev->perm_addr, '\0', dev->addr_len) && 1924 nla_put(skb, IFLA_PERM_ADDRESS, dev->addr_len, dev->perm_addr)) 1925 goto nla_put_failure; 1926 1927 rcu_read_lock(); 1928 if (rtnl_fill_link_af(skb, dev, ext_filter_mask)) 1929 goto nla_put_failure_rcu; 1930 rcu_read_unlock(); 1931 1932 if (rtnl_fill_prop_list(skb, dev)) 1933 goto nla_put_failure; 1934 1935 if (dev->dev.parent && 1936 nla_put_string(skb, IFLA_PARENT_DEV_NAME, 1937 dev_name(dev->dev.parent))) 1938 goto nla_put_failure; 1939 1940 if (dev->dev.parent && dev->dev.parent->bus && 1941 nla_put_string(skb, IFLA_PARENT_DEV_BUS_NAME, 1942 dev->dev.parent->bus->name)) 1943 goto nla_put_failure; 1944 1945 if (rtnl_fill_devlink_port(skb, dev)) 1946 goto nla_put_failure; 1947 1948 nlmsg_end(skb, nlh); 1949 return 0; 1950 1951 nla_put_failure_rcu: 1952 rcu_read_unlock(); 1953 nla_put_failure: 1954 nlmsg_cancel(skb, nlh); 1955 return -EMSGSIZE; 1956 } 1957 1958 static const struct nla_policy ifla_policy[IFLA_MAX+1] = { 1959 [IFLA_IFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ-1 }, 1960 [IFLA_ADDRESS] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN }, 1961 [IFLA_BROADCAST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN }, 1962 [IFLA_MAP] = { .len = sizeof(struct rtnl_link_ifmap) }, 1963 [IFLA_MTU] = { .type = NLA_U32 }, 1964 [IFLA_LINK] = { .type = NLA_U32 }, 1965 [IFLA_MASTER] = { .type = NLA_U32 }, 1966 [IFLA_CARRIER] = { .type = NLA_U8 }, 1967 [IFLA_TXQLEN] = { .type = NLA_U32 }, 1968 [IFLA_WEIGHT] = { .type = NLA_U32 }, 1969 [IFLA_OPERSTATE] = { .type = NLA_U8 }, 1970 [IFLA_LINKMODE] = { .type = NLA_U8 }, 1971 [IFLA_LINKINFO] = { .type = NLA_NESTED }, 1972 [IFLA_NET_NS_PID] = { .type = NLA_U32 }, 1973 [IFLA_NET_NS_FD] = { .type = NLA_U32 }, 1974 /* IFLA_IFALIAS is a string, but policy is set to NLA_BINARY to 1975 * allow 0-length string (needed to remove an alias). 1976 */ 1977 [IFLA_IFALIAS] = { .type = NLA_BINARY, .len = IFALIASZ - 1 }, 1978 [IFLA_VFINFO_LIST] = {. type = NLA_NESTED }, 1979 [IFLA_VF_PORTS] = { .type = NLA_NESTED }, 1980 [IFLA_PORT_SELF] = { .type = NLA_NESTED }, 1981 [IFLA_AF_SPEC] = { .type = NLA_NESTED }, 1982 [IFLA_EXT_MASK] = { .type = NLA_U32 }, 1983 [IFLA_PROMISCUITY] = { .type = NLA_U32 }, 1984 [IFLA_NUM_TX_QUEUES] = { .type = NLA_U32 }, 1985 [IFLA_NUM_RX_QUEUES] = { .type = NLA_U32 }, 1986 [IFLA_GSO_MAX_SEGS] = { .type = NLA_U32 }, 1987 [IFLA_GSO_MAX_SIZE] = NLA_POLICY_MIN(NLA_U32, MAX_TCP_HEADER + 1), 1988 [IFLA_PHYS_PORT_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN }, 1989 [IFLA_CARRIER_CHANGES] = { .type = NLA_U32 }, /* ignored */ 1990 [IFLA_PHYS_SWITCH_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN }, 1991 [IFLA_LINK_NETNSID] = { .type = NLA_S32 }, 1992 [IFLA_PROTO_DOWN] = { .type = NLA_U8 }, 1993 [IFLA_XDP] = { .type = NLA_NESTED }, 1994 [IFLA_EVENT] = { .type = NLA_U32 }, 1995 [IFLA_GROUP] = { .type = NLA_U32 }, 1996 [IFLA_TARGET_NETNSID] = { .type = NLA_S32 }, 1997 [IFLA_CARRIER_UP_COUNT] = { .type = NLA_U32 }, 1998 [IFLA_CARRIER_DOWN_COUNT] = { .type = NLA_U32 }, 1999 [IFLA_MIN_MTU] = { .type = NLA_U32 }, 2000 [IFLA_MAX_MTU] = { .type = NLA_U32 }, 2001 [IFLA_PROP_LIST] = { .type = NLA_NESTED }, 2002 [IFLA_ALT_IFNAME] = { .type = NLA_STRING, 2003 .len = ALTIFNAMSIZ - 1 }, 2004 [IFLA_PERM_ADDRESS] = { .type = NLA_REJECT }, 2005 [IFLA_PROTO_DOWN_REASON] = { .type = NLA_NESTED }, 2006 [IFLA_NEW_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 1), 2007 [IFLA_PARENT_DEV_NAME] = { .type = NLA_NUL_STRING }, 2008 [IFLA_GRO_MAX_SIZE] = { .type = NLA_U32 }, 2009 [IFLA_TSO_MAX_SIZE] = { .type = NLA_REJECT }, 2010 [IFLA_TSO_MAX_SEGS] = { .type = NLA_REJECT }, 2011 [IFLA_ALLMULTI] = { .type = NLA_REJECT }, 2012 [IFLA_GSO_IPV4_MAX_SIZE] = NLA_POLICY_MIN(NLA_U32, MAX_TCP_HEADER + 1), 2013 [IFLA_GRO_IPV4_MAX_SIZE] = { .type = NLA_U32 }, 2014 }; 2015 2016 static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { 2017 [IFLA_INFO_KIND] = { .type = NLA_STRING }, 2018 [IFLA_INFO_DATA] = { .type = NLA_NESTED }, 2019 [IFLA_INFO_SLAVE_KIND] = { .type = NLA_STRING }, 2020 [IFLA_INFO_SLAVE_DATA] = { .type = NLA_NESTED }, 2021 }; 2022 2023 static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = { 2024 [IFLA_VF_MAC] = { .len = sizeof(struct ifla_vf_mac) }, 2025 [IFLA_VF_BROADCAST] = { .type = NLA_REJECT }, 2026 [IFLA_VF_VLAN] = { .len = sizeof(struct ifla_vf_vlan) }, 2027 [IFLA_VF_VLAN_LIST] = { .type = NLA_NESTED }, 2028 [IFLA_VF_TX_RATE] = { .len = sizeof(struct ifla_vf_tx_rate) }, 2029 [IFLA_VF_SPOOFCHK] = { .len = sizeof(struct ifla_vf_spoofchk) }, 2030 [IFLA_VF_RATE] = { .len = sizeof(struct ifla_vf_rate) }, 2031 [IFLA_VF_LINK_STATE] = { .len = sizeof(struct ifla_vf_link_state) }, 2032 [IFLA_VF_RSS_QUERY_EN] = { .len = sizeof(struct ifla_vf_rss_query_en) }, 2033 [IFLA_VF_STATS] = { .type = NLA_NESTED }, 2034 [IFLA_VF_TRUST] = { .len = sizeof(struct ifla_vf_trust) }, 2035 [IFLA_VF_IB_NODE_GUID] = { .len = sizeof(struct ifla_vf_guid) }, 2036 [IFLA_VF_IB_PORT_GUID] = { .len = sizeof(struct ifla_vf_guid) }, 2037 }; 2038 2039 static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = { 2040 [IFLA_PORT_VF] = { .type = NLA_U32 }, 2041 [IFLA_PORT_PROFILE] = { .type = NLA_STRING, 2042 .len = PORT_PROFILE_MAX }, 2043 [IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY, 2044 .len = PORT_UUID_MAX }, 2045 [IFLA_PORT_HOST_UUID] = { .type = NLA_STRING, 2046 .len = PORT_UUID_MAX }, 2047 [IFLA_PORT_REQUEST] = { .type = NLA_U8, }, 2048 [IFLA_PORT_RESPONSE] = { .type = NLA_U16, }, 2049 2050 /* Unused, but we need to keep it here since user space could 2051 * fill it. It's also broken with regard to NLA_BINARY use in 2052 * combination with structs. 2053 */ 2054 [IFLA_PORT_VSI_TYPE] = { .type = NLA_BINARY, 2055 .len = sizeof(struct ifla_port_vsi) }, 2056 }; 2057 2058 static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = { 2059 [IFLA_XDP_UNSPEC] = { .strict_start_type = IFLA_XDP_EXPECTED_FD }, 2060 [IFLA_XDP_FD] = { .type = NLA_S32 }, 2061 [IFLA_XDP_EXPECTED_FD] = { .type = NLA_S32 }, 2062 [IFLA_XDP_ATTACHED] = { .type = NLA_U8 }, 2063 [IFLA_XDP_FLAGS] = { .type = NLA_U32 }, 2064 [IFLA_XDP_PROG_ID] = { .type = NLA_U32 }, 2065 }; 2066 2067 static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla) 2068 { 2069 const struct rtnl_link_ops *ops = NULL; 2070 struct nlattr *linfo[IFLA_INFO_MAX + 1]; 2071 2072 if (nla_parse_nested_deprecated(linfo, IFLA_INFO_MAX, nla, ifla_info_policy, NULL) < 0) 2073 return NULL; 2074 2075 if (linfo[IFLA_INFO_KIND]) { 2076 char kind[MODULE_NAME_LEN]; 2077 2078 nla_strscpy(kind, linfo[IFLA_INFO_KIND], sizeof(kind)); 2079 ops = rtnl_link_ops_get(kind); 2080 } 2081 2082 return ops; 2083 } 2084 2085 static bool link_master_filtered(struct net_device *dev, int master_idx) 2086 { 2087 struct net_device *master; 2088 2089 if (!master_idx) 2090 return false; 2091 2092 master = netdev_master_upper_dev_get(dev); 2093 2094 /* 0 is already used to denote IFLA_MASTER wasn't passed, therefore need 2095 * another invalid value for ifindex to denote "no master". 2096 */ 2097 if (master_idx == -1) 2098 return !!master; 2099 2100 if (!master || master->ifindex != master_idx) 2101 return true; 2102 2103 return false; 2104 } 2105 2106 static bool link_kind_filtered(const struct net_device *dev, 2107 const struct rtnl_link_ops *kind_ops) 2108 { 2109 if (kind_ops && dev->rtnl_link_ops != kind_ops) 2110 return true; 2111 2112 return false; 2113 } 2114 2115 static bool link_dump_filtered(struct net_device *dev, 2116 int master_idx, 2117 const struct rtnl_link_ops *kind_ops) 2118 { 2119 if (link_master_filtered(dev, master_idx) || 2120 link_kind_filtered(dev, kind_ops)) 2121 return true; 2122 2123 return false; 2124 } 2125 2126 /** 2127 * rtnl_get_net_ns_capable - Get netns if sufficiently privileged. 2128 * @sk: netlink socket 2129 * @netnsid: network namespace identifier 2130 * 2131 * Returns the network namespace identified by netnsid on success or an error 2132 * pointer on failure. 2133 */ 2134 struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid) 2135 { 2136 struct net *net; 2137 2138 net = get_net_ns_by_id(sock_net(sk), netnsid); 2139 if (!net) 2140 return ERR_PTR(-EINVAL); 2141 2142 /* For now, the caller is required to have CAP_NET_ADMIN in 2143 * the user namespace owning the target net ns. 2144 */ 2145 if (!sk_ns_capable(sk, net->user_ns, CAP_NET_ADMIN)) { 2146 put_net(net); 2147 return ERR_PTR(-EACCES); 2148 } 2149 return net; 2150 } 2151 EXPORT_SYMBOL_GPL(rtnl_get_net_ns_capable); 2152 2153 static int rtnl_valid_dump_ifinfo_req(const struct nlmsghdr *nlh, 2154 bool strict_check, struct nlattr **tb, 2155 struct netlink_ext_ack *extack) 2156 { 2157 int hdrlen; 2158 2159 if (strict_check) { 2160 struct ifinfomsg *ifm; 2161 2162 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) { 2163 NL_SET_ERR_MSG(extack, "Invalid header for link dump"); 2164 return -EINVAL; 2165 } 2166 2167 ifm = nlmsg_data(nlh); 2168 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags || 2169 ifm->ifi_change) { 2170 NL_SET_ERR_MSG(extack, "Invalid values in header for link dump request"); 2171 return -EINVAL; 2172 } 2173 if (ifm->ifi_index) { 2174 NL_SET_ERR_MSG(extack, "Filter by device index not supported for link dumps"); 2175 return -EINVAL; 2176 } 2177 2178 return nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, 2179 IFLA_MAX, ifla_policy, 2180 extack); 2181 } 2182 2183 /* A hack to preserve kernel<->userspace interface. 2184 * The correct header is ifinfomsg. It is consistent with rtnl_getlink. 2185 * However, before Linux v3.9 the code here assumed rtgenmsg and that's 2186 * what iproute2 < v3.9.0 used. 2187 * We can detect the old iproute2. Even including the IFLA_EXT_MASK 2188 * attribute, its netlink message is shorter than struct ifinfomsg. 2189 */ 2190 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ? 2191 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg); 2192 2193 return nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, 2194 extack); 2195 } 2196 2197 static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) 2198 { 2199 struct netlink_ext_ack *extack = cb->extack; 2200 const struct nlmsghdr *nlh = cb->nlh; 2201 struct net *net = sock_net(skb->sk); 2202 struct net *tgt_net = net; 2203 int h, s_h; 2204 int idx = 0, s_idx; 2205 struct net_device *dev; 2206 struct hlist_head *head; 2207 struct nlattr *tb[IFLA_MAX+1]; 2208 u32 ext_filter_mask = 0; 2209 const struct rtnl_link_ops *kind_ops = NULL; 2210 unsigned int flags = NLM_F_MULTI; 2211 int master_idx = 0; 2212 int netnsid = -1; 2213 int err, i; 2214 2215 s_h = cb->args[0]; 2216 s_idx = cb->args[1]; 2217 2218 err = rtnl_valid_dump_ifinfo_req(nlh, cb->strict_check, tb, extack); 2219 if (err < 0) { 2220 if (cb->strict_check) 2221 return err; 2222 2223 goto walk_entries; 2224 } 2225 2226 for (i = 0; i <= IFLA_MAX; ++i) { 2227 if (!tb[i]) 2228 continue; 2229 2230 /* new attributes should only be added with strict checking */ 2231 switch (i) { 2232 case IFLA_TARGET_NETNSID: 2233 netnsid = nla_get_s32(tb[i]); 2234 tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid); 2235 if (IS_ERR(tgt_net)) { 2236 NL_SET_ERR_MSG(extack, "Invalid target network namespace id"); 2237 return PTR_ERR(tgt_net); 2238 } 2239 break; 2240 case IFLA_EXT_MASK: 2241 ext_filter_mask = nla_get_u32(tb[i]); 2242 break; 2243 case IFLA_MASTER: 2244 master_idx = nla_get_u32(tb[i]); 2245 break; 2246 case IFLA_LINKINFO: 2247 kind_ops = linkinfo_to_kind_ops(tb[i]); 2248 break; 2249 default: 2250 if (cb->strict_check) { 2251 NL_SET_ERR_MSG(extack, "Unsupported attribute in link dump request"); 2252 return -EINVAL; 2253 } 2254 } 2255 } 2256 2257 if (master_idx || kind_ops) 2258 flags |= NLM_F_DUMP_FILTERED; 2259 2260 walk_entries: 2261 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { 2262 idx = 0; 2263 head = &tgt_net->dev_index_head[h]; 2264 hlist_for_each_entry(dev, head, index_hlist) { 2265 if (link_dump_filtered(dev, master_idx, kind_ops)) 2266 goto cont; 2267 if (idx < s_idx) 2268 goto cont; 2269 err = rtnl_fill_ifinfo(skb, dev, net, 2270 RTM_NEWLINK, 2271 NETLINK_CB(cb->skb).portid, 2272 nlh->nlmsg_seq, 0, flags, 2273 ext_filter_mask, 0, NULL, 0, 2274 netnsid, GFP_KERNEL); 2275 2276 if (err < 0) { 2277 if (likely(skb->len)) 2278 goto out; 2279 2280 goto out_err; 2281 } 2282 cont: 2283 idx++; 2284 } 2285 } 2286 out: 2287 err = skb->len; 2288 out_err: 2289 cb->args[1] = idx; 2290 cb->args[0] = h; 2291 cb->seq = tgt_net->dev_base_seq; 2292 nl_dump_check_consistent(cb, nlmsg_hdr(skb)); 2293 if (netnsid >= 0) 2294 put_net(tgt_net); 2295 2296 return err; 2297 } 2298 2299 int rtnl_nla_parse_ifinfomsg(struct nlattr **tb, const struct nlattr *nla_peer, 2300 struct netlink_ext_ack *exterr) 2301 { 2302 const struct ifinfomsg *ifmp; 2303 const struct nlattr *attrs; 2304 size_t len; 2305 2306 ifmp = nla_data(nla_peer); 2307 attrs = nla_data(nla_peer) + sizeof(struct ifinfomsg); 2308 len = nla_len(nla_peer) - sizeof(struct ifinfomsg); 2309 2310 if (ifmp->ifi_index < 0) { 2311 NL_SET_ERR_MSG_ATTR(exterr, nla_peer, 2312 "ifindex can't be negative"); 2313 return -EINVAL; 2314 } 2315 2316 return nla_parse_deprecated(tb, IFLA_MAX, attrs, len, ifla_policy, 2317 exterr); 2318 } 2319 EXPORT_SYMBOL(rtnl_nla_parse_ifinfomsg); 2320 2321 struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]) 2322 { 2323 struct net *net; 2324 /* Examine the link attributes and figure out which 2325 * network namespace we are talking about. 2326 */ 2327 if (tb[IFLA_NET_NS_PID]) 2328 net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID])); 2329 else if (tb[IFLA_NET_NS_FD]) 2330 net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD])); 2331 else 2332 net = get_net(src_net); 2333 return net; 2334 } 2335 EXPORT_SYMBOL(rtnl_link_get_net); 2336 2337 /* Figure out which network namespace we are talking about by 2338 * examining the link attributes in the following order: 2339 * 2340 * 1. IFLA_NET_NS_PID 2341 * 2. IFLA_NET_NS_FD 2342 * 3. IFLA_TARGET_NETNSID 2343 */ 2344 static struct net *rtnl_link_get_net_by_nlattr(struct net *src_net, 2345 struct nlattr *tb[]) 2346 { 2347 struct net *net; 2348 2349 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]) 2350 return rtnl_link_get_net(src_net, tb); 2351 2352 if (!tb[IFLA_TARGET_NETNSID]) 2353 return get_net(src_net); 2354 2355 net = get_net_ns_by_id(src_net, nla_get_u32(tb[IFLA_TARGET_NETNSID])); 2356 if (!net) 2357 return ERR_PTR(-EINVAL); 2358 2359 return net; 2360 } 2361 2362 static struct net *rtnl_link_get_net_capable(const struct sk_buff *skb, 2363 struct net *src_net, 2364 struct nlattr *tb[], int cap) 2365 { 2366 struct net *net; 2367 2368 net = rtnl_link_get_net_by_nlattr(src_net, tb); 2369 if (IS_ERR(net)) 2370 return net; 2371 2372 if (!netlink_ns_capable(skb, net->user_ns, cap)) { 2373 put_net(net); 2374 return ERR_PTR(-EPERM); 2375 } 2376 2377 return net; 2378 } 2379 2380 /* Verify that rtnetlink requests do not pass additional properties 2381 * potentially referring to different network namespaces. 2382 */ 2383 static int rtnl_ensure_unique_netns(struct nlattr *tb[], 2384 struct netlink_ext_ack *extack, 2385 bool netns_id_only) 2386 { 2387 2388 if (netns_id_only) { 2389 if (!tb[IFLA_NET_NS_PID] && !tb[IFLA_NET_NS_FD]) 2390 return 0; 2391 2392 NL_SET_ERR_MSG(extack, "specified netns attribute not supported"); 2393 return -EOPNOTSUPP; 2394 } 2395 2396 if (tb[IFLA_TARGET_NETNSID] && (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD])) 2397 goto invalid_attr; 2398 2399 if (tb[IFLA_NET_NS_PID] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_FD])) 2400 goto invalid_attr; 2401 2402 if (tb[IFLA_NET_NS_FD] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_PID])) 2403 goto invalid_attr; 2404 2405 return 0; 2406 2407 invalid_attr: 2408 NL_SET_ERR_MSG(extack, "multiple netns identifying attributes specified"); 2409 return -EINVAL; 2410 } 2411 2412 static int rtnl_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, 2413 int max_tx_rate) 2414 { 2415 const struct net_device_ops *ops = dev->netdev_ops; 2416 2417 if (!ops->ndo_set_vf_rate) 2418 return -EOPNOTSUPP; 2419 if (max_tx_rate && max_tx_rate < min_tx_rate) 2420 return -EINVAL; 2421 2422 return ops->ndo_set_vf_rate(dev, vf, min_tx_rate, max_tx_rate); 2423 } 2424 2425 static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[], 2426 struct netlink_ext_ack *extack) 2427 { 2428 if (tb[IFLA_ADDRESS] && 2429 nla_len(tb[IFLA_ADDRESS]) < dev->addr_len) 2430 return -EINVAL; 2431 2432 if (tb[IFLA_BROADCAST] && 2433 nla_len(tb[IFLA_BROADCAST]) < dev->addr_len) 2434 return -EINVAL; 2435 2436 if (tb[IFLA_GSO_MAX_SIZE] && 2437 nla_get_u32(tb[IFLA_GSO_MAX_SIZE]) > dev->tso_max_size) { 2438 NL_SET_ERR_MSG(extack, "too big gso_max_size"); 2439 return -EINVAL; 2440 } 2441 2442 if (tb[IFLA_GSO_MAX_SEGS] && 2443 (nla_get_u32(tb[IFLA_GSO_MAX_SEGS]) > GSO_MAX_SEGS || 2444 nla_get_u32(tb[IFLA_GSO_MAX_SEGS]) > dev->tso_max_segs)) { 2445 NL_SET_ERR_MSG(extack, "too big gso_max_segs"); 2446 return -EINVAL; 2447 } 2448 2449 if (tb[IFLA_GRO_MAX_SIZE] && 2450 nla_get_u32(tb[IFLA_GRO_MAX_SIZE]) > GRO_MAX_SIZE) { 2451 NL_SET_ERR_MSG(extack, "too big gro_max_size"); 2452 return -EINVAL; 2453 } 2454 2455 if (tb[IFLA_GSO_IPV4_MAX_SIZE] && 2456 nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]) > dev->tso_max_size) { 2457 NL_SET_ERR_MSG(extack, "too big gso_ipv4_max_size"); 2458 return -EINVAL; 2459 } 2460 2461 if (tb[IFLA_GRO_IPV4_MAX_SIZE] && 2462 nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE]) > GRO_MAX_SIZE) { 2463 NL_SET_ERR_MSG(extack, "too big gro_ipv4_max_size"); 2464 return -EINVAL; 2465 } 2466 2467 if (tb[IFLA_AF_SPEC]) { 2468 struct nlattr *af; 2469 int rem, err; 2470 2471 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) { 2472 const struct rtnl_af_ops *af_ops; 2473 2474 af_ops = rtnl_af_lookup(nla_type(af)); 2475 if (!af_ops) 2476 return -EAFNOSUPPORT; 2477 2478 if (!af_ops->set_link_af) 2479 return -EOPNOTSUPP; 2480 2481 if (af_ops->validate_link_af) { 2482 err = af_ops->validate_link_af(dev, af, extack); 2483 if (err < 0) 2484 return err; 2485 } 2486 } 2487 } 2488 2489 return 0; 2490 } 2491 2492 static int handle_infiniband_guid(struct net_device *dev, struct ifla_vf_guid *ivt, 2493 int guid_type) 2494 { 2495 const struct net_device_ops *ops = dev->netdev_ops; 2496 2497 return ops->ndo_set_vf_guid(dev, ivt->vf, ivt->guid, guid_type); 2498 } 2499 2500 static int handle_vf_guid(struct net_device *dev, struct ifla_vf_guid *ivt, int guid_type) 2501 { 2502 if (dev->type != ARPHRD_INFINIBAND) 2503 return -EOPNOTSUPP; 2504 2505 return handle_infiniband_guid(dev, ivt, guid_type); 2506 } 2507 2508 static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) 2509 { 2510 const struct net_device_ops *ops = dev->netdev_ops; 2511 int err = -EINVAL; 2512 2513 if (tb[IFLA_VF_MAC]) { 2514 struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]); 2515 2516 if (ivm->vf >= INT_MAX) 2517 return -EINVAL; 2518 err = -EOPNOTSUPP; 2519 if (ops->ndo_set_vf_mac) 2520 err = ops->ndo_set_vf_mac(dev, ivm->vf, 2521 ivm->mac); 2522 if (err < 0) 2523 return err; 2524 } 2525 2526 if (tb[IFLA_VF_VLAN]) { 2527 struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]); 2528 2529 if (ivv->vf >= INT_MAX) 2530 return -EINVAL; 2531 err = -EOPNOTSUPP; 2532 if (ops->ndo_set_vf_vlan) 2533 err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan, 2534 ivv->qos, 2535 htons(ETH_P_8021Q)); 2536 if (err < 0) 2537 return err; 2538 } 2539 2540 if (tb[IFLA_VF_VLAN_LIST]) { 2541 struct ifla_vf_vlan_info *ivvl[MAX_VLAN_LIST_LEN]; 2542 struct nlattr *attr; 2543 int rem, len = 0; 2544 2545 err = -EOPNOTSUPP; 2546 if (!ops->ndo_set_vf_vlan) 2547 return err; 2548 2549 nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) { 2550 if (nla_type(attr) != IFLA_VF_VLAN_INFO || 2551 nla_len(attr) < sizeof(struct ifla_vf_vlan_info)) { 2552 return -EINVAL; 2553 } 2554 if (len >= MAX_VLAN_LIST_LEN) 2555 return -EOPNOTSUPP; 2556 ivvl[len] = nla_data(attr); 2557 2558 len++; 2559 } 2560 if (len == 0) 2561 return -EINVAL; 2562 2563 if (ivvl[0]->vf >= INT_MAX) 2564 return -EINVAL; 2565 err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan, 2566 ivvl[0]->qos, ivvl[0]->vlan_proto); 2567 if (err < 0) 2568 return err; 2569 } 2570 2571 if (tb[IFLA_VF_TX_RATE]) { 2572 struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]); 2573 struct ifla_vf_info ivf; 2574 2575 if (ivt->vf >= INT_MAX) 2576 return -EINVAL; 2577 err = -EOPNOTSUPP; 2578 if (ops->ndo_get_vf_config) 2579 err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf); 2580 if (err < 0) 2581 return err; 2582 2583 err = rtnl_set_vf_rate(dev, ivt->vf, 2584 ivf.min_tx_rate, ivt->rate); 2585 if (err < 0) 2586 return err; 2587 } 2588 2589 if (tb[IFLA_VF_RATE]) { 2590 struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]); 2591 2592 if (ivt->vf >= INT_MAX) 2593 return -EINVAL; 2594 2595 err = rtnl_set_vf_rate(dev, ivt->vf, 2596 ivt->min_tx_rate, ivt->max_tx_rate); 2597 if (err < 0) 2598 return err; 2599 } 2600 2601 if (tb[IFLA_VF_SPOOFCHK]) { 2602 struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]); 2603 2604 if (ivs->vf >= INT_MAX) 2605 return -EINVAL; 2606 err = -EOPNOTSUPP; 2607 if (ops->ndo_set_vf_spoofchk) 2608 err = ops->ndo_set_vf_spoofchk(dev, ivs->vf, 2609 ivs->setting); 2610 if (err < 0) 2611 return err; 2612 } 2613 2614 if (tb[IFLA_VF_LINK_STATE]) { 2615 struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]); 2616 2617 if (ivl->vf >= INT_MAX) 2618 return -EINVAL; 2619 err = -EOPNOTSUPP; 2620 if (ops->ndo_set_vf_link_state) 2621 err = ops->ndo_set_vf_link_state(dev, ivl->vf, 2622 ivl->link_state); 2623 if (err < 0) 2624 return err; 2625 } 2626 2627 if (tb[IFLA_VF_RSS_QUERY_EN]) { 2628 struct ifla_vf_rss_query_en *ivrssq_en; 2629 2630 err = -EOPNOTSUPP; 2631 ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]); 2632 if (ivrssq_en->vf >= INT_MAX) 2633 return -EINVAL; 2634 if (ops->ndo_set_vf_rss_query_en) 2635 err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf, 2636 ivrssq_en->setting); 2637 if (err < 0) 2638 return err; 2639 } 2640 2641 if (tb[IFLA_VF_TRUST]) { 2642 struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]); 2643 2644 if (ivt->vf >= INT_MAX) 2645 return -EINVAL; 2646 err = -EOPNOTSUPP; 2647 if (ops->ndo_set_vf_trust) 2648 err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting); 2649 if (err < 0) 2650 return err; 2651 } 2652 2653 if (tb[IFLA_VF_IB_NODE_GUID]) { 2654 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]); 2655 2656 if (ivt->vf >= INT_MAX) 2657 return -EINVAL; 2658 if (!ops->ndo_set_vf_guid) 2659 return -EOPNOTSUPP; 2660 return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID); 2661 } 2662 2663 if (tb[IFLA_VF_IB_PORT_GUID]) { 2664 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]); 2665 2666 if (ivt->vf >= INT_MAX) 2667 return -EINVAL; 2668 if (!ops->ndo_set_vf_guid) 2669 return -EOPNOTSUPP; 2670 2671 return handle_vf_guid(dev, ivt, IFLA_VF_IB_PORT_GUID); 2672 } 2673 2674 return err; 2675 } 2676 2677 static int do_set_master(struct net_device *dev, int ifindex, 2678 struct netlink_ext_ack *extack) 2679 { 2680 struct net_device *upper_dev = netdev_master_upper_dev_get(dev); 2681 const struct net_device_ops *ops; 2682 int err; 2683 2684 if (upper_dev) { 2685 if (upper_dev->ifindex == ifindex) 2686 return 0; 2687 ops = upper_dev->netdev_ops; 2688 if (ops->ndo_del_slave) { 2689 err = ops->ndo_del_slave(upper_dev, dev); 2690 if (err) 2691 return err; 2692 } else { 2693 return -EOPNOTSUPP; 2694 } 2695 } 2696 2697 if (ifindex) { 2698 upper_dev = __dev_get_by_index(dev_net(dev), ifindex); 2699 if (!upper_dev) 2700 return -EINVAL; 2701 ops = upper_dev->netdev_ops; 2702 if (ops->ndo_add_slave) { 2703 err = ops->ndo_add_slave(upper_dev, dev, extack); 2704 if (err) 2705 return err; 2706 } else { 2707 return -EOPNOTSUPP; 2708 } 2709 } 2710 return 0; 2711 } 2712 2713 static const struct nla_policy ifla_proto_down_reason_policy[IFLA_PROTO_DOWN_REASON_VALUE + 1] = { 2714 [IFLA_PROTO_DOWN_REASON_MASK] = { .type = NLA_U32 }, 2715 [IFLA_PROTO_DOWN_REASON_VALUE] = { .type = NLA_U32 }, 2716 }; 2717 2718 static int do_set_proto_down(struct net_device *dev, 2719 struct nlattr *nl_proto_down, 2720 struct nlattr *nl_proto_down_reason, 2721 struct netlink_ext_ack *extack) 2722 { 2723 struct nlattr *pdreason[IFLA_PROTO_DOWN_REASON_MAX + 1]; 2724 unsigned long mask = 0; 2725 u32 value; 2726 bool proto_down; 2727 int err; 2728 2729 if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN)) { 2730 NL_SET_ERR_MSG(extack, "Protodown not supported by device"); 2731 return -EOPNOTSUPP; 2732 } 2733 2734 if (nl_proto_down_reason) { 2735 err = nla_parse_nested_deprecated(pdreason, 2736 IFLA_PROTO_DOWN_REASON_MAX, 2737 nl_proto_down_reason, 2738 ifla_proto_down_reason_policy, 2739 NULL); 2740 if (err < 0) 2741 return err; 2742 2743 if (!pdreason[IFLA_PROTO_DOWN_REASON_VALUE]) { 2744 NL_SET_ERR_MSG(extack, "Invalid protodown reason value"); 2745 return -EINVAL; 2746 } 2747 2748 value = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_VALUE]); 2749 2750 if (pdreason[IFLA_PROTO_DOWN_REASON_MASK]) 2751 mask = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_MASK]); 2752 2753 dev_change_proto_down_reason(dev, mask, value); 2754 } 2755 2756 if (nl_proto_down) { 2757 proto_down = nla_get_u8(nl_proto_down); 2758 2759 /* Don't turn off protodown if there are active reasons */ 2760 if (!proto_down && dev->proto_down_reason) { 2761 NL_SET_ERR_MSG(extack, "Cannot clear protodown, active reasons"); 2762 return -EBUSY; 2763 } 2764 err = dev_change_proto_down(dev, 2765 proto_down); 2766 if (err) 2767 return err; 2768 } 2769 2770 return 0; 2771 } 2772 2773 #define DO_SETLINK_MODIFIED 0x01 2774 /* notify flag means notify + modified. */ 2775 #define DO_SETLINK_NOTIFY 0x03 2776 static int do_setlink(const struct sk_buff *skb, 2777 struct net_device *dev, struct ifinfomsg *ifm, 2778 struct netlink_ext_ack *extack, 2779 struct nlattr **tb, int status) 2780 { 2781 const struct net_device_ops *ops = dev->netdev_ops; 2782 char ifname[IFNAMSIZ]; 2783 int err; 2784 2785 if (tb[IFLA_IFNAME]) 2786 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ); 2787 else 2788 ifname[0] = '\0'; 2789 2790 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_TARGET_NETNSID]) { 2791 const char *pat = ifname[0] ? ifname : NULL; 2792 struct net *net; 2793 int new_ifindex; 2794 2795 net = rtnl_link_get_net_capable(skb, dev_net(dev), 2796 tb, CAP_NET_ADMIN); 2797 if (IS_ERR(net)) { 2798 err = PTR_ERR(net); 2799 goto errout; 2800 } 2801 2802 if (tb[IFLA_NEW_IFINDEX]) 2803 new_ifindex = nla_get_s32(tb[IFLA_NEW_IFINDEX]); 2804 else 2805 new_ifindex = 0; 2806 2807 err = __dev_change_net_namespace(dev, net, pat, new_ifindex); 2808 put_net(net); 2809 if (err) 2810 goto errout; 2811 status |= DO_SETLINK_MODIFIED; 2812 } 2813 2814 if (tb[IFLA_MAP]) { 2815 struct rtnl_link_ifmap *u_map; 2816 struct ifmap k_map; 2817 2818 if (!ops->ndo_set_config) { 2819 err = -EOPNOTSUPP; 2820 goto errout; 2821 } 2822 2823 if (!netif_device_present(dev)) { 2824 err = -ENODEV; 2825 goto errout; 2826 } 2827 2828 u_map = nla_data(tb[IFLA_MAP]); 2829 k_map.mem_start = (unsigned long) u_map->mem_start; 2830 k_map.mem_end = (unsigned long) u_map->mem_end; 2831 k_map.base_addr = (unsigned short) u_map->base_addr; 2832 k_map.irq = (unsigned char) u_map->irq; 2833 k_map.dma = (unsigned char) u_map->dma; 2834 k_map.port = (unsigned char) u_map->port; 2835 2836 err = ops->ndo_set_config(dev, &k_map); 2837 if (err < 0) 2838 goto errout; 2839 2840 status |= DO_SETLINK_NOTIFY; 2841 } 2842 2843 if (tb[IFLA_ADDRESS]) { 2844 struct sockaddr *sa; 2845 int len; 2846 2847 len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len, 2848 sizeof(*sa)); 2849 sa = kmalloc(len, GFP_KERNEL); 2850 if (!sa) { 2851 err = -ENOMEM; 2852 goto errout; 2853 } 2854 sa->sa_family = dev->type; 2855 memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]), 2856 dev->addr_len); 2857 err = dev_set_mac_address_user(dev, sa, extack); 2858 kfree(sa); 2859 if (err) 2860 goto errout; 2861 status |= DO_SETLINK_MODIFIED; 2862 } 2863 2864 if (tb[IFLA_MTU]) { 2865 err = dev_set_mtu_ext(dev, nla_get_u32(tb[IFLA_MTU]), extack); 2866 if (err < 0) 2867 goto errout; 2868 status |= DO_SETLINK_MODIFIED; 2869 } 2870 2871 if (tb[IFLA_GROUP]) { 2872 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP])); 2873 status |= DO_SETLINK_NOTIFY; 2874 } 2875 2876 /* 2877 * Interface selected by interface index but interface 2878 * name provided implies that a name change has been 2879 * requested. 2880 */ 2881 if (ifm->ifi_index > 0 && ifname[0]) { 2882 err = dev_change_name(dev, ifname); 2883 if (err < 0) 2884 goto errout; 2885 status |= DO_SETLINK_MODIFIED; 2886 } 2887 2888 if (tb[IFLA_IFALIAS]) { 2889 err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]), 2890 nla_len(tb[IFLA_IFALIAS])); 2891 if (err < 0) 2892 goto errout; 2893 status |= DO_SETLINK_NOTIFY; 2894 } 2895 2896 if (tb[IFLA_BROADCAST]) { 2897 nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len); 2898 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); 2899 } 2900 2901 if (ifm->ifi_flags || ifm->ifi_change) { 2902 err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm), 2903 extack); 2904 if (err < 0) 2905 goto errout; 2906 } 2907 2908 if (tb[IFLA_MASTER]) { 2909 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack); 2910 if (err) 2911 goto errout; 2912 status |= DO_SETLINK_MODIFIED; 2913 } 2914 2915 if (tb[IFLA_CARRIER]) { 2916 err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER])); 2917 if (err) 2918 goto errout; 2919 status |= DO_SETLINK_MODIFIED; 2920 } 2921 2922 if (tb[IFLA_TXQLEN]) { 2923 unsigned int value = nla_get_u32(tb[IFLA_TXQLEN]); 2924 2925 err = dev_change_tx_queue_len(dev, value); 2926 if (err) 2927 goto errout; 2928 status |= DO_SETLINK_MODIFIED; 2929 } 2930 2931 if (tb[IFLA_GSO_MAX_SIZE]) { 2932 u32 max_size = nla_get_u32(tb[IFLA_GSO_MAX_SIZE]); 2933 2934 if (dev->gso_max_size ^ max_size) { 2935 netif_set_gso_max_size(dev, max_size); 2936 status |= DO_SETLINK_MODIFIED; 2937 } 2938 } 2939 2940 if (tb[IFLA_GSO_MAX_SEGS]) { 2941 u32 max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]); 2942 2943 if (dev->gso_max_segs ^ max_segs) { 2944 netif_set_gso_max_segs(dev, max_segs); 2945 status |= DO_SETLINK_MODIFIED; 2946 } 2947 } 2948 2949 if (tb[IFLA_GRO_MAX_SIZE]) { 2950 u32 gro_max_size = nla_get_u32(tb[IFLA_GRO_MAX_SIZE]); 2951 2952 if (dev->gro_max_size ^ gro_max_size) { 2953 netif_set_gro_max_size(dev, gro_max_size); 2954 status |= DO_SETLINK_MODIFIED; 2955 } 2956 } 2957 2958 if (tb[IFLA_GSO_IPV4_MAX_SIZE]) { 2959 u32 max_size = nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]); 2960 2961 if (dev->gso_ipv4_max_size ^ max_size) { 2962 netif_set_gso_ipv4_max_size(dev, max_size); 2963 status |= DO_SETLINK_MODIFIED; 2964 } 2965 } 2966 2967 if (tb[IFLA_GRO_IPV4_MAX_SIZE]) { 2968 u32 gro_max_size = nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE]); 2969 2970 if (dev->gro_ipv4_max_size ^ gro_max_size) { 2971 netif_set_gro_ipv4_max_size(dev, gro_max_size); 2972 status |= DO_SETLINK_MODIFIED; 2973 } 2974 } 2975 2976 if (tb[IFLA_OPERSTATE]) 2977 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE])); 2978 2979 if (tb[IFLA_LINKMODE]) { 2980 unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]); 2981 2982 write_lock(&dev_base_lock); 2983 if (dev->link_mode ^ value) 2984 status |= DO_SETLINK_NOTIFY; 2985 dev->link_mode = value; 2986 write_unlock(&dev_base_lock); 2987 } 2988 2989 if (tb[IFLA_VFINFO_LIST]) { 2990 struct nlattr *vfinfo[IFLA_VF_MAX + 1]; 2991 struct nlattr *attr; 2992 int rem; 2993 2994 nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) { 2995 if (nla_type(attr) != IFLA_VF_INFO || 2996 nla_len(attr) < NLA_HDRLEN) { 2997 err = -EINVAL; 2998 goto errout; 2999 } 3000 err = nla_parse_nested_deprecated(vfinfo, IFLA_VF_MAX, 3001 attr, 3002 ifla_vf_policy, 3003 NULL); 3004 if (err < 0) 3005 goto errout; 3006 err = do_setvfinfo(dev, vfinfo); 3007 if (err < 0) 3008 goto errout; 3009 status |= DO_SETLINK_NOTIFY; 3010 } 3011 } 3012 err = 0; 3013 3014 if (tb[IFLA_VF_PORTS]) { 3015 struct nlattr *port[IFLA_PORT_MAX+1]; 3016 struct nlattr *attr; 3017 int vf; 3018 int rem; 3019 3020 err = -EOPNOTSUPP; 3021 if (!ops->ndo_set_vf_port) 3022 goto errout; 3023 3024 nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) { 3025 if (nla_type(attr) != IFLA_VF_PORT || 3026 nla_len(attr) < NLA_HDRLEN) { 3027 err = -EINVAL; 3028 goto errout; 3029 } 3030 err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX, 3031 attr, 3032 ifla_port_policy, 3033 NULL); 3034 if (err < 0) 3035 goto errout; 3036 if (!port[IFLA_PORT_VF]) { 3037 err = -EOPNOTSUPP; 3038 goto errout; 3039 } 3040 vf = nla_get_u32(port[IFLA_PORT_VF]); 3041 err = ops->ndo_set_vf_port(dev, vf, port); 3042 if (err < 0) 3043 goto errout; 3044 status |= DO_SETLINK_NOTIFY; 3045 } 3046 } 3047 err = 0; 3048 3049 if (tb[IFLA_PORT_SELF]) { 3050 struct nlattr *port[IFLA_PORT_MAX+1]; 3051 3052 err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX, 3053 tb[IFLA_PORT_SELF], 3054 ifla_port_policy, NULL); 3055 if (err < 0) 3056 goto errout; 3057 3058 err = -EOPNOTSUPP; 3059 if (ops->ndo_set_vf_port) 3060 err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port); 3061 if (err < 0) 3062 goto errout; 3063 status |= DO_SETLINK_NOTIFY; 3064 } 3065 3066 if (tb[IFLA_AF_SPEC]) { 3067 struct nlattr *af; 3068 int rem; 3069 3070 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) { 3071 const struct rtnl_af_ops *af_ops; 3072 3073 BUG_ON(!(af_ops = rtnl_af_lookup(nla_type(af)))); 3074 3075 err = af_ops->set_link_af(dev, af, extack); 3076 if (err < 0) 3077 goto errout; 3078 3079 status |= DO_SETLINK_NOTIFY; 3080 } 3081 } 3082 err = 0; 3083 3084 if (tb[IFLA_PROTO_DOWN] || tb[IFLA_PROTO_DOWN_REASON]) { 3085 err = do_set_proto_down(dev, tb[IFLA_PROTO_DOWN], 3086 tb[IFLA_PROTO_DOWN_REASON], extack); 3087 if (err) 3088 goto errout; 3089 status |= DO_SETLINK_NOTIFY; 3090 } 3091 3092 if (tb[IFLA_XDP]) { 3093 struct nlattr *xdp[IFLA_XDP_MAX + 1]; 3094 u32 xdp_flags = 0; 3095 3096 err = nla_parse_nested_deprecated(xdp, IFLA_XDP_MAX, 3097 tb[IFLA_XDP], 3098 ifla_xdp_policy, NULL); 3099 if (err < 0) 3100 goto errout; 3101 3102 if (xdp[IFLA_XDP_ATTACHED] || xdp[IFLA_XDP_PROG_ID]) { 3103 err = -EINVAL; 3104 goto errout; 3105 } 3106 3107 if (xdp[IFLA_XDP_FLAGS]) { 3108 xdp_flags = nla_get_u32(xdp[IFLA_XDP_FLAGS]); 3109 if (xdp_flags & ~XDP_FLAGS_MASK) { 3110 err = -EINVAL; 3111 goto errout; 3112 } 3113 if (hweight32(xdp_flags & XDP_FLAGS_MODES) > 1) { 3114 err = -EINVAL; 3115 goto errout; 3116 } 3117 } 3118 3119 if (xdp[IFLA_XDP_FD]) { 3120 int expected_fd = -1; 3121 3122 if (xdp_flags & XDP_FLAGS_REPLACE) { 3123 if (!xdp[IFLA_XDP_EXPECTED_FD]) { 3124 err = -EINVAL; 3125 goto errout; 3126 } 3127 expected_fd = 3128 nla_get_s32(xdp[IFLA_XDP_EXPECTED_FD]); 3129 } 3130 3131 err = dev_change_xdp_fd(dev, extack, 3132 nla_get_s32(xdp[IFLA_XDP_FD]), 3133 expected_fd, 3134 xdp_flags); 3135 if (err) 3136 goto errout; 3137 status |= DO_SETLINK_NOTIFY; 3138 } 3139 } 3140 3141 errout: 3142 if (status & DO_SETLINK_MODIFIED) { 3143 if ((status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY) 3144 netdev_state_change(dev); 3145 3146 if (err < 0) 3147 net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n", 3148 dev->name); 3149 } 3150 3151 return err; 3152 } 3153 3154 static struct net_device *rtnl_dev_get(struct net *net, 3155 struct nlattr *tb[]) 3156 { 3157 char ifname[ALTIFNAMSIZ]; 3158 3159 if (tb[IFLA_IFNAME]) 3160 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ); 3161 else if (tb[IFLA_ALT_IFNAME]) 3162 nla_strscpy(ifname, tb[IFLA_ALT_IFNAME], ALTIFNAMSIZ); 3163 else 3164 return NULL; 3165 3166 return __dev_get_by_name(net, ifname); 3167 } 3168 3169 static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, 3170 struct netlink_ext_ack *extack) 3171 { 3172 struct net *net = sock_net(skb->sk); 3173 struct ifinfomsg *ifm; 3174 struct net_device *dev; 3175 int err; 3176 struct nlattr *tb[IFLA_MAX+1]; 3177 3178 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX, 3179 ifla_policy, extack); 3180 if (err < 0) 3181 goto errout; 3182 3183 err = rtnl_ensure_unique_netns(tb, extack, false); 3184 if (err < 0) 3185 goto errout; 3186 3187 err = -EINVAL; 3188 ifm = nlmsg_data(nlh); 3189 if (ifm->ifi_index > 0) 3190 dev = __dev_get_by_index(net, ifm->ifi_index); 3191 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) 3192 dev = rtnl_dev_get(net, tb); 3193 else 3194 goto errout; 3195 3196 if (dev == NULL) { 3197 err = -ENODEV; 3198 goto errout; 3199 } 3200 3201 err = validate_linkmsg(dev, tb, extack); 3202 if (err < 0) 3203 goto errout; 3204 3205 err = do_setlink(skb, dev, ifm, extack, tb, 0); 3206 errout: 3207 return err; 3208 } 3209 3210 static int rtnl_group_dellink(const struct net *net, int group) 3211 { 3212 struct net_device *dev, *aux; 3213 LIST_HEAD(list_kill); 3214 bool found = false; 3215 3216 if (!group) 3217 return -EPERM; 3218 3219 for_each_netdev(net, dev) { 3220 if (dev->group == group) { 3221 const struct rtnl_link_ops *ops; 3222 3223 found = true; 3224 ops = dev->rtnl_link_ops; 3225 if (!ops || !ops->dellink) 3226 return -EOPNOTSUPP; 3227 } 3228 } 3229 3230 if (!found) 3231 return -ENODEV; 3232 3233 for_each_netdev_safe(net, dev, aux) { 3234 if (dev->group == group) { 3235 const struct rtnl_link_ops *ops; 3236 3237 ops = dev->rtnl_link_ops; 3238 ops->dellink(dev, &list_kill); 3239 } 3240 } 3241 unregister_netdevice_many(&list_kill); 3242 3243 return 0; 3244 } 3245 3246 int rtnl_delete_link(struct net_device *dev, u32 portid, const struct nlmsghdr *nlh) 3247 { 3248 const struct rtnl_link_ops *ops; 3249 LIST_HEAD(list_kill); 3250 3251 ops = dev->rtnl_link_ops; 3252 if (!ops || !ops->dellink) 3253 return -EOPNOTSUPP; 3254 3255 ops->dellink(dev, &list_kill); 3256 unregister_netdevice_many_notify(&list_kill, portid, nlh); 3257 3258 return 0; 3259 } 3260 EXPORT_SYMBOL_GPL(rtnl_delete_link); 3261 3262 static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, 3263 struct netlink_ext_ack *extack) 3264 { 3265 struct net *net = sock_net(skb->sk); 3266 u32 portid = NETLINK_CB(skb).portid; 3267 struct net *tgt_net = net; 3268 struct net_device *dev = NULL; 3269 struct ifinfomsg *ifm; 3270 struct nlattr *tb[IFLA_MAX+1]; 3271 int err; 3272 int netnsid = -1; 3273 3274 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX, 3275 ifla_policy, extack); 3276 if (err < 0) 3277 return err; 3278 3279 err = rtnl_ensure_unique_netns(tb, extack, true); 3280 if (err < 0) 3281 return err; 3282 3283 if (tb[IFLA_TARGET_NETNSID]) { 3284 netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]); 3285 tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid); 3286 if (IS_ERR(tgt_net)) 3287 return PTR_ERR(tgt_net); 3288 } 3289 3290 err = -EINVAL; 3291 ifm = nlmsg_data(nlh); 3292 if (ifm->ifi_index > 0) 3293 dev = __dev_get_by_index(tgt_net, ifm->ifi_index); 3294 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) 3295 dev = rtnl_dev_get(tgt_net, tb); 3296 else if (tb[IFLA_GROUP]) 3297 err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP])); 3298 else 3299 goto out; 3300 3301 if (!dev) { 3302 if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME] || ifm->ifi_index > 0) 3303 err = -ENODEV; 3304 3305 goto out; 3306 } 3307 3308 err = rtnl_delete_link(dev, portid, nlh); 3309 3310 out: 3311 if (netnsid >= 0) 3312 put_net(tgt_net); 3313 3314 return err; 3315 } 3316 3317 int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm, 3318 u32 portid, const struct nlmsghdr *nlh) 3319 { 3320 unsigned int old_flags; 3321 int err; 3322 3323 old_flags = dev->flags; 3324 if (ifm && (ifm->ifi_flags || ifm->ifi_change)) { 3325 err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm), 3326 NULL); 3327 if (err < 0) 3328 return err; 3329 } 3330 3331 if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) { 3332 __dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags), portid, nlh); 3333 } else { 3334 dev->rtnl_link_state = RTNL_LINK_INITIALIZED; 3335 __dev_notify_flags(dev, old_flags, ~0U, portid, nlh); 3336 } 3337 return 0; 3338 } 3339 EXPORT_SYMBOL(rtnl_configure_link); 3340 3341 struct net_device *rtnl_create_link(struct net *net, const char *ifname, 3342 unsigned char name_assign_type, 3343 const struct rtnl_link_ops *ops, 3344 struct nlattr *tb[], 3345 struct netlink_ext_ack *extack) 3346 { 3347 struct net_device *dev; 3348 unsigned int num_tx_queues = 1; 3349 unsigned int num_rx_queues = 1; 3350 int err; 3351 3352 if (tb[IFLA_NUM_TX_QUEUES]) 3353 num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]); 3354 else if (ops->get_num_tx_queues) 3355 num_tx_queues = ops->get_num_tx_queues(); 3356 3357 if (tb[IFLA_NUM_RX_QUEUES]) 3358 num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]); 3359 else if (ops->get_num_rx_queues) 3360 num_rx_queues = ops->get_num_rx_queues(); 3361 3362 if (num_tx_queues < 1 || num_tx_queues > 4096) { 3363 NL_SET_ERR_MSG(extack, "Invalid number of transmit queues"); 3364 return ERR_PTR(-EINVAL); 3365 } 3366 3367 if (num_rx_queues < 1 || num_rx_queues > 4096) { 3368 NL_SET_ERR_MSG(extack, "Invalid number of receive queues"); 3369 return ERR_PTR(-EINVAL); 3370 } 3371 3372 if (ops->alloc) { 3373 dev = ops->alloc(tb, ifname, name_assign_type, 3374 num_tx_queues, num_rx_queues); 3375 if (IS_ERR(dev)) 3376 return dev; 3377 } else { 3378 dev = alloc_netdev_mqs(ops->priv_size, ifname, 3379 name_assign_type, ops->setup, 3380 num_tx_queues, num_rx_queues); 3381 } 3382 3383 if (!dev) 3384 return ERR_PTR(-ENOMEM); 3385 3386 err = validate_linkmsg(dev, tb, extack); 3387 if (err < 0) { 3388 free_netdev(dev); 3389 return ERR_PTR(err); 3390 } 3391 3392 dev_net_set(dev, net); 3393 dev->rtnl_link_ops = ops; 3394 dev->rtnl_link_state = RTNL_LINK_INITIALIZING; 3395 3396 if (tb[IFLA_MTU]) { 3397 u32 mtu = nla_get_u32(tb[IFLA_MTU]); 3398 3399 err = dev_validate_mtu(dev, mtu, extack); 3400 if (err) { 3401 free_netdev(dev); 3402 return ERR_PTR(err); 3403 } 3404 dev->mtu = mtu; 3405 } 3406 if (tb[IFLA_ADDRESS]) { 3407 __dev_addr_set(dev, nla_data(tb[IFLA_ADDRESS]), 3408 nla_len(tb[IFLA_ADDRESS])); 3409 dev->addr_assign_type = NET_ADDR_SET; 3410 } 3411 if (tb[IFLA_BROADCAST]) 3412 memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]), 3413 nla_len(tb[IFLA_BROADCAST])); 3414 if (tb[IFLA_TXQLEN]) 3415 dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]); 3416 if (tb[IFLA_OPERSTATE]) 3417 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE])); 3418 if (tb[IFLA_LINKMODE]) 3419 dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]); 3420 if (tb[IFLA_GROUP]) 3421 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP])); 3422 if (tb[IFLA_GSO_MAX_SIZE]) 3423 netif_set_gso_max_size(dev, nla_get_u32(tb[IFLA_GSO_MAX_SIZE])); 3424 if (tb[IFLA_GSO_MAX_SEGS]) 3425 netif_set_gso_max_segs(dev, nla_get_u32(tb[IFLA_GSO_MAX_SEGS])); 3426 if (tb[IFLA_GRO_MAX_SIZE]) 3427 netif_set_gro_max_size(dev, nla_get_u32(tb[IFLA_GRO_MAX_SIZE])); 3428 if (tb[IFLA_GSO_IPV4_MAX_SIZE]) 3429 netif_set_gso_ipv4_max_size(dev, nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE])); 3430 if (tb[IFLA_GRO_IPV4_MAX_SIZE]) 3431 netif_set_gro_ipv4_max_size(dev, nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE])); 3432 3433 return dev; 3434 } 3435 EXPORT_SYMBOL(rtnl_create_link); 3436 3437 static int rtnl_group_changelink(const struct sk_buff *skb, 3438 struct net *net, int group, 3439 struct ifinfomsg *ifm, 3440 struct netlink_ext_ack *extack, 3441 struct nlattr **tb) 3442 { 3443 struct net_device *dev, *aux; 3444 int err; 3445 3446 for_each_netdev_safe(net, dev, aux) { 3447 if (dev->group == group) { 3448 err = validate_linkmsg(dev, tb, extack); 3449 if (err < 0) 3450 return err; 3451 err = do_setlink(skb, dev, ifm, extack, tb, 0); 3452 if (err < 0) 3453 return err; 3454 } 3455 } 3456 3457 return 0; 3458 } 3459 3460 static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm, 3461 const struct rtnl_link_ops *ops, 3462 const struct nlmsghdr *nlh, 3463 struct nlattr **tb, struct nlattr **data, 3464 struct netlink_ext_ack *extack) 3465 { 3466 unsigned char name_assign_type = NET_NAME_USER; 3467 struct net *net = sock_net(skb->sk); 3468 u32 portid = NETLINK_CB(skb).portid; 3469 struct net *dest_net, *link_net; 3470 struct net_device *dev; 3471 char ifname[IFNAMSIZ]; 3472 int err; 3473 3474 if (!ops->alloc && !ops->setup) 3475 return -EOPNOTSUPP; 3476 3477 if (tb[IFLA_IFNAME]) { 3478 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ); 3479 } else { 3480 snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind); 3481 name_assign_type = NET_NAME_ENUM; 3482 } 3483 3484 dest_net = rtnl_link_get_net_capable(skb, net, tb, CAP_NET_ADMIN); 3485 if (IS_ERR(dest_net)) 3486 return PTR_ERR(dest_net); 3487 3488 if (tb[IFLA_LINK_NETNSID]) { 3489 int id = nla_get_s32(tb[IFLA_LINK_NETNSID]); 3490 3491 link_net = get_net_ns_by_id(dest_net, id); 3492 if (!link_net) { 3493 NL_SET_ERR_MSG(extack, "Unknown network namespace id"); 3494 err = -EINVAL; 3495 goto out; 3496 } 3497 err = -EPERM; 3498 if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN)) 3499 goto out; 3500 } else { 3501 link_net = NULL; 3502 } 3503 3504 dev = rtnl_create_link(link_net ? : dest_net, ifname, 3505 name_assign_type, ops, tb, extack); 3506 if (IS_ERR(dev)) { 3507 err = PTR_ERR(dev); 3508 goto out; 3509 } 3510 3511 dev->ifindex = ifm->ifi_index; 3512 3513 if (ops->newlink) 3514 err = ops->newlink(link_net ? : net, dev, tb, data, extack); 3515 else 3516 err = register_netdevice(dev); 3517 if (err < 0) { 3518 free_netdev(dev); 3519 goto out; 3520 } 3521 3522 err = rtnl_configure_link(dev, ifm, portid, nlh); 3523 if (err < 0) 3524 goto out_unregister; 3525 if (link_net) { 3526 err = dev_change_net_namespace(dev, dest_net, ifname); 3527 if (err < 0) 3528 goto out_unregister; 3529 } 3530 if (tb[IFLA_MASTER]) { 3531 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack); 3532 if (err) 3533 goto out_unregister; 3534 } 3535 out: 3536 if (link_net) 3537 put_net(link_net); 3538 put_net(dest_net); 3539 return err; 3540 out_unregister: 3541 if (ops->newlink) { 3542 LIST_HEAD(list_kill); 3543 3544 ops->dellink(dev, &list_kill); 3545 unregister_netdevice_many(&list_kill); 3546 } else { 3547 unregister_netdevice(dev); 3548 } 3549 goto out; 3550 } 3551 3552 struct rtnl_newlink_tbs { 3553 struct nlattr *tb[IFLA_MAX + 1]; 3554 struct nlattr *attr[RTNL_MAX_TYPE + 1]; 3555 struct nlattr *slave_attr[RTNL_SLAVE_MAX_TYPE + 1]; 3556 }; 3557 3558 static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, 3559 struct rtnl_newlink_tbs *tbs, 3560 struct netlink_ext_ack *extack) 3561 { 3562 struct nlattr *linkinfo[IFLA_INFO_MAX + 1]; 3563 struct nlattr ** const tb = tbs->tb; 3564 const struct rtnl_link_ops *m_ops; 3565 struct net_device *master_dev; 3566 struct net *net = sock_net(skb->sk); 3567 const struct rtnl_link_ops *ops; 3568 struct nlattr **slave_data; 3569 char kind[MODULE_NAME_LEN]; 3570 struct net_device *dev; 3571 struct ifinfomsg *ifm; 3572 struct nlattr **data; 3573 bool link_specified; 3574 int err; 3575 3576 #ifdef CONFIG_MODULES 3577 replay: 3578 #endif 3579 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX, 3580 ifla_policy, extack); 3581 if (err < 0) 3582 return err; 3583 3584 err = rtnl_ensure_unique_netns(tb, extack, false); 3585 if (err < 0) 3586 return err; 3587 3588 ifm = nlmsg_data(nlh); 3589 if (ifm->ifi_index > 0) { 3590 link_specified = true; 3591 dev = __dev_get_by_index(net, ifm->ifi_index); 3592 } else if (ifm->ifi_index < 0) { 3593 NL_SET_ERR_MSG(extack, "ifindex can't be negative"); 3594 return -EINVAL; 3595 } else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) { 3596 link_specified = true; 3597 dev = rtnl_dev_get(net, tb); 3598 } else { 3599 link_specified = false; 3600 dev = NULL; 3601 } 3602 3603 master_dev = NULL; 3604 m_ops = NULL; 3605 if (dev) { 3606 master_dev = netdev_master_upper_dev_get(dev); 3607 if (master_dev) 3608 m_ops = master_dev->rtnl_link_ops; 3609 } 3610 3611 if (tb[IFLA_LINKINFO]) { 3612 err = nla_parse_nested_deprecated(linkinfo, IFLA_INFO_MAX, 3613 tb[IFLA_LINKINFO], 3614 ifla_info_policy, NULL); 3615 if (err < 0) 3616 return err; 3617 } else 3618 memset(linkinfo, 0, sizeof(linkinfo)); 3619 3620 if (linkinfo[IFLA_INFO_KIND]) { 3621 nla_strscpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind)); 3622 ops = rtnl_link_ops_get(kind); 3623 } else { 3624 kind[0] = '\0'; 3625 ops = NULL; 3626 } 3627 3628 data = NULL; 3629 if (ops) { 3630 if (ops->maxtype > RTNL_MAX_TYPE) 3631 return -EINVAL; 3632 3633 if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) { 3634 err = nla_parse_nested_deprecated(tbs->attr, ops->maxtype, 3635 linkinfo[IFLA_INFO_DATA], 3636 ops->policy, extack); 3637 if (err < 0) 3638 return err; 3639 data = tbs->attr; 3640 } 3641 if (ops->validate) { 3642 err = ops->validate(tb, data, extack); 3643 if (err < 0) 3644 return err; 3645 } 3646 } 3647 3648 slave_data = NULL; 3649 if (m_ops) { 3650 if (m_ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE) 3651 return -EINVAL; 3652 3653 if (m_ops->slave_maxtype && 3654 linkinfo[IFLA_INFO_SLAVE_DATA]) { 3655 err = nla_parse_nested_deprecated(tbs->slave_attr, 3656 m_ops->slave_maxtype, 3657 linkinfo[IFLA_INFO_SLAVE_DATA], 3658 m_ops->slave_policy, 3659 extack); 3660 if (err < 0) 3661 return err; 3662 slave_data = tbs->slave_attr; 3663 } 3664 } 3665 3666 if (dev) { 3667 int status = 0; 3668 3669 if (nlh->nlmsg_flags & NLM_F_EXCL) 3670 return -EEXIST; 3671 if (nlh->nlmsg_flags & NLM_F_REPLACE) 3672 return -EOPNOTSUPP; 3673 3674 err = validate_linkmsg(dev, tb, extack); 3675 if (err < 0) 3676 return err; 3677 3678 if (linkinfo[IFLA_INFO_DATA]) { 3679 if (!ops || ops != dev->rtnl_link_ops || 3680 !ops->changelink) 3681 return -EOPNOTSUPP; 3682 3683 err = ops->changelink(dev, tb, data, extack); 3684 if (err < 0) 3685 return err; 3686 status |= DO_SETLINK_NOTIFY; 3687 } 3688 3689 if (linkinfo[IFLA_INFO_SLAVE_DATA]) { 3690 if (!m_ops || !m_ops->slave_changelink) 3691 return -EOPNOTSUPP; 3692 3693 err = m_ops->slave_changelink(master_dev, dev, tb, 3694 slave_data, extack); 3695 if (err < 0) 3696 return err; 3697 status |= DO_SETLINK_NOTIFY; 3698 } 3699 3700 return do_setlink(skb, dev, ifm, extack, tb, status); 3701 } 3702 3703 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) { 3704 /* No dev found and NLM_F_CREATE not set. Requested dev does not exist, 3705 * or it's for a group 3706 */ 3707 if (link_specified) 3708 return -ENODEV; 3709 if (tb[IFLA_GROUP]) 3710 return rtnl_group_changelink(skb, net, 3711 nla_get_u32(tb[IFLA_GROUP]), 3712 ifm, extack, tb); 3713 return -ENODEV; 3714 } 3715 3716 if (tb[IFLA_MAP] || tb[IFLA_PROTINFO]) 3717 return -EOPNOTSUPP; 3718 3719 if (!ops) { 3720 #ifdef CONFIG_MODULES 3721 if (kind[0]) { 3722 __rtnl_unlock(); 3723 request_module("rtnl-link-%s", kind); 3724 rtnl_lock(); 3725 ops = rtnl_link_ops_get(kind); 3726 if (ops) 3727 goto replay; 3728 } 3729 #endif 3730 NL_SET_ERR_MSG(extack, "Unknown device type"); 3731 return -EOPNOTSUPP; 3732 } 3733 3734 return rtnl_newlink_create(skb, ifm, ops, nlh, tb, data, extack); 3735 } 3736 3737 static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, 3738 struct netlink_ext_ack *extack) 3739 { 3740 struct rtnl_newlink_tbs *tbs; 3741 int ret; 3742 3743 tbs = kmalloc(sizeof(*tbs), GFP_KERNEL); 3744 if (!tbs) 3745 return -ENOMEM; 3746 3747 ret = __rtnl_newlink(skb, nlh, tbs, extack); 3748 kfree(tbs); 3749 return ret; 3750 } 3751 3752 static int rtnl_valid_getlink_req(struct sk_buff *skb, 3753 const struct nlmsghdr *nlh, 3754 struct nlattr **tb, 3755 struct netlink_ext_ack *extack) 3756 { 3757 struct ifinfomsg *ifm; 3758 int i, err; 3759 3760 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) { 3761 NL_SET_ERR_MSG(extack, "Invalid header for get link"); 3762 return -EINVAL; 3763 } 3764 3765 if (!netlink_strict_get_check(skb)) 3766 return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX, 3767 ifla_policy, extack); 3768 3769 ifm = nlmsg_data(nlh); 3770 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags || 3771 ifm->ifi_change) { 3772 NL_SET_ERR_MSG(extack, "Invalid values in header for get link request"); 3773 return -EINVAL; 3774 } 3775 3776 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFLA_MAX, 3777 ifla_policy, extack); 3778 if (err) 3779 return err; 3780 3781 for (i = 0; i <= IFLA_MAX; i++) { 3782 if (!tb[i]) 3783 continue; 3784 3785 switch (i) { 3786 case IFLA_IFNAME: 3787 case IFLA_ALT_IFNAME: 3788 case IFLA_EXT_MASK: 3789 case IFLA_TARGET_NETNSID: 3790 break; 3791 default: 3792 NL_SET_ERR_MSG(extack, "Unsupported attribute in get link request"); 3793 return -EINVAL; 3794 } 3795 } 3796 3797 return 0; 3798 } 3799 3800 static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh, 3801 struct netlink_ext_ack *extack) 3802 { 3803 struct net *net = sock_net(skb->sk); 3804 struct net *tgt_net = net; 3805 struct ifinfomsg *ifm; 3806 struct nlattr *tb[IFLA_MAX+1]; 3807 struct net_device *dev = NULL; 3808 struct sk_buff *nskb; 3809 int netnsid = -1; 3810 int err; 3811 u32 ext_filter_mask = 0; 3812 3813 err = rtnl_valid_getlink_req(skb, nlh, tb, extack); 3814 if (err < 0) 3815 return err; 3816 3817 err = rtnl_ensure_unique_netns(tb, extack, true); 3818 if (err < 0) 3819 return err; 3820 3821 if (tb[IFLA_TARGET_NETNSID]) { 3822 netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]); 3823 tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid); 3824 if (IS_ERR(tgt_net)) 3825 return PTR_ERR(tgt_net); 3826 } 3827 3828 if (tb[IFLA_EXT_MASK]) 3829 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); 3830 3831 err = -EINVAL; 3832 ifm = nlmsg_data(nlh); 3833 if (ifm->ifi_index > 0) 3834 dev = __dev_get_by_index(tgt_net, ifm->ifi_index); 3835 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) 3836 dev = rtnl_dev_get(tgt_net, tb); 3837 else 3838 goto out; 3839 3840 err = -ENODEV; 3841 if (dev == NULL) 3842 goto out; 3843 3844 err = -ENOBUFS; 3845 nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL); 3846 if (nskb == NULL) 3847 goto out; 3848 3849 err = rtnl_fill_ifinfo(nskb, dev, net, 3850 RTM_NEWLINK, NETLINK_CB(skb).portid, 3851 nlh->nlmsg_seq, 0, 0, ext_filter_mask, 3852 0, NULL, 0, netnsid, GFP_KERNEL); 3853 if (err < 0) { 3854 /* -EMSGSIZE implies BUG in if_nlmsg_size */ 3855 WARN_ON(err == -EMSGSIZE); 3856 kfree_skb(nskb); 3857 } else 3858 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid); 3859 out: 3860 if (netnsid >= 0) 3861 put_net(tgt_net); 3862 3863 return err; 3864 } 3865 3866 static int rtnl_alt_ifname(int cmd, struct net_device *dev, struct nlattr *attr, 3867 bool *changed, struct netlink_ext_ack *extack) 3868 { 3869 char *alt_ifname; 3870 size_t size; 3871 int err; 3872 3873 err = nla_validate(attr, attr->nla_len, IFLA_MAX, ifla_policy, extack); 3874 if (err) 3875 return err; 3876 3877 if (cmd == RTM_NEWLINKPROP) { 3878 size = rtnl_prop_list_size(dev); 3879 size += nla_total_size(ALTIFNAMSIZ); 3880 if (size >= U16_MAX) { 3881 NL_SET_ERR_MSG(extack, 3882 "effective property list too long"); 3883 return -EINVAL; 3884 } 3885 } 3886 3887 alt_ifname = nla_strdup(attr, GFP_KERNEL_ACCOUNT); 3888 if (!alt_ifname) 3889 return -ENOMEM; 3890 3891 if (cmd == RTM_NEWLINKPROP) { 3892 err = netdev_name_node_alt_create(dev, alt_ifname); 3893 if (!err) 3894 alt_ifname = NULL; 3895 } else if (cmd == RTM_DELLINKPROP) { 3896 err = netdev_name_node_alt_destroy(dev, alt_ifname); 3897 } else { 3898 WARN_ON_ONCE(1); 3899 err = -EINVAL; 3900 } 3901 3902 kfree(alt_ifname); 3903 if (!err) 3904 *changed = true; 3905 return err; 3906 } 3907 3908 static int rtnl_linkprop(int cmd, struct sk_buff *skb, struct nlmsghdr *nlh, 3909 struct netlink_ext_ack *extack) 3910 { 3911 struct net *net = sock_net(skb->sk); 3912 struct nlattr *tb[IFLA_MAX + 1]; 3913 struct net_device *dev; 3914 struct ifinfomsg *ifm; 3915 bool changed = false; 3916 struct nlattr *attr; 3917 int err, rem; 3918 3919 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack); 3920 if (err) 3921 return err; 3922 3923 err = rtnl_ensure_unique_netns(tb, extack, true); 3924 if (err) 3925 return err; 3926 3927 ifm = nlmsg_data(nlh); 3928 if (ifm->ifi_index > 0) 3929 dev = __dev_get_by_index(net, ifm->ifi_index); 3930 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) 3931 dev = rtnl_dev_get(net, tb); 3932 else 3933 return -EINVAL; 3934 3935 if (!dev) 3936 return -ENODEV; 3937 3938 if (!tb[IFLA_PROP_LIST]) 3939 return 0; 3940 3941 nla_for_each_nested(attr, tb[IFLA_PROP_LIST], rem) { 3942 switch (nla_type(attr)) { 3943 case IFLA_ALT_IFNAME: 3944 err = rtnl_alt_ifname(cmd, dev, attr, &changed, extack); 3945 if (err) 3946 return err; 3947 break; 3948 } 3949 } 3950 3951 if (changed) 3952 netdev_state_change(dev); 3953 return 0; 3954 } 3955 3956 static int rtnl_newlinkprop(struct sk_buff *skb, struct nlmsghdr *nlh, 3957 struct netlink_ext_ack *extack) 3958 { 3959 return rtnl_linkprop(RTM_NEWLINKPROP, skb, nlh, extack); 3960 } 3961 3962 static int rtnl_dellinkprop(struct sk_buff *skb, struct nlmsghdr *nlh, 3963 struct netlink_ext_ack *extack) 3964 { 3965 return rtnl_linkprop(RTM_DELLINKPROP, skb, nlh, extack); 3966 } 3967 3968 static u32 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh) 3969 { 3970 struct net *net = sock_net(skb->sk); 3971 size_t min_ifinfo_dump_size = 0; 3972 struct nlattr *tb[IFLA_MAX+1]; 3973 u32 ext_filter_mask = 0; 3974 struct net_device *dev; 3975 int hdrlen; 3976 3977 /* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */ 3978 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ? 3979 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg); 3980 3981 if (nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, NULL) >= 0) { 3982 if (tb[IFLA_EXT_MASK]) 3983 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); 3984 } 3985 3986 if (!ext_filter_mask) 3987 return NLMSG_GOODSIZE; 3988 /* 3989 * traverse the list of net devices and compute the minimum 3990 * buffer size based upon the filter mask. 3991 */ 3992 rcu_read_lock(); 3993 for_each_netdev_rcu(net, dev) { 3994 min_ifinfo_dump_size = max(min_ifinfo_dump_size, 3995 if_nlmsg_size(dev, ext_filter_mask)); 3996 } 3997 rcu_read_unlock(); 3998 3999 return nlmsg_total_size(min_ifinfo_dump_size); 4000 } 4001 4002 static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb) 4003 { 4004 int idx; 4005 int s_idx = cb->family; 4006 int type = cb->nlh->nlmsg_type - RTM_BASE; 4007 int ret = 0; 4008 4009 if (s_idx == 0) 4010 s_idx = 1; 4011 4012 for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) { 4013 struct rtnl_link __rcu **tab; 4014 struct rtnl_link *link; 4015 rtnl_dumpit_func dumpit; 4016 4017 if (idx < s_idx || idx == PF_PACKET) 4018 continue; 4019 4020 if (type < 0 || type >= RTM_NR_MSGTYPES) 4021 continue; 4022 4023 tab = rcu_dereference_rtnl(rtnl_msg_handlers[idx]); 4024 if (!tab) 4025 continue; 4026 4027 link = rcu_dereference_rtnl(tab[type]); 4028 if (!link) 4029 continue; 4030 4031 dumpit = link->dumpit; 4032 if (!dumpit) 4033 continue; 4034 4035 if (idx > s_idx) { 4036 memset(&cb->args[0], 0, sizeof(cb->args)); 4037 cb->prev_seq = 0; 4038 cb->seq = 0; 4039 } 4040 ret = dumpit(skb, cb); 4041 if (ret) 4042 break; 4043 } 4044 cb->family = idx; 4045 4046 return skb->len ? : ret; 4047 } 4048 4049 struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev, 4050 unsigned int change, 4051 u32 event, gfp_t flags, int *new_nsid, 4052 int new_ifindex, u32 portid, 4053 const struct nlmsghdr *nlh) 4054 { 4055 struct net *net = dev_net(dev); 4056 struct sk_buff *skb; 4057 int err = -ENOBUFS; 4058 u32 seq = 0; 4059 4060 skb = nlmsg_new(if_nlmsg_size(dev, 0), flags); 4061 if (skb == NULL) 4062 goto errout; 4063 4064 if (nlmsg_report(nlh)) 4065 seq = nlmsg_seq(nlh); 4066 else 4067 portid = 0; 4068 4069 err = rtnl_fill_ifinfo(skb, dev, dev_net(dev), 4070 type, portid, seq, change, 0, 0, event, 4071 new_nsid, new_ifindex, -1, flags); 4072 if (err < 0) { 4073 /* -EMSGSIZE implies BUG in if_nlmsg_size() */ 4074 WARN_ON(err == -EMSGSIZE); 4075 kfree_skb(skb); 4076 goto errout; 4077 } 4078 return skb; 4079 errout: 4080 if (err < 0) 4081 rtnl_set_sk_err(net, RTNLGRP_LINK, err); 4082 return NULL; 4083 } 4084 4085 void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags, 4086 u32 portid, const struct nlmsghdr *nlh) 4087 { 4088 struct net *net = dev_net(dev); 4089 4090 rtnl_notify(skb, net, portid, RTNLGRP_LINK, nlh, flags); 4091 } 4092 4093 static void rtmsg_ifinfo_event(int type, struct net_device *dev, 4094 unsigned int change, u32 event, 4095 gfp_t flags, int *new_nsid, int new_ifindex, 4096 u32 portid, const struct nlmsghdr *nlh) 4097 { 4098 struct sk_buff *skb; 4099 4100 if (dev->reg_state != NETREG_REGISTERED) 4101 return; 4102 4103 skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid, 4104 new_ifindex, portid, nlh); 4105 if (skb) 4106 rtmsg_ifinfo_send(skb, dev, flags, portid, nlh); 4107 } 4108 4109 void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change, 4110 gfp_t flags, u32 portid, const struct nlmsghdr *nlh) 4111 { 4112 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags, 4113 NULL, 0, portid, nlh); 4114 } 4115 4116 void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change, 4117 gfp_t flags, int *new_nsid, int new_ifindex) 4118 { 4119 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags, 4120 new_nsid, new_ifindex, 0, NULL); 4121 } 4122 4123 static int nlmsg_populate_fdb_fill(struct sk_buff *skb, 4124 struct net_device *dev, 4125 u8 *addr, u16 vid, u32 pid, u32 seq, 4126 int type, unsigned int flags, 4127 int nlflags, u16 ndm_state) 4128 { 4129 struct nlmsghdr *nlh; 4130 struct ndmsg *ndm; 4131 4132 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags); 4133 if (!nlh) 4134 return -EMSGSIZE; 4135 4136 ndm = nlmsg_data(nlh); 4137 ndm->ndm_family = AF_BRIDGE; 4138 ndm->ndm_pad1 = 0; 4139 ndm->ndm_pad2 = 0; 4140 ndm->ndm_flags = flags; 4141 ndm->ndm_type = 0; 4142 ndm->ndm_ifindex = dev->ifindex; 4143 ndm->ndm_state = ndm_state; 4144 4145 if (nla_put(skb, NDA_LLADDR, dev->addr_len, addr)) 4146 goto nla_put_failure; 4147 if (vid) 4148 if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid)) 4149 goto nla_put_failure; 4150 4151 nlmsg_end(skb, nlh); 4152 return 0; 4153 4154 nla_put_failure: 4155 nlmsg_cancel(skb, nlh); 4156 return -EMSGSIZE; 4157 } 4158 4159 static inline size_t rtnl_fdb_nlmsg_size(const struct net_device *dev) 4160 { 4161 return NLMSG_ALIGN(sizeof(struct ndmsg)) + 4162 nla_total_size(dev->addr_len) + /* NDA_LLADDR */ 4163 nla_total_size(sizeof(u16)) + /* NDA_VLAN */ 4164 0; 4165 } 4166 4167 static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type, 4168 u16 ndm_state) 4169 { 4170 struct net *net = dev_net(dev); 4171 struct sk_buff *skb; 4172 int err = -ENOBUFS; 4173 4174 skb = nlmsg_new(rtnl_fdb_nlmsg_size(dev), GFP_ATOMIC); 4175 if (!skb) 4176 goto errout; 4177 4178 err = nlmsg_populate_fdb_fill(skb, dev, addr, vid, 4179 0, 0, type, NTF_SELF, 0, ndm_state); 4180 if (err < 0) { 4181 kfree_skb(skb); 4182 goto errout; 4183 } 4184 4185 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC); 4186 return; 4187 errout: 4188 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err); 4189 } 4190 4191 /* 4192 * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry 4193 */ 4194 int ndo_dflt_fdb_add(struct ndmsg *ndm, 4195 struct nlattr *tb[], 4196 struct net_device *dev, 4197 const unsigned char *addr, u16 vid, 4198 u16 flags) 4199 { 4200 int err = -EINVAL; 4201 4202 /* If aging addresses are supported device will need to 4203 * implement its own handler for this. 4204 */ 4205 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { 4206 netdev_info(dev, "default FDB implementation only supports local addresses\n"); 4207 return err; 4208 } 4209 4210 if (tb[NDA_FLAGS_EXT]) { 4211 netdev_info(dev, "invalid flags given to default FDB implementation\n"); 4212 return err; 4213 } 4214 4215 if (vid) { 4216 netdev_info(dev, "vlans aren't supported yet for dev_uc|mc_add()\n"); 4217 return err; 4218 } 4219 4220 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) 4221 err = dev_uc_add_excl(dev, addr); 4222 else if (is_multicast_ether_addr(addr)) 4223 err = dev_mc_add_excl(dev, addr); 4224 4225 /* Only return duplicate errors if NLM_F_EXCL is set */ 4226 if (err == -EEXIST && !(flags & NLM_F_EXCL)) 4227 err = 0; 4228 4229 return err; 4230 } 4231 EXPORT_SYMBOL(ndo_dflt_fdb_add); 4232 4233 static int fdb_vid_parse(struct nlattr *vlan_attr, u16 *p_vid, 4234 struct netlink_ext_ack *extack) 4235 { 4236 u16 vid = 0; 4237 4238 if (vlan_attr) { 4239 if (nla_len(vlan_attr) != sizeof(u16)) { 4240 NL_SET_ERR_MSG(extack, "invalid vlan attribute size"); 4241 return -EINVAL; 4242 } 4243 4244 vid = nla_get_u16(vlan_attr); 4245 4246 if (!vid || vid >= VLAN_VID_MASK) { 4247 NL_SET_ERR_MSG(extack, "invalid vlan id"); 4248 return -EINVAL; 4249 } 4250 } 4251 *p_vid = vid; 4252 return 0; 4253 } 4254 4255 static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, 4256 struct netlink_ext_ack *extack) 4257 { 4258 struct net *net = sock_net(skb->sk); 4259 struct ndmsg *ndm; 4260 struct nlattr *tb[NDA_MAX+1]; 4261 struct net_device *dev; 4262 u8 *addr; 4263 u16 vid; 4264 int err; 4265 4266 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, 4267 extack); 4268 if (err < 0) 4269 return err; 4270 4271 ndm = nlmsg_data(nlh); 4272 if (ndm->ndm_ifindex == 0) { 4273 NL_SET_ERR_MSG(extack, "invalid ifindex"); 4274 return -EINVAL; 4275 } 4276 4277 dev = __dev_get_by_index(net, ndm->ndm_ifindex); 4278 if (dev == NULL) { 4279 NL_SET_ERR_MSG(extack, "unknown ifindex"); 4280 return -ENODEV; 4281 } 4282 4283 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) { 4284 NL_SET_ERR_MSG(extack, "invalid address"); 4285 return -EINVAL; 4286 } 4287 4288 if (dev->type != ARPHRD_ETHER) { 4289 NL_SET_ERR_MSG(extack, "FDB add only supported for Ethernet devices"); 4290 return -EINVAL; 4291 } 4292 4293 addr = nla_data(tb[NDA_LLADDR]); 4294 4295 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack); 4296 if (err) 4297 return err; 4298 4299 err = -EOPNOTSUPP; 4300 4301 /* Support fdb on master device the net/bridge default case */ 4302 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) && 4303 netif_is_bridge_port(dev)) { 4304 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 4305 const struct net_device_ops *ops = br_dev->netdev_ops; 4306 4307 err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid, 4308 nlh->nlmsg_flags, extack); 4309 if (err) 4310 goto out; 4311 else 4312 ndm->ndm_flags &= ~NTF_MASTER; 4313 } 4314 4315 /* Embedded bridge, macvlan, and any other device support */ 4316 if ((ndm->ndm_flags & NTF_SELF)) { 4317 if (dev->netdev_ops->ndo_fdb_add) 4318 err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr, 4319 vid, 4320 nlh->nlmsg_flags, 4321 extack); 4322 else 4323 err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, 4324 nlh->nlmsg_flags); 4325 4326 if (!err) { 4327 rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH, 4328 ndm->ndm_state); 4329 ndm->ndm_flags &= ~NTF_SELF; 4330 } 4331 } 4332 out: 4333 return err; 4334 } 4335 4336 /* 4337 * ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry 4338 */ 4339 int ndo_dflt_fdb_del(struct ndmsg *ndm, 4340 struct nlattr *tb[], 4341 struct net_device *dev, 4342 const unsigned char *addr, u16 vid) 4343 { 4344 int err = -EINVAL; 4345 4346 /* If aging addresses are supported device will need to 4347 * implement its own handler for this. 4348 */ 4349 if (!(ndm->ndm_state & NUD_PERMANENT)) { 4350 netdev_info(dev, "default FDB implementation only supports local addresses\n"); 4351 return err; 4352 } 4353 4354 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) 4355 err = dev_uc_del(dev, addr); 4356 else if (is_multicast_ether_addr(addr)) 4357 err = dev_mc_del(dev, addr); 4358 4359 return err; 4360 } 4361 EXPORT_SYMBOL(ndo_dflt_fdb_del); 4362 4363 static const struct nla_policy fdb_del_bulk_policy[NDA_MAX + 1] = { 4364 [NDA_VLAN] = { .type = NLA_U16 }, 4365 [NDA_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 1), 4366 [NDA_NDM_STATE_MASK] = { .type = NLA_U16 }, 4367 [NDA_NDM_FLAGS_MASK] = { .type = NLA_U8 }, 4368 }; 4369 4370 static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, 4371 struct netlink_ext_ack *extack) 4372 { 4373 bool del_bulk = !!(nlh->nlmsg_flags & NLM_F_BULK); 4374 struct net *net = sock_net(skb->sk); 4375 const struct net_device_ops *ops; 4376 struct ndmsg *ndm; 4377 struct nlattr *tb[NDA_MAX+1]; 4378 struct net_device *dev; 4379 __u8 *addr = NULL; 4380 int err; 4381 u16 vid; 4382 4383 if (!netlink_capable(skb, CAP_NET_ADMIN)) 4384 return -EPERM; 4385 4386 if (!del_bulk) { 4387 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, 4388 NULL, extack); 4389 } else { 4390 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, 4391 fdb_del_bulk_policy, extack); 4392 } 4393 if (err < 0) 4394 return err; 4395 4396 ndm = nlmsg_data(nlh); 4397 if (ndm->ndm_ifindex == 0) { 4398 NL_SET_ERR_MSG(extack, "invalid ifindex"); 4399 return -EINVAL; 4400 } 4401 4402 dev = __dev_get_by_index(net, ndm->ndm_ifindex); 4403 if (dev == NULL) { 4404 NL_SET_ERR_MSG(extack, "unknown ifindex"); 4405 return -ENODEV; 4406 } 4407 4408 if (!del_bulk) { 4409 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) { 4410 NL_SET_ERR_MSG(extack, "invalid address"); 4411 return -EINVAL; 4412 } 4413 addr = nla_data(tb[NDA_LLADDR]); 4414 } 4415 4416 if (dev->type != ARPHRD_ETHER) { 4417 NL_SET_ERR_MSG(extack, "FDB delete only supported for Ethernet devices"); 4418 return -EINVAL; 4419 } 4420 4421 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack); 4422 if (err) 4423 return err; 4424 4425 err = -EOPNOTSUPP; 4426 4427 /* Support fdb on master device the net/bridge default case */ 4428 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) && 4429 netif_is_bridge_port(dev)) { 4430 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 4431 4432 ops = br_dev->netdev_ops; 4433 if (!del_bulk) { 4434 if (ops->ndo_fdb_del) 4435 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack); 4436 } else { 4437 if (ops->ndo_fdb_del_bulk) 4438 err = ops->ndo_fdb_del_bulk(ndm, tb, dev, vid, 4439 extack); 4440 } 4441 4442 if (err) 4443 goto out; 4444 else 4445 ndm->ndm_flags &= ~NTF_MASTER; 4446 } 4447 4448 /* Embedded bridge, macvlan, and any other device support */ 4449 if (ndm->ndm_flags & NTF_SELF) { 4450 ops = dev->netdev_ops; 4451 if (!del_bulk) { 4452 if (ops->ndo_fdb_del) 4453 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack); 4454 else 4455 err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid); 4456 } else { 4457 /* in case err was cleared by NTF_MASTER call */ 4458 err = -EOPNOTSUPP; 4459 if (ops->ndo_fdb_del_bulk) 4460 err = ops->ndo_fdb_del_bulk(ndm, tb, dev, vid, 4461 extack); 4462 } 4463 4464 if (!err) { 4465 if (!del_bulk) 4466 rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH, 4467 ndm->ndm_state); 4468 ndm->ndm_flags &= ~NTF_SELF; 4469 } 4470 } 4471 out: 4472 return err; 4473 } 4474 4475 static int nlmsg_populate_fdb(struct sk_buff *skb, 4476 struct netlink_callback *cb, 4477 struct net_device *dev, 4478 int *idx, 4479 struct netdev_hw_addr_list *list) 4480 { 4481 struct netdev_hw_addr *ha; 4482 int err; 4483 u32 portid, seq; 4484 4485 portid = NETLINK_CB(cb->skb).portid; 4486 seq = cb->nlh->nlmsg_seq; 4487 4488 list_for_each_entry(ha, &list->list, list) { 4489 if (*idx < cb->args[2]) 4490 goto skip; 4491 4492 err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0, 4493 portid, seq, 4494 RTM_NEWNEIGH, NTF_SELF, 4495 NLM_F_MULTI, NUD_PERMANENT); 4496 if (err < 0) 4497 return err; 4498 skip: 4499 *idx += 1; 4500 } 4501 return 0; 4502 } 4503 4504 /** 4505 * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table. 4506 * @skb: socket buffer to store message in 4507 * @cb: netlink callback 4508 * @dev: netdevice 4509 * @filter_dev: ignored 4510 * @idx: the number of FDB table entries dumped is added to *@idx 4511 * 4512 * Default netdevice operation to dump the existing unicast address list. 4513 * Returns number of addresses from list put in skb. 4514 */ 4515 int ndo_dflt_fdb_dump(struct sk_buff *skb, 4516 struct netlink_callback *cb, 4517 struct net_device *dev, 4518 struct net_device *filter_dev, 4519 int *idx) 4520 { 4521 int err; 4522 4523 if (dev->type != ARPHRD_ETHER) 4524 return -EINVAL; 4525 4526 netif_addr_lock_bh(dev); 4527 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc); 4528 if (err) 4529 goto out; 4530 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->mc); 4531 out: 4532 netif_addr_unlock_bh(dev); 4533 return err; 4534 } 4535 EXPORT_SYMBOL(ndo_dflt_fdb_dump); 4536 4537 static int valid_fdb_dump_strict(const struct nlmsghdr *nlh, 4538 int *br_idx, int *brport_idx, 4539 struct netlink_ext_ack *extack) 4540 { 4541 struct nlattr *tb[NDA_MAX + 1]; 4542 struct ndmsg *ndm; 4543 int err, i; 4544 4545 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) { 4546 NL_SET_ERR_MSG(extack, "Invalid header for fdb dump request"); 4547 return -EINVAL; 4548 } 4549 4550 ndm = nlmsg_data(nlh); 4551 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state || 4552 ndm->ndm_flags || ndm->ndm_type) { 4553 NL_SET_ERR_MSG(extack, "Invalid values in header for fdb dump request"); 4554 return -EINVAL; 4555 } 4556 4557 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb, 4558 NDA_MAX, NULL, extack); 4559 if (err < 0) 4560 return err; 4561 4562 *brport_idx = ndm->ndm_ifindex; 4563 for (i = 0; i <= NDA_MAX; ++i) { 4564 if (!tb[i]) 4565 continue; 4566 4567 switch (i) { 4568 case NDA_IFINDEX: 4569 if (nla_len(tb[i]) != sizeof(u32)) { 4570 NL_SET_ERR_MSG(extack, "Invalid IFINDEX attribute in fdb dump request"); 4571 return -EINVAL; 4572 } 4573 *brport_idx = nla_get_u32(tb[NDA_IFINDEX]); 4574 break; 4575 case NDA_MASTER: 4576 if (nla_len(tb[i]) != sizeof(u32)) { 4577 NL_SET_ERR_MSG(extack, "Invalid MASTER attribute in fdb dump request"); 4578 return -EINVAL; 4579 } 4580 *br_idx = nla_get_u32(tb[NDA_MASTER]); 4581 break; 4582 default: 4583 NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb dump request"); 4584 return -EINVAL; 4585 } 4586 } 4587 4588 return 0; 4589 } 4590 4591 static int valid_fdb_dump_legacy(const struct nlmsghdr *nlh, 4592 int *br_idx, int *brport_idx, 4593 struct netlink_ext_ack *extack) 4594 { 4595 struct nlattr *tb[IFLA_MAX+1]; 4596 int err; 4597 4598 /* A hack to preserve kernel<->userspace interface. 4599 * Before Linux v4.12 this code accepted ndmsg since iproute2 v3.3.0. 4600 * However, ndmsg is shorter than ifinfomsg thus nlmsg_parse() bails. 4601 * So, check for ndmsg with an optional u32 attribute (not used here). 4602 * Fortunately these sizes don't conflict with the size of ifinfomsg 4603 * with an optional attribute. 4604 */ 4605 if (nlmsg_len(nlh) != sizeof(struct ndmsg) && 4606 (nlmsg_len(nlh) != sizeof(struct ndmsg) + 4607 nla_attr_size(sizeof(u32)))) { 4608 struct ifinfomsg *ifm; 4609 4610 err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg), 4611 tb, IFLA_MAX, ifla_policy, 4612 extack); 4613 if (err < 0) { 4614 return -EINVAL; 4615 } else if (err == 0) { 4616 if (tb[IFLA_MASTER]) 4617 *br_idx = nla_get_u32(tb[IFLA_MASTER]); 4618 } 4619 4620 ifm = nlmsg_data(nlh); 4621 *brport_idx = ifm->ifi_index; 4622 } 4623 return 0; 4624 } 4625 4626 static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb) 4627 { 4628 struct net_device *dev; 4629 struct net_device *br_dev = NULL; 4630 const struct net_device_ops *ops = NULL; 4631 const struct net_device_ops *cops = NULL; 4632 struct net *net = sock_net(skb->sk); 4633 struct hlist_head *head; 4634 int brport_idx = 0; 4635 int br_idx = 0; 4636 int h, s_h; 4637 int idx = 0, s_idx; 4638 int err = 0; 4639 int fidx = 0; 4640 4641 if (cb->strict_check) 4642 err = valid_fdb_dump_strict(cb->nlh, &br_idx, &brport_idx, 4643 cb->extack); 4644 else 4645 err = valid_fdb_dump_legacy(cb->nlh, &br_idx, &brport_idx, 4646 cb->extack); 4647 if (err < 0) 4648 return err; 4649 4650 if (br_idx) { 4651 br_dev = __dev_get_by_index(net, br_idx); 4652 if (!br_dev) 4653 return -ENODEV; 4654 4655 ops = br_dev->netdev_ops; 4656 } 4657 4658 s_h = cb->args[0]; 4659 s_idx = cb->args[1]; 4660 4661 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { 4662 idx = 0; 4663 head = &net->dev_index_head[h]; 4664 hlist_for_each_entry(dev, head, index_hlist) { 4665 4666 if (brport_idx && (dev->ifindex != brport_idx)) 4667 continue; 4668 4669 if (!br_idx) { /* user did not specify a specific bridge */ 4670 if (netif_is_bridge_port(dev)) { 4671 br_dev = netdev_master_upper_dev_get(dev); 4672 cops = br_dev->netdev_ops; 4673 } 4674 } else { 4675 if (dev != br_dev && 4676 !netif_is_bridge_port(dev)) 4677 continue; 4678 4679 if (br_dev != netdev_master_upper_dev_get(dev) && 4680 !netif_is_bridge_master(dev)) 4681 continue; 4682 cops = ops; 4683 } 4684 4685 if (idx < s_idx) 4686 goto cont; 4687 4688 if (netif_is_bridge_port(dev)) { 4689 if (cops && cops->ndo_fdb_dump) { 4690 err = cops->ndo_fdb_dump(skb, cb, 4691 br_dev, dev, 4692 &fidx); 4693 if (err == -EMSGSIZE) 4694 goto out; 4695 } 4696 } 4697 4698 if (dev->netdev_ops->ndo_fdb_dump) 4699 err = dev->netdev_ops->ndo_fdb_dump(skb, cb, 4700 dev, NULL, 4701 &fidx); 4702 else 4703 err = ndo_dflt_fdb_dump(skb, cb, dev, NULL, 4704 &fidx); 4705 if (err == -EMSGSIZE) 4706 goto out; 4707 4708 cops = NULL; 4709 4710 /* reset fdb offset to 0 for rest of the interfaces */ 4711 cb->args[2] = 0; 4712 fidx = 0; 4713 cont: 4714 idx++; 4715 } 4716 } 4717 4718 out: 4719 cb->args[0] = h; 4720 cb->args[1] = idx; 4721 cb->args[2] = fidx; 4722 4723 return skb->len; 4724 } 4725 4726 static int valid_fdb_get_strict(const struct nlmsghdr *nlh, 4727 struct nlattr **tb, u8 *ndm_flags, 4728 int *br_idx, int *brport_idx, u8 **addr, 4729 u16 *vid, struct netlink_ext_ack *extack) 4730 { 4731 struct ndmsg *ndm; 4732 int err, i; 4733 4734 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) { 4735 NL_SET_ERR_MSG(extack, "Invalid header for fdb get request"); 4736 return -EINVAL; 4737 } 4738 4739 ndm = nlmsg_data(nlh); 4740 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state || 4741 ndm->ndm_type) { 4742 NL_SET_ERR_MSG(extack, "Invalid values in header for fdb get request"); 4743 return -EINVAL; 4744 } 4745 4746 if (ndm->ndm_flags & ~(NTF_MASTER | NTF_SELF)) { 4747 NL_SET_ERR_MSG(extack, "Invalid flags in header for fdb get request"); 4748 return -EINVAL; 4749 } 4750 4751 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb, 4752 NDA_MAX, nda_policy, extack); 4753 if (err < 0) 4754 return err; 4755 4756 *ndm_flags = ndm->ndm_flags; 4757 *brport_idx = ndm->ndm_ifindex; 4758 for (i = 0; i <= NDA_MAX; ++i) { 4759 if (!tb[i]) 4760 continue; 4761 4762 switch (i) { 4763 case NDA_MASTER: 4764 *br_idx = nla_get_u32(tb[i]); 4765 break; 4766 case NDA_LLADDR: 4767 if (nla_len(tb[i]) != ETH_ALEN) { 4768 NL_SET_ERR_MSG(extack, "Invalid address in fdb get request"); 4769 return -EINVAL; 4770 } 4771 *addr = nla_data(tb[i]); 4772 break; 4773 case NDA_VLAN: 4774 err = fdb_vid_parse(tb[i], vid, extack); 4775 if (err) 4776 return err; 4777 break; 4778 case NDA_VNI: 4779 break; 4780 default: 4781 NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb get request"); 4782 return -EINVAL; 4783 } 4784 } 4785 4786 return 0; 4787 } 4788 4789 static int rtnl_fdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh, 4790 struct netlink_ext_ack *extack) 4791 { 4792 struct net_device *dev = NULL, *br_dev = NULL; 4793 const struct net_device_ops *ops = NULL; 4794 struct net *net = sock_net(in_skb->sk); 4795 struct nlattr *tb[NDA_MAX + 1]; 4796 struct sk_buff *skb; 4797 int brport_idx = 0; 4798 u8 ndm_flags = 0; 4799 int br_idx = 0; 4800 u8 *addr = NULL; 4801 u16 vid = 0; 4802 int err; 4803 4804 err = valid_fdb_get_strict(nlh, tb, &ndm_flags, &br_idx, 4805 &brport_idx, &addr, &vid, extack); 4806 if (err < 0) 4807 return err; 4808 4809 if (!addr) { 4810 NL_SET_ERR_MSG(extack, "Missing lookup address for fdb get request"); 4811 return -EINVAL; 4812 } 4813 4814 if (brport_idx) { 4815 dev = __dev_get_by_index(net, brport_idx); 4816 if (!dev) { 4817 NL_SET_ERR_MSG(extack, "Unknown device ifindex"); 4818 return -ENODEV; 4819 } 4820 } 4821 4822 if (br_idx) { 4823 if (dev) { 4824 NL_SET_ERR_MSG(extack, "Master and device are mutually exclusive"); 4825 return -EINVAL; 4826 } 4827 4828 br_dev = __dev_get_by_index(net, br_idx); 4829 if (!br_dev) { 4830 NL_SET_ERR_MSG(extack, "Invalid master ifindex"); 4831 return -EINVAL; 4832 } 4833 ops = br_dev->netdev_ops; 4834 } 4835 4836 if (dev) { 4837 if (!ndm_flags || (ndm_flags & NTF_MASTER)) { 4838 if (!netif_is_bridge_port(dev)) { 4839 NL_SET_ERR_MSG(extack, "Device is not a bridge port"); 4840 return -EINVAL; 4841 } 4842 br_dev = netdev_master_upper_dev_get(dev); 4843 if (!br_dev) { 4844 NL_SET_ERR_MSG(extack, "Master of device not found"); 4845 return -EINVAL; 4846 } 4847 ops = br_dev->netdev_ops; 4848 } else { 4849 if (!(ndm_flags & NTF_SELF)) { 4850 NL_SET_ERR_MSG(extack, "Missing NTF_SELF"); 4851 return -EINVAL; 4852 } 4853 ops = dev->netdev_ops; 4854 } 4855 } 4856 4857 if (!br_dev && !dev) { 4858 NL_SET_ERR_MSG(extack, "No device specified"); 4859 return -ENODEV; 4860 } 4861 4862 if (!ops || !ops->ndo_fdb_get) { 4863 NL_SET_ERR_MSG(extack, "Fdb get operation not supported by device"); 4864 return -EOPNOTSUPP; 4865 } 4866 4867 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 4868 if (!skb) 4869 return -ENOBUFS; 4870 4871 if (br_dev) 4872 dev = br_dev; 4873 err = ops->ndo_fdb_get(skb, tb, dev, addr, vid, 4874 NETLINK_CB(in_skb).portid, 4875 nlh->nlmsg_seq, extack); 4876 if (err) 4877 goto out; 4878 4879 return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid); 4880 out: 4881 kfree_skb(skb); 4882 return err; 4883 } 4884 4885 static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask, 4886 unsigned int attrnum, unsigned int flag) 4887 { 4888 if (mask & flag) 4889 return nla_put_u8(skb, attrnum, !!(flags & flag)); 4890 return 0; 4891 } 4892 4893 int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 4894 struct net_device *dev, u16 mode, 4895 u32 flags, u32 mask, int nlflags, 4896 u32 filter_mask, 4897 int (*vlan_fill)(struct sk_buff *skb, 4898 struct net_device *dev, 4899 u32 filter_mask)) 4900 { 4901 struct nlmsghdr *nlh; 4902 struct ifinfomsg *ifm; 4903 struct nlattr *br_afspec; 4904 struct nlattr *protinfo; 4905 u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN; 4906 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 4907 int err = 0; 4908 4909 nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags); 4910 if (nlh == NULL) 4911 return -EMSGSIZE; 4912 4913 ifm = nlmsg_data(nlh); 4914 ifm->ifi_family = AF_BRIDGE; 4915 ifm->__ifi_pad = 0; 4916 ifm->ifi_type = dev->type; 4917 ifm->ifi_index = dev->ifindex; 4918 ifm->ifi_flags = dev_get_flags(dev); 4919 ifm->ifi_change = 0; 4920 4921 4922 if (nla_put_string(skb, IFLA_IFNAME, dev->name) || 4923 nla_put_u32(skb, IFLA_MTU, dev->mtu) || 4924 nla_put_u8(skb, IFLA_OPERSTATE, operstate) || 4925 (br_dev && 4926 nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) || 4927 (dev->addr_len && 4928 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) || 4929 (dev->ifindex != dev_get_iflink(dev) && 4930 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev)))) 4931 goto nla_put_failure; 4932 4933 br_afspec = nla_nest_start_noflag(skb, IFLA_AF_SPEC); 4934 if (!br_afspec) 4935 goto nla_put_failure; 4936 4937 if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF)) { 4938 nla_nest_cancel(skb, br_afspec); 4939 goto nla_put_failure; 4940 } 4941 4942 if (mode != BRIDGE_MODE_UNDEF) { 4943 if (nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) { 4944 nla_nest_cancel(skb, br_afspec); 4945 goto nla_put_failure; 4946 } 4947 } 4948 if (vlan_fill) { 4949 err = vlan_fill(skb, dev, filter_mask); 4950 if (err) { 4951 nla_nest_cancel(skb, br_afspec); 4952 goto nla_put_failure; 4953 } 4954 } 4955 nla_nest_end(skb, br_afspec); 4956 4957 protinfo = nla_nest_start(skb, IFLA_PROTINFO); 4958 if (!protinfo) 4959 goto nla_put_failure; 4960 4961 if (brport_nla_put_flag(skb, flags, mask, 4962 IFLA_BRPORT_MODE, BR_HAIRPIN_MODE) || 4963 brport_nla_put_flag(skb, flags, mask, 4964 IFLA_BRPORT_GUARD, BR_BPDU_GUARD) || 4965 brport_nla_put_flag(skb, flags, mask, 4966 IFLA_BRPORT_FAST_LEAVE, 4967 BR_MULTICAST_FAST_LEAVE) || 4968 brport_nla_put_flag(skb, flags, mask, 4969 IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK) || 4970 brport_nla_put_flag(skb, flags, mask, 4971 IFLA_BRPORT_LEARNING, BR_LEARNING) || 4972 brport_nla_put_flag(skb, flags, mask, 4973 IFLA_BRPORT_LEARNING_SYNC, BR_LEARNING_SYNC) || 4974 brport_nla_put_flag(skb, flags, mask, 4975 IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD) || 4976 brport_nla_put_flag(skb, flags, mask, 4977 IFLA_BRPORT_PROXYARP, BR_PROXYARP) || 4978 brport_nla_put_flag(skb, flags, mask, 4979 IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD) || 4980 brport_nla_put_flag(skb, flags, mask, 4981 IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD)) { 4982 nla_nest_cancel(skb, protinfo); 4983 goto nla_put_failure; 4984 } 4985 4986 nla_nest_end(skb, protinfo); 4987 4988 nlmsg_end(skb, nlh); 4989 return 0; 4990 nla_put_failure: 4991 nlmsg_cancel(skb, nlh); 4992 return err ? err : -EMSGSIZE; 4993 } 4994 EXPORT_SYMBOL_GPL(ndo_dflt_bridge_getlink); 4995 4996 static int valid_bridge_getlink_req(const struct nlmsghdr *nlh, 4997 bool strict_check, u32 *filter_mask, 4998 struct netlink_ext_ack *extack) 4999 { 5000 struct nlattr *tb[IFLA_MAX+1]; 5001 int err, i; 5002 5003 if (strict_check) { 5004 struct ifinfomsg *ifm; 5005 5006 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) { 5007 NL_SET_ERR_MSG(extack, "Invalid header for bridge link dump"); 5008 return -EINVAL; 5009 } 5010 5011 ifm = nlmsg_data(nlh); 5012 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags || 5013 ifm->ifi_change || ifm->ifi_index) { 5014 NL_SET_ERR_MSG(extack, "Invalid values in header for bridge link dump request"); 5015 return -EINVAL; 5016 } 5017 5018 err = nlmsg_parse_deprecated_strict(nlh, 5019 sizeof(struct ifinfomsg), 5020 tb, IFLA_MAX, ifla_policy, 5021 extack); 5022 } else { 5023 err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg), 5024 tb, IFLA_MAX, ifla_policy, 5025 extack); 5026 } 5027 if (err < 0) 5028 return err; 5029 5030 /* new attributes should only be added with strict checking */ 5031 for (i = 0; i <= IFLA_MAX; ++i) { 5032 if (!tb[i]) 5033 continue; 5034 5035 switch (i) { 5036 case IFLA_EXT_MASK: 5037 *filter_mask = nla_get_u32(tb[i]); 5038 break; 5039 default: 5040 if (strict_check) { 5041 NL_SET_ERR_MSG(extack, "Unsupported attribute in bridge link dump request"); 5042 return -EINVAL; 5043 } 5044 } 5045 } 5046 5047 return 0; 5048 } 5049 5050 static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb) 5051 { 5052 const struct nlmsghdr *nlh = cb->nlh; 5053 struct net *net = sock_net(skb->sk); 5054 struct net_device *dev; 5055 int idx = 0; 5056 u32 portid = NETLINK_CB(cb->skb).portid; 5057 u32 seq = nlh->nlmsg_seq; 5058 u32 filter_mask = 0; 5059 int err; 5060 5061 err = valid_bridge_getlink_req(nlh, cb->strict_check, &filter_mask, 5062 cb->extack); 5063 if (err < 0 && cb->strict_check) 5064 return err; 5065 5066 rcu_read_lock(); 5067 for_each_netdev_rcu(net, dev) { 5068 const struct net_device_ops *ops = dev->netdev_ops; 5069 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 5070 5071 if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) { 5072 if (idx >= cb->args[0]) { 5073 err = br_dev->netdev_ops->ndo_bridge_getlink( 5074 skb, portid, seq, dev, 5075 filter_mask, NLM_F_MULTI); 5076 if (err < 0 && err != -EOPNOTSUPP) { 5077 if (likely(skb->len)) 5078 break; 5079 5080 goto out_err; 5081 } 5082 } 5083 idx++; 5084 } 5085 5086 if (ops->ndo_bridge_getlink) { 5087 if (idx >= cb->args[0]) { 5088 err = ops->ndo_bridge_getlink(skb, portid, 5089 seq, dev, 5090 filter_mask, 5091 NLM_F_MULTI); 5092 if (err < 0 && err != -EOPNOTSUPP) { 5093 if (likely(skb->len)) 5094 break; 5095 5096 goto out_err; 5097 } 5098 } 5099 idx++; 5100 } 5101 } 5102 err = skb->len; 5103 out_err: 5104 rcu_read_unlock(); 5105 cb->args[0] = idx; 5106 5107 return err; 5108 } 5109 5110 static inline size_t bridge_nlmsg_size(void) 5111 { 5112 return NLMSG_ALIGN(sizeof(struct ifinfomsg)) 5113 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */ 5114 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */ 5115 + nla_total_size(sizeof(u32)) /* IFLA_MASTER */ 5116 + nla_total_size(sizeof(u32)) /* IFLA_MTU */ 5117 + nla_total_size(sizeof(u32)) /* IFLA_LINK */ 5118 + nla_total_size(sizeof(u32)) /* IFLA_OPERSTATE */ 5119 + nla_total_size(sizeof(u8)) /* IFLA_PROTINFO */ 5120 + nla_total_size(sizeof(struct nlattr)) /* IFLA_AF_SPEC */ 5121 + nla_total_size(sizeof(u16)) /* IFLA_BRIDGE_FLAGS */ 5122 + nla_total_size(sizeof(u16)); /* IFLA_BRIDGE_MODE */ 5123 } 5124 5125 static int rtnl_bridge_notify(struct net_device *dev) 5126 { 5127 struct net *net = dev_net(dev); 5128 struct sk_buff *skb; 5129 int err = -EOPNOTSUPP; 5130 5131 if (!dev->netdev_ops->ndo_bridge_getlink) 5132 return 0; 5133 5134 skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC); 5135 if (!skb) { 5136 err = -ENOMEM; 5137 goto errout; 5138 } 5139 5140 err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0); 5141 if (err < 0) 5142 goto errout; 5143 5144 /* Notification info is only filled for bridge ports, not the bridge 5145 * device itself. Therefore, a zero notification length is valid and 5146 * should not result in an error. 5147 */ 5148 if (!skb->len) 5149 goto errout; 5150 5151 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC); 5152 return 0; 5153 errout: 5154 WARN_ON(err == -EMSGSIZE); 5155 kfree_skb(skb); 5156 if (err) 5157 rtnl_set_sk_err(net, RTNLGRP_LINK, err); 5158 return err; 5159 } 5160 5161 static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, 5162 struct netlink_ext_ack *extack) 5163 { 5164 struct net *net = sock_net(skb->sk); 5165 struct ifinfomsg *ifm; 5166 struct net_device *dev; 5167 struct nlattr *br_spec, *attr, *br_flags_attr = NULL; 5168 int rem, err = -EOPNOTSUPP; 5169 u16 flags = 0; 5170 5171 if (nlmsg_len(nlh) < sizeof(*ifm)) 5172 return -EINVAL; 5173 5174 ifm = nlmsg_data(nlh); 5175 if (ifm->ifi_family != AF_BRIDGE) 5176 return -EPFNOSUPPORT; 5177 5178 dev = __dev_get_by_index(net, ifm->ifi_index); 5179 if (!dev) { 5180 NL_SET_ERR_MSG(extack, "unknown ifindex"); 5181 return -ENODEV; 5182 } 5183 5184 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 5185 if (br_spec) { 5186 nla_for_each_nested(attr, br_spec, rem) { 5187 if (nla_type(attr) == IFLA_BRIDGE_FLAGS && !br_flags_attr) { 5188 if (nla_len(attr) < sizeof(flags)) 5189 return -EINVAL; 5190 5191 br_flags_attr = attr; 5192 flags = nla_get_u16(attr); 5193 } 5194 5195 if (nla_type(attr) == IFLA_BRIDGE_MODE) { 5196 if (nla_len(attr) < sizeof(u16)) 5197 return -EINVAL; 5198 } 5199 } 5200 } 5201 5202 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) { 5203 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 5204 5205 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_setlink) { 5206 err = -EOPNOTSUPP; 5207 goto out; 5208 } 5209 5210 err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags, 5211 extack); 5212 if (err) 5213 goto out; 5214 5215 flags &= ~BRIDGE_FLAGS_MASTER; 5216 } 5217 5218 if ((flags & BRIDGE_FLAGS_SELF)) { 5219 if (!dev->netdev_ops->ndo_bridge_setlink) 5220 err = -EOPNOTSUPP; 5221 else 5222 err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh, 5223 flags, 5224 extack); 5225 if (!err) { 5226 flags &= ~BRIDGE_FLAGS_SELF; 5227 5228 /* Generate event to notify upper layer of bridge 5229 * change 5230 */ 5231 err = rtnl_bridge_notify(dev); 5232 } 5233 } 5234 5235 if (br_flags_attr) 5236 memcpy(nla_data(br_flags_attr), &flags, sizeof(flags)); 5237 out: 5238 return err; 5239 } 5240 5241 static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, 5242 struct netlink_ext_ack *extack) 5243 { 5244 struct net *net = sock_net(skb->sk); 5245 struct ifinfomsg *ifm; 5246 struct net_device *dev; 5247 struct nlattr *br_spec, *attr = NULL; 5248 int rem, err = -EOPNOTSUPP; 5249 u16 flags = 0; 5250 bool have_flags = false; 5251 5252 if (nlmsg_len(nlh) < sizeof(*ifm)) 5253 return -EINVAL; 5254 5255 ifm = nlmsg_data(nlh); 5256 if (ifm->ifi_family != AF_BRIDGE) 5257 return -EPFNOSUPPORT; 5258 5259 dev = __dev_get_by_index(net, ifm->ifi_index); 5260 if (!dev) { 5261 NL_SET_ERR_MSG(extack, "unknown ifindex"); 5262 return -ENODEV; 5263 } 5264 5265 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 5266 if (br_spec) { 5267 nla_for_each_nested(attr, br_spec, rem) { 5268 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) { 5269 if (nla_len(attr) < sizeof(flags)) 5270 return -EINVAL; 5271 5272 have_flags = true; 5273 flags = nla_get_u16(attr); 5274 break; 5275 } 5276 } 5277 } 5278 5279 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) { 5280 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 5281 5282 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) { 5283 err = -EOPNOTSUPP; 5284 goto out; 5285 } 5286 5287 err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh, flags); 5288 if (err) 5289 goto out; 5290 5291 flags &= ~BRIDGE_FLAGS_MASTER; 5292 } 5293 5294 if ((flags & BRIDGE_FLAGS_SELF)) { 5295 if (!dev->netdev_ops->ndo_bridge_dellink) 5296 err = -EOPNOTSUPP; 5297 else 5298 err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh, 5299 flags); 5300 5301 if (!err) { 5302 flags &= ~BRIDGE_FLAGS_SELF; 5303 5304 /* Generate event to notify upper layer of bridge 5305 * change 5306 */ 5307 err = rtnl_bridge_notify(dev); 5308 } 5309 } 5310 5311 if (have_flags) 5312 memcpy(nla_data(attr), &flags, sizeof(flags)); 5313 out: 5314 return err; 5315 } 5316 5317 static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr) 5318 { 5319 return (mask & IFLA_STATS_FILTER_BIT(attrid)) && 5320 (!idxattr || idxattr == attrid); 5321 } 5322 5323 static bool 5324 rtnl_offload_xstats_have_ndo(const struct net_device *dev, int attr_id) 5325 { 5326 return dev->netdev_ops && 5327 dev->netdev_ops->ndo_has_offload_stats && 5328 dev->netdev_ops->ndo_get_offload_stats && 5329 dev->netdev_ops->ndo_has_offload_stats(dev, attr_id); 5330 } 5331 5332 static unsigned int 5333 rtnl_offload_xstats_get_size_ndo(const struct net_device *dev, int attr_id) 5334 { 5335 return rtnl_offload_xstats_have_ndo(dev, attr_id) ? 5336 sizeof(struct rtnl_link_stats64) : 0; 5337 } 5338 5339 static int 5340 rtnl_offload_xstats_fill_ndo(struct net_device *dev, int attr_id, 5341 struct sk_buff *skb) 5342 { 5343 unsigned int size = rtnl_offload_xstats_get_size_ndo(dev, attr_id); 5344 struct nlattr *attr = NULL; 5345 void *attr_data; 5346 int err; 5347 5348 if (!size) 5349 return -ENODATA; 5350 5351 attr = nla_reserve_64bit(skb, attr_id, size, 5352 IFLA_OFFLOAD_XSTATS_UNSPEC); 5353 if (!attr) 5354 return -EMSGSIZE; 5355 5356 attr_data = nla_data(attr); 5357 memset(attr_data, 0, size); 5358 5359 err = dev->netdev_ops->ndo_get_offload_stats(attr_id, dev, attr_data); 5360 if (err) 5361 return err; 5362 5363 return 0; 5364 } 5365 5366 static unsigned int 5367 rtnl_offload_xstats_get_size_stats(const struct net_device *dev, 5368 enum netdev_offload_xstats_type type) 5369 { 5370 bool enabled = netdev_offload_xstats_enabled(dev, type); 5371 5372 return enabled ? sizeof(struct rtnl_hw_stats64) : 0; 5373 } 5374 5375 struct rtnl_offload_xstats_request_used { 5376 bool request; 5377 bool used; 5378 }; 5379 5380 static int 5381 rtnl_offload_xstats_get_stats(struct net_device *dev, 5382 enum netdev_offload_xstats_type type, 5383 struct rtnl_offload_xstats_request_used *ru, 5384 struct rtnl_hw_stats64 *stats, 5385 struct netlink_ext_ack *extack) 5386 { 5387 bool request; 5388 bool used; 5389 int err; 5390 5391 request = netdev_offload_xstats_enabled(dev, type); 5392 if (!request) { 5393 used = false; 5394 goto out; 5395 } 5396 5397 err = netdev_offload_xstats_get(dev, type, stats, &used, extack); 5398 if (err) 5399 return err; 5400 5401 out: 5402 if (ru) { 5403 ru->request = request; 5404 ru->used = used; 5405 } 5406 return 0; 5407 } 5408 5409 static int 5410 rtnl_offload_xstats_fill_hw_s_info_one(struct sk_buff *skb, int attr_id, 5411 struct rtnl_offload_xstats_request_used *ru) 5412 { 5413 struct nlattr *nest; 5414 5415 nest = nla_nest_start(skb, attr_id); 5416 if (!nest) 5417 return -EMSGSIZE; 5418 5419 if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST, ru->request)) 5420 goto nla_put_failure; 5421 5422 if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED, ru->used)) 5423 goto nla_put_failure; 5424 5425 nla_nest_end(skb, nest); 5426 return 0; 5427 5428 nla_put_failure: 5429 nla_nest_cancel(skb, nest); 5430 return -EMSGSIZE; 5431 } 5432 5433 static int 5434 rtnl_offload_xstats_fill_hw_s_info(struct sk_buff *skb, struct net_device *dev, 5435 struct netlink_ext_ack *extack) 5436 { 5437 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; 5438 struct rtnl_offload_xstats_request_used ru_l3; 5439 struct nlattr *nest; 5440 int err; 5441 5442 err = rtnl_offload_xstats_get_stats(dev, t_l3, &ru_l3, NULL, extack); 5443 if (err) 5444 return err; 5445 5446 nest = nla_nest_start(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO); 5447 if (!nest) 5448 return -EMSGSIZE; 5449 5450 if (rtnl_offload_xstats_fill_hw_s_info_one(skb, 5451 IFLA_OFFLOAD_XSTATS_L3_STATS, 5452 &ru_l3)) 5453 goto nla_put_failure; 5454 5455 nla_nest_end(skb, nest); 5456 return 0; 5457 5458 nla_put_failure: 5459 nla_nest_cancel(skb, nest); 5460 return -EMSGSIZE; 5461 } 5462 5463 static int rtnl_offload_xstats_fill(struct sk_buff *skb, struct net_device *dev, 5464 int *prividx, u32 off_filter_mask, 5465 struct netlink_ext_ack *extack) 5466 { 5467 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; 5468 int attr_id_hw_s_info = IFLA_OFFLOAD_XSTATS_HW_S_INFO; 5469 int attr_id_l3_stats = IFLA_OFFLOAD_XSTATS_L3_STATS; 5470 int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT; 5471 bool have_data = false; 5472 int err; 5473 5474 if (*prividx <= attr_id_cpu_hit && 5475 (off_filter_mask & 5476 IFLA_STATS_FILTER_BIT(attr_id_cpu_hit))) { 5477 err = rtnl_offload_xstats_fill_ndo(dev, attr_id_cpu_hit, skb); 5478 if (!err) { 5479 have_data = true; 5480 } else if (err != -ENODATA) { 5481 *prividx = attr_id_cpu_hit; 5482 return err; 5483 } 5484 } 5485 5486 if (*prividx <= attr_id_hw_s_info && 5487 (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_hw_s_info))) { 5488 *prividx = attr_id_hw_s_info; 5489 5490 err = rtnl_offload_xstats_fill_hw_s_info(skb, dev, extack); 5491 if (err) 5492 return err; 5493 5494 have_data = true; 5495 *prividx = 0; 5496 } 5497 5498 if (*prividx <= attr_id_l3_stats && 5499 (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_l3_stats))) { 5500 unsigned int size_l3; 5501 struct nlattr *attr; 5502 5503 *prividx = attr_id_l3_stats; 5504 5505 size_l3 = rtnl_offload_xstats_get_size_stats(dev, t_l3); 5506 if (!size_l3) 5507 goto skip_l3_stats; 5508 attr = nla_reserve_64bit(skb, attr_id_l3_stats, size_l3, 5509 IFLA_OFFLOAD_XSTATS_UNSPEC); 5510 if (!attr) 5511 return -EMSGSIZE; 5512 5513 err = rtnl_offload_xstats_get_stats(dev, t_l3, NULL, 5514 nla_data(attr), extack); 5515 if (err) 5516 return err; 5517 5518 have_data = true; 5519 skip_l3_stats: 5520 *prividx = 0; 5521 } 5522 5523 if (!have_data) 5524 return -ENODATA; 5525 5526 *prividx = 0; 5527 return 0; 5528 } 5529 5530 static unsigned int 5531 rtnl_offload_xstats_get_size_hw_s_info_one(const struct net_device *dev, 5532 enum netdev_offload_xstats_type type) 5533 { 5534 return nla_total_size(0) + 5535 /* IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST */ 5536 nla_total_size(sizeof(u8)) + 5537 /* IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED */ 5538 nla_total_size(sizeof(u8)) + 5539 0; 5540 } 5541 5542 static unsigned int 5543 rtnl_offload_xstats_get_size_hw_s_info(const struct net_device *dev) 5544 { 5545 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; 5546 5547 return nla_total_size(0) + 5548 /* IFLA_OFFLOAD_XSTATS_L3_STATS */ 5549 rtnl_offload_xstats_get_size_hw_s_info_one(dev, t_l3) + 5550 0; 5551 } 5552 5553 static int rtnl_offload_xstats_get_size(const struct net_device *dev, 5554 u32 off_filter_mask) 5555 { 5556 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; 5557 int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT; 5558 int nla_size = 0; 5559 int size; 5560 5561 if (off_filter_mask & 5562 IFLA_STATS_FILTER_BIT(attr_id_cpu_hit)) { 5563 size = rtnl_offload_xstats_get_size_ndo(dev, attr_id_cpu_hit); 5564 nla_size += nla_total_size_64bit(size); 5565 } 5566 5567 if (off_filter_mask & 5568 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO)) 5569 nla_size += rtnl_offload_xstats_get_size_hw_s_info(dev); 5570 5571 if (off_filter_mask & 5572 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_L3_STATS)) { 5573 size = rtnl_offload_xstats_get_size_stats(dev, t_l3); 5574 nla_size += nla_total_size_64bit(size); 5575 } 5576 5577 if (nla_size != 0) 5578 nla_size += nla_total_size(0); 5579 5580 return nla_size; 5581 } 5582 5583 struct rtnl_stats_dump_filters { 5584 /* mask[0] filters outer attributes. Then individual nests have their 5585 * filtering mask at the index of the nested attribute. 5586 */ 5587 u32 mask[IFLA_STATS_MAX + 1]; 5588 }; 5589 5590 static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev, 5591 int type, u32 pid, u32 seq, u32 change, 5592 unsigned int flags, 5593 const struct rtnl_stats_dump_filters *filters, 5594 int *idxattr, int *prividx, 5595 struct netlink_ext_ack *extack) 5596 { 5597 unsigned int filter_mask = filters->mask[0]; 5598 struct if_stats_msg *ifsm; 5599 struct nlmsghdr *nlh; 5600 struct nlattr *attr; 5601 int s_prividx = *prividx; 5602 int err; 5603 5604 ASSERT_RTNL(); 5605 5606 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifsm), flags); 5607 if (!nlh) 5608 return -EMSGSIZE; 5609 5610 ifsm = nlmsg_data(nlh); 5611 ifsm->family = PF_UNSPEC; 5612 ifsm->pad1 = 0; 5613 ifsm->pad2 = 0; 5614 ifsm->ifindex = dev->ifindex; 5615 ifsm->filter_mask = filter_mask; 5616 5617 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, *idxattr)) { 5618 struct rtnl_link_stats64 *sp; 5619 5620 attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64, 5621 sizeof(struct rtnl_link_stats64), 5622 IFLA_STATS_UNSPEC); 5623 if (!attr) { 5624 err = -EMSGSIZE; 5625 goto nla_put_failure; 5626 } 5627 5628 sp = nla_data(attr); 5629 dev_get_stats(dev, sp); 5630 } 5631 5632 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, *idxattr)) { 5633 const struct rtnl_link_ops *ops = dev->rtnl_link_ops; 5634 5635 if (ops && ops->fill_linkxstats) { 5636 *idxattr = IFLA_STATS_LINK_XSTATS; 5637 attr = nla_nest_start_noflag(skb, 5638 IFLA_STATS_LINK_XSTATS); 5639 if (!attr) { 5640 err = -EMSGSIZE; 5641 goto nla_put_failure; 5642 } 5643 5644 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr); 5645 nla_nest_end(skb, attr); 5646 if (err) 5647 goto nla_put_failure; 5648 *idxattr = 0; 5649 } 5650 } 5651 5652 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 5653 *idxattr)) { 5654 const struct rtnl_link_ops *ops = NULL; 5655 const struct net_device *master; 5656 5657 master = netdev_master_upper_dev_get(dev); 5658 if (master) 5659 ops = master->rtnl_link_ops; 5660 if (ops && ops->fill_linkxstats) { 5661 *idxattr = IFLA_STATS_LINK_XSTATS_SLAVE; 5662 attr = nla_nest_start_noflag(skb, 5663 IFLA_STATS_LINK_XSTATS_SLAVE); 5664 if (!attr) { 5665 err = -EMSGSIZE; 5666 goto nla_put_failure; 5667 } 5668 5669 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr); 5670 nla_nest_end(skb, attr); 5671 if (err) 5672 goto nla_put_failure; 5673 *idxattr = 0; 5674 } 5675 } 5676 5677 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 5678 *idxattr)) { 5679 u32 off_filter_mask; 5680 5681 off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS]; 5682 *idxattr = IFLA_STATS_LINK_OFFLOAD_XSTATS; 5683 attr = nla_nest_start_noflag(skb, 5684 IFLA_STATS_LINK_OFFLOAD_XSTATS); 5685 if (!attr) { 5686 err = -EMSGSIZE; 5687 goto nla_put_failure; 5688 } 5689 5690 err = rtnl_offload_xstats_fill(skb, dev, prividx, 5691 off_filter_mask, extack); 5692 if (err == -ENODATA) 5693 nla_nest_cancel(skb, attr); 5694 else 5695 nla_nest_end(skb, attr); 5696 5697 if (err && err != -ENODATA) 5698 goto nla_put_failure; 5699 *idxattr = 0; 5700 } 5701 5702 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, *idxattr)) { 5703 struct rtnl_af_ops *af_ops; 5704 5705 *idxattr = IFLA_STATS_AF_SPEC; 5706 attr = nla_nest_start_noflag(skb, IFLA_STATS_AF_SPEC); 5707 if (!attr) { 5708 err = -EMSGSIZE; 5709 goto nla_put_failure; 5710 } 5711 5712 rcu_read_lock(); 5713 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) { 5714 if (af_ops->fill_stats_af) { 5715 struct nlattr *af; 5716 5717 af = nla_nest_start_noflag(skb, 5718 af_ops->family); 5719 if (!af) { 5720 rcu_read_unlock(); 5721 err = -EMSGSIZE; 5722 goto nla_put_failure; 5723 } 5724 err = af_ops->fill_stats_af(skb, dev); 5725 5726 if (err == -ENODATA) { 5727 nla_nest_cancel(skb, af); 5728 } else if (err < 0) { 5729 rcu_read_unlock(); 5730 goto nla_put_failure; 5731 } 5732 5733 nla_nest_end(skb, af); 5734 } 5735 } 5736 rcu_read_unlock(); 5737 5738 nla_nest_end(skb, attr); 5739 5740 *idxattr = 0; 5741 } 5742 5743 nlmsg_end(skb, nlh); 5744 5745 return 0; 5746 5747 nla_put_failure: 5748 /* not a multi message or no progress mean a real error */ 5749 if (!(flags & NLM_F_MULTI) || s_prividx == *prividx) 5750 nlmsg_cancel(skb, nlh); 5751 else 5752 nlmsg_end(skb, nlh); 5753 5754 return err; 5755 } 5756 5757 static size_t if_nlmsg_stats_size(const struct net_device *dev, 5758 const struct rtnl_stats_dump_filters *filters) 5759 { 5760 size_t size = NLMSG_ALIGN(sizeof(struct if_stats_msg)); 5761 unsigned int filter_mask = filters->mask[0]; 5762 5763 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0)) 5764 size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64)); 5765 5766 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, 0)) { 5767 const struct rtnl_link_ops *ops = dev->rtnl_link_ops; 5768 int attr = IFLA_STATS_LINK_XSTATS; 5769 5770 if (ops && ops->get_linkxstats_size) { 5771 size += nla_total_size(ops->get_linkxstats_size(dev, 5772 attr)); 5773 /* for IFLA_STATS_LINK_XSTATS */ 5774 size += nla_total_size(0); 5775 } 5776 } 5777 5778 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 0)) { 5779 struct net_device *_dev = (struct net_device *)dev; 5780 const struct rtnl_link_ops *ops = NULL; 5781 const struct net_device *master; 5782 5783 /* netdev_master_upper_dev_get can't take const */ 5784 master = netdev_master_upper_dev_get(_dev); 5785 if (master) 5786 ops = master->rtnl_link_ops; 5787 if (ops && ops->get_linkxstats_size) { 5788 int attr = IFLA_STATS_LINK_XSTATS_SLAVE; 5789 5790 size += nla_total_size(ops->get_linkxstats_size(dev, 5791 attr)); 5792 /* for IFLA_STATS_LINK_XSTATS_SLAVE */ 5793 size += nla_total_size(0); 5794 } 5795 } 5796 5797 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0)) { 5798 u32 off_filter_mask; 5799 5800 off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS]; 5801 size += rtnl_offload_xstats_get_size(dev, off_filter_mask); 5802 } 5803 5804 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, 0)) { 5805 struct rtnl_af_ops *af_ops; 5806 5807 /* for IFLA_STATS_AF_SPEC */ 5808 size += nla_total_size(0); 5809 5810 rcu_read_lock(); 5811 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) { 5812 if (af_ops->get_stats_af_size) { 5813 size += nla_total_size( 5814 af_ops->get_stats_af_size(dev)); 5815 5816 /* for AF_* */ 5817 size += nla_total_size(0); 5818 } 5819 } 5820 rcu_read_unlock(); 5821 } 5822 5823 return size; 5824 } 5825 5826 #define RTNL_STATS_OFFLOAD_XSTATS_VALID ((1 << __IFLA_OFFLOAD_XSTATS_MAX) - 1) 5827 5828 static const struct nla_policy 5829 rtnl_stats_get_policy_filters[IFLA_STATS_MAX + 1] = { 5830 [IFLA_STATS_LINK_OFFLOAD_XSTATS] = 5831 NLA_POLICY_MASK(NLA_U32, RTNL_STATS_OFFLOAD_XSTATS_VALID), 5832 }; 5833 5834 static const struct nla_policy 5835 rtnl_stats_get_policy[IFLA_STATS_GETSET_MAX + 1] = { 5836 [IFLA_STATS_GET_FILTERS] = 5837 NLA_POLICY_NESTED(rtnl_stats_get_policy_filters), 5838 }; 5839 5840 static const struct nla_policy 5841 ifla_stats_set_policy[IFLA_STATS_GETSET_MAX + 1] = { 5842 [IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS] = NLA_POLICY_MAX(NLA_U8, 1), 5843 }; 5844 5845 static int rtnl_stats_get_parse_filters(struct nlattr *ifla_filters, 5846 struct rtnl_stats_dump_filters *filters, 5847 struct netlink_ext_ack *extack) 5848 { 5849 struct nlattr *tb[IFLA_STATS_MAX + 1]; 5850 int err; 5851 int at; 5852 5853 err = nla_parse_nested(tb, IFLA_STATS_MAX, ifla_filters, 5854 rtnl_stats_get_policy_filters, extack); 5855 if (err < 0) 5856 return err; 5857 5858 for (at = 1; at <= IFLA_STATS_MAX; at++) { 5859 if (tb[at]) { 5860 if (!(filters->mask[0] & IFLA_STATS_FILTER_BIT(at))) { 5861 NL_SET_ERR_MSG(extack, "Filtered attribute not enabled in filter_mask"); 5862 return -EINVAL; 5863 } 5864 filters->mask[at] = nla_get_u32(tb[at]); 5865 } 5866 } 5867 5868 return 0; 5869 } 5870 5871 static int rtnl_stats_get_parse(const struct nlmsghdr *nlh, 5872 u32 filter_mask, 5873 struct rtnl_stats_dump_filters *filters, 5874 struct netlink_ext_ack *extack) 5875 { 5876 struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1]; 5877 int err; 5878 int i; 5879 5880 filters->mask[0] = filter_mask; 5881 for (i = 1; i < ARRAY_SIZE(filters->mask); i++) 5882 filters->mask[i] = -1U; 5883 5884 err = nlmsg_parse(nlh, sizeof(struct if_stats_msg), tb, 5885 IFLA_STATS_GETSET_MAX, rtnl_stats_get_policy, extack); 5886 if (err < 0) 5887 return err; 5888 5889 if (tb[IFLA_STATS_GET_FILTERS]) { 5890 err = rtnl_stats_get_parse_filters(tb[IFLA_STATS_GET_FILTERS], 5891 filters, extack); 5892 if (err) 5893 return err; 5894 } 5895 5896 return 0; 5897 } 5898 5899 static int rtnl_valid_stats_req(const struct nlmsghdr *nlh, bool strict_check, 5900 bool is_dump, struct netlink_ext_ack *extack) 5901 { 5902 struct if_stats_msg *ifsm; 5903 5904 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifsm))) { 5905 NL_SET_ERR_MSG(extack, "Invalid header for stats dump"); 5906 return -EINVAL; 5907 } 5908 5909 if (!strict_check) 5910 return 0; 5911 5912 ifsm = nlmsg_data(nlh); 5913 5914 /* only requests using strict checks can pass data to influence 5915 * the dump. The legacy exception is filter_mask. 5916 */ 5917 if (ifsm->pad1 || ifsm->pad2 || (is_dump && ifsm->ifindex)) { 5918 NL_SET_ERR_MSG(extack, "Invalid values in header for stats dump request"); 5919 return -EINVAL; 5920 } 5921 if (ifsm->filter_mask >= IFLA_STATS_FILTER_BIT(IFLA_STATS_MAX + 1)) { 5922 NL_SET_ERR_MSG(extack, "Invalid stats requested through filter mask"); 5923 return -EINVAL; 5924 } 5925 5926 return 0; 5927 } 5928 5929 static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh, 5930 struct netlink_ext_ack *extack) 5931 { 5932 struct rtnl_stats_dump_filters filters; 5933 struct net *net = sock_net(skb->sk); 5934 struct net_device *dev = NULL; 5935 int idxattr = 0, prividx = 0; 5936 struct if_stats_msg *ifsm; 5937 struct sk_buff *nskb; 5938 int err; 5939 5940 err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb), 5941 false, extack); 5942 if (err) 5943 return err; 5944 5945 ifsm = nlmsg_data(nlh); 5946 if (ifsm->ifindex > 0) 5947 dev = __dev_get_by_index(net, ifsm->ifindex); 5948 else 5949 return -EINVAL; 5950 5951 if (!dev) 5952 return -ENODEV; 5953 5954 if (!ifsm->filter_mask) { 5955 NL_SET_ERR_MSG(extack, "Filter mask must be set for stats get"); 5956 return -EINVAL; 5957 } 5958 5959 err = rtnl_stats_get_parse(nlh, ifsm->filter_mask, &filters, extack); 5960 if (err) 5961 return err; 5962 5963 nskb = nlmsg_new(if_nlmsg_stats_size(dev, &filters), GFP_KERNEL); 5964 if (!nskb) 5965 return -ENOBUFS; 5966 5967 err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS, 5968 NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0, 5969 0, &filters, &idxattr, &prividx, extack); 5970 if (err < 0) { 5971 /* -EMSGSIZE implies BUG in if_nlmsg_stats_size */ 5972 WARN_ON(err == -EMSGSIZE); 5973 kfree_skb(nskb); 5974 } else { 5975 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid); 5976 } 5977 5978 return err; 5979 } 5980 5981 static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb) 5982 { 5983 struct netlink_ext_ack *extack = cb->extack; 5984 int h, s_h, err, s_idx, s_idxattr, s_prividx; 5985 struct rtnl_stats_dump_filters filters; 5986 struct net *net = sock_net(skb->sk); 5987 unsigned int flags = NLM_F_MULTI; 5988 struct if_stats_msg *ifsm; 5989 struct hlist_head *head; 5990 struct net_device *dev; 5991 int idx = 0; 5992 5993 s_h = cb->args[0]; 5994 s_idx = cb->args[1]; 5995 s_idxattr = cb->args[2]; 5996 s_prividx = cb->args[3]; 5997 5998 cb->seq = net->dev_base_seq; 5999 6000 err = rtnl_valid_stats_req(cb->nlh, cb->strict_check, true, extack); 6001 if (err) 6002 return err; 6003 6004 ifsm = nlmsg_data(cb->nlh); 6005 if (!ifsm->filter_mask) { 6006 NL_SET_ERR_MSG(extack, "Filter mask must be set for stats dump"); 6007 return -EINVAL; 6008 } 6009 6010 err = rtnl_stats_get_parse(cb->nlh, ifsm->filter_mask, &filters, 6011 extack); 6012 if (err) 6013 return err; 6014 6015 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { 6016 idx = 0; 6017 head = &net->dev_index_head[h]; 6018 hlist_for_each_entry(dev, head, index_hlist) { 6019 if (idx < s_idx) 6020 goto cont; 6021 err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS, 6022 NETLINK_CB(cb->skb).portid, 6023 cb->nlh->nlmsg_seq, 0, 6024 flags, &filters, 6025 &s_idxattr, &s_prividx, 6026 extack); 6027 /* If we ran out of room on the first message, 6028 * we're in trouble 6029 */ 6030 WARN_ON((err == -EMSGSIZE) && (skb->len == 0)); 6031 6032 if (err < 0) 6033 goto out; 6034 s_prividx = 0; 6035 s_idxattr = 0; 6036 nl_dump_check_consistent(cb, nlmsg_hdr(skb)); 6037 cont: 6038 idx++; 6039 } 6040 } 6041 out: 6042 cb->args[3] = s_prividx; 6043 cb->args[2] = s_idxattr; 6044 cb->args[1] = idx; 6045 cb->args[0] = h; 6046 6047 return skb->len; 6048 } 6049 6050 void rtnl_offload_xstats_notify(struct net_device *dev) 6051 { 6052 struct rtnl_stats_dump_filters response_filters = {}; 6053 struct net *net = dev_net(dev); 6054 int idxattr = 0, prividx = 0; 6055 struct sk_buff *skb; 6056 int err = -ENOBUFS; 6057 6058 ASSERT_RTNL(); 6059 6060 response_filters.mask[0] |= 6061 IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS); 6062 response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |= 6063 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO); 6064 6065 skb = nlmsg_new(if_nlmsg_stats_size(dev, &response_filters), 6066 GFP_KERNEL); 6067 if (!skb) 6068 goto errout; 6069 6070 err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS, 0, 0, 0, 0, 6071 &response_filters, &idxattr, &prividx, NULL); 6072 if (err < 0) { 6073 kfree_skb(skb); 6074 goto errout; 6075 } 6076 6077 rtnl_notify(skb, net, 0, RTNLGRP_STATS, NULL, GFP_KERNEL); 6078 return; 6079 6080 errout: 6081 rtnl_set_sk_err(net, RTNLGRP_STATS, err); 6082 } 6083 EXPORT_SYMBOL(rtnl_offload_xstats_notify); 6084 6085 static int rtnl_stats_set(struct sk_buff *skb, struct nlmsghdr *nlh, 6086 struct netlink_ext_ack *extack) 6087 { 6088 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; 6089 struct rtnl_stats_dump_filters response_filters = {}; 6090 struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1]; 6091 struct net *net = sock_net(skb->sk); 6092 struct net_device *dev = NULL; 6093 struct if_stats_msg *ifsm; 6094 bool notify = false; 6095 int err; 6096 6097 err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb), 6098 false, extack); 6099 if (err) 6100 return err; 6101 6102 ifsm = nlmsg_data(nlh); 6103 if (ifsm->family != AF_UNSPEC) { 6104 NL_SET_ERR_MSG(extack, "Address family should be AF_UNSPEC"); 6105 return -EINVAL; 6106 } 6107 6108 if (ifsm->ifindex > 0) 6109 dev = __dev_get_by_index(net, ifsm->ifindex); 6110 else 6111 return -EINVAL; 6112 6113 if (!dev) 6114 return -ENODEV; 6115 6116 if (ifsm->filter_mask) { 6117 NL_SET_ERR_MSG(extack, "Filter mask must be 0 for stats set"); 6118 return -EINVAL; 6119 } 6120 6121 err = nlmsg_parse(nlh, sizeof(*ifsm), tb, IFLA_STATS_GETSET_MAX, 6122 ifla_stats_set_policy, extack); 6123 if (err < 0) 6124 return err; 6125 6126 if (tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]) { 6127 u8 req = nla_get_u8(tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]); 6128 6129 if (req) 6130 err = netdev_offload_xstats_enable(dev, t_l3, extack); 6131 else 6132 err = netdev_offload_xstats_disable(dev, t_l3); 6133 6134 if (!err) 6135 notify = true; 6136 else if (err != -EALREADY) 6137 return err; 6138 6139 response_filters.mask[0] |= 6140 IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS); 6141 response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |= 6142 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO); 6143 } 6144 6145 if (notify) 6146 rtnl_offload_xstats_notify(dev); 6147 6148 return 0; 6149 } 6150 6151 static int rtnl_mdb_valid_dump_req(const struct nlmsghdr *nlh, 6152 struct netlink_ext_ack *extack) 6153 { 6154 struct br_port_msg *bpm; 6155 6156 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*bpm))) { 6157 NL_SET_ERR_MSG(extack, "Invalid header for mdb dump request"); 6158 return -EINVAL; 6159 } 6160 6161 bpm = nlmsg_data(nlh); 6162 if (bpm->ifindex) { 6163 NL_SET_ERR_MSG(extack, "Filtering by device index is not supported for mdb dump request"); 6164 return -EINVAL; 6165 } 6166 if (nlmsg_attrlen(nlh, sizeof(*bpm))) { 6167 NL_SET_ERR_MSG(extack, "Invalid data after header in mdb dump request"); 6168 return -EINVAL; 6169 } 6170 6171 return 0; 6172 } 6173 6174 struct rtnl_mdb_dump_ctx { 6175 long idx; 6176 }; 6177 6178 static int rtnl_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb) 6179 { 6180 struct rtnl_mdb_dump_ctx *ctx = (void *)cb->ctx; 6181 struct net *net = sock_net(skb->sk); 6182 struct net_device *dev; 6183 int idx, s_idx; 6184 int err; 6185 6186 NL_ASSERT_DUMP_CTX_FITS(struct rtnl_mdb_dump_ctx); 6187 6188 if (cb->strict_check) { 6189 err = rtnl_mdb_valid_dump_req(cb->nlh, cb->extack); 6190 if (err) 6191 return err; 6192 } 6193 6194 s_idx = ctx->idx; 6195 idx = 0; 6196 6197 for_each_netdev(net, dev) { 6198 if (idx < s_idx) 6199 goto skip; 6200 if (!dev->netdev_ops->ndo_mdb_dump) 6201 goto skip; 6202 6203 err = dev->netdev_ops->ndo_mdb_dump(dev, skb, cb); 6204 if (err == -EMSGSIZE) 6205 goto out; 6206 /* Moving on to next device, reset markers and sequence 6207 * counters since they are all maintained per-device. 6208 */ 6209 memset(cb->ctx, 0, sizeof(cb->ctx)); 6210 cb->prev_seq = 0; 6211 cb->seq = 0; 6212 skip: 6213 idx++; 6214 } 6215 6216 out: 6217 ctx->idx = idx; 6218 return skb->len; 6219 } 6220 6221 static int rtnl_validate_mdb_entry(const struct nlattr *attr, 6222 struct netlink_ext_ack *extack) 6223 { 6224 struct br_mdb_entry *entry = nla_data(attr); 6225 6226 if (nla_len(attr) != sizeof(struct br_mdb_entry)) { 6227 NL_SET_ERR_MSG_ATTR(extack, attr, "Invalid attribute length"); 6228 return -EINVAL; 6229 } 6230 6231 if (entry->ifindex == 0) { 6232 NL_SET_ERR_MSG(extack, "Zero entry ifindex is not allowed"); 6233 return -EINVAL; 6234 } 6235 6236 if (entry->addr.proto == htons(ETH_P_IP)) { 6237 if (!ipv4_is_multicast(entry->addr.u.ip4) && 6238 !ipv4_is_zeronet(entry->addr.u.ip4)) { 6239 NL_SET_ERR_MSG(extack, "IPv4 entry group address is not multicast or 0.0.0.0"); 6240 return -EINVAL; 6241 } 6242 if (ipv4_is_local_multicast(entry->addr.u.ip4)) { 6243 NL_SET_ERR_MSG(extack, "IPv4 entry group address is local multicast"); 6244 return -EINVAL; 6245 } 6246 #if IS_ENABLED(CONFIG_IPV6) 6247 } else if (entry->addr.proto == htons(ETH_P_IPV6)) { 6248 if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6)) { 6249 NL_SET_ERR_MSG(extack, "IPv6 entry group address is link-local all nodes"); 6250 return -EINVAL; 6251 } 6252 #endif 6253 } else if (entry->addr.proto == 0) { 6254 /* L2 mdb */ 6255 if (!is_multicast_ether_addr(entry->addr.u.mac_addr)) { 6256 NL_SET_ERR_MSG(extack, "L2 entry group is not multicast"); 6257 return -EINVAL; 6258 } 6259 } else { 6260 NL_SET_ERR_MSG(extack, "Unknown entry protocol"); 6261 return -EINVAL; 6262 } 6263 6264 if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) { 6265 NL_SET_ERR_MSG(extack, "Unknown entry state"); 6266 return -EINVAL; 6267 } 6268 if (entry->vid >= VLAN_VID_MASK) { 6269 NL_SET_ERR_MSG(extack, "Invalid entry VLAN id"); 6270 return -EINVAL; 6271 } 6272 6273 return 0; 6274 } 6275 6276 static const struct nla_policy mdba_policy[MDBA_SET_ENTRY_MAX + 1] = { 6277 [MDBA_SET_ENTRY_UNSPEC] = { .strict_start_type = MDBA_SET_ENTRY_ATTRS + 1 }, 6278 [MDBA_SET_ENTRY] = NLA_POLICY_VALIDATE_FN(NLA_BINARY, 6279 rtnl_validate_mdb_entry, 6280 sizeof(struct br_mdb_entry)), 6281 [MDBA_SET_ENTRY_ATTRS] = { .type = NLA_NESTED }, 6282 }; 6283 6284 static int rtnl_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, 6285 struct netlink_ext_ack *extack) 6286 { 6287 struct nlattr *tb[MDBA_SET_ENTRY_MAX + 1]; 6288 struct net *net = sock_net(skb->sk); 6289 struct br_port_msg *bpm; 6290 struct net_device *dev; 6291 int err; 6292 6293 err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb, 6294 MDBA_SET_ENTRY_MAX, mdba_policy, extack); 6295 if (err) 6296 return err; 6297 6298 bpm = nlmsg_data(nlh); 6299 if (!bpm->ifindex) { 6300 NL_SET_ERR_MSG(extack, "Invalid ifindex"); 6301 return -EINVAL; 6302 } 6303 6304 dev = __dev_get_by_index(net, bpm->ifindex); 6305 if (!dev) { 6306 NL_SET_ERR_MSG(extack, "Device doesn't exist"); 6307 return -ENODEV; 6308 } 6309 6310 if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_SET_ENTRY)) { 6311 NL_SET_ERR_MSG(extack, "Missing MDBA_SET_ENTRY attribute"); 6312 return -EINVAL; 6313 } 6314 6315 if (!dev->netdev_ops->ndo_mdb_add) { 6316 NL_SET_ERR_MSG(extack, "Device does not support MDB operations"); 6317 return -EOPNOTSUPP; 6318 } 6319 6320 return dev->netdev_ops->ndo_mdb_add(dev, tb, nlh->nlmsg_flags, extack); 6321 } 6322 6323 static int rtnl_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, 6324 struct netlink_ext_ack *extack) 6325 { 6326 struct nlattr *tb[MDBA_SET_ENTRY_MAX + 1]; 6327 struct net *net = sock_net(skb->sk); 6328 struct br_port_msg *bpm; 6329 struct net_device *dev; 6330 int err; 6331 6332 err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb, 6333 MDBA_SET_ENTRY_MAX, mdba_policy, extack); 6334 if (err) 6335 return err; 6336 6337 bpm = nlmsg_data(nlh); 6338 if (!bpm->ifindex) { 6339 NL_SET_ERR_MSG(extack, "Invalid ifindex"); 6340 return -EINVAL; 6341 } 6342 6343 dev = __dev_get_by_index(net, bpm->ifindex); 6344 if (!dev) { 6345 NL_SET_ERR_MSG(extack, "Device doesn't exist"); 6346 return -ENODEV; 6347 } 6348 6349 if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_SET_ENTRY)) { 6350 NL_SET_ERR_MSG(extack, "Missing MDBA_SET_ENTRY attribute"); 6351 return -EINVAL; 6352 } 6353 6354 if (!dev->netdev_ops->ndo_mdb_del) { 6355 NL_SET_ERR_MSG(extack, "Device does not support MDB operations"); 6356 return -EOPNOTSUPP; 6357 } 6358 6359 return dev->netdev_ops->ndo_mdb_del(dev, tb, extack); 6360 } 6361 6362 /* Process one rtnetlink message. */ 6363 6364 static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, 6365 struct netlink_ext_ack *extack) 6366 { 6367 struct net *net = sock_net(skb->sk); 6368 struct rtnl_link *link; 6369 enum rtnl_kinds kind; 6370 struct module *owner; 6371 int err = -EOPNOTSUPP; 6372 rtnl_doit_func doit; 6373 unsigned int flags; 6374 int family; 6375 int type; 6376 6377 type = nlh->nlmsg_type; 6378 if (type > RTM_MAX) 6379 return -EOPNOTSUPP; 6380 6381 type -= RTM_BASE; 6382 6383 /* All the messages must have at least 1 byte length */ 6384 if (nlmsg_len(nlh) < sizeof(struct rtgenmsg)) 6385 return 0; 6386 6387 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family; 6388 kind = rtnl_msgtype_kind(type); 6389 6390 if (kind != RTNL_KIND_GET && !netlink_net_capable(skb, CAP_NET_ADMIN)) 6391 return -EPERM; 6392 6393 rcu_read_lock(); 6394 if (kind == RTNL_KIND_GET && (nlh->nlmsg_flags & NLM_F_DUMP)) { 6395 struct sock *rtnl; 6396 rtnl_dumpit_func dumpit; 6397 u32 min_dump_alloc = 0; 6398 6399 link = rtnl_get_link(family, type); 6400 if (!link || !link->dumpit) { 6401 family = PF_UNSPEC; 6402 link = rtnl_get_link(family, type); 6403 if (!link || !link->dumpit) 6404 goto err_unlock; 6405 } 6406 owner = link->owner; 6407 dumpit = link->dumpit; 6408 flags = link->flags; 6409 6410 if (type == RTM_GETLINK - RTM_BASE) 6411 min_dump_alloc = rtnl_calcit(skb, nlh); 6412 6413 err = 0; 6414 /* need to do this before rcu_read_unlock() */ 6415 if (!try_module_get(owner)) 6416 err = -EPROTONOSUPPORT; 6417 6418 rcu_read_unlock(); 6419 6420 rtnl = net->rtnl; 6421 if (err == 0) { 6422 struct netlink_dump_control c = { 6423 .dump = dumpit, 6424 .min_dump_alloc = min_dump_alloc, 6425 .module = owner, 6426 .flags = flags, 6427 }; 6428 err = netlink_dump_start(rtnl, skb, nlh, &c); 6429 /* netlink_dump_start() will keep a reference on 6430 * module if dump is still in progress. 6431 */ 6432 module_put(owner); 6433 } 6434 return err; 6435 } 6436 6437 link = rtnl_get_link(family, type); 6438 if (!link || !link->doit) { 6439 family = PF_UNSPEC; 6440 link = rtnl_get_link(PF_UNSPEC, type); 6441 if (!link || !link->doit) 6442 goto out_unlock; 6443 } 6444 6445 owner = link->owner; 6446 if (!try_module_get(owner)) { 6447 err = -EPROTONOSUPPORT; 6448 goto out_unlock; 6449 } 6450 6451 flags = link->flags; 6452 if (kind == RTNL_KIND_DEL && (nlh->nlmsg_flags & NLM_F_BULK) && 6453 !(flags & RTNL_FLAG_BULK_DEL_SUPPORTED)) { 6454 NL_SET_ERR_MSG(extack, "Bulk delete is not supported"); 6455 module_put(owner); 6456 goto err_unlock; 6457 } 6458 6459 if (flags & RTNL_FLAG_DOIT_UNLOCKED) { 6460 doit = link->doit; 6461 rcu_read_unlock(); 6462 if (doit) 6463 err = doit(skb, nlh, extack); 6464 module_put(owner); 6465 return err; 6466 } 6467 rcu_read_unlock(); 6468 6469 rtnl_lock(); 6470 link = rtnl_get_link(family, type); 6471 if (link && link->doit) 6472 err = link->doit(skb, nlh, extack); 6473 rtnl_unlock(); 6474 6475 module_put(owner); 6476 6477 return err; 6478 6479 out_unlock: 6480 rcu_read_unlock(); 6481 return err; 6482 6483 err_unlock: 6484 rcu_read_unlock(); 6485 return -EOPNOTSUPP; 6486 } 6487 6488 static void rtnetlink_rcv(struct sk_buff *skb) 6489 { 6490 netlink_rcv_skb(skb, &rtnetlink_rcv_msg); 6491 } 6492 6493 static int rtnetlink_bind(struct net *net, int group) 6494 { 6495 switch (group) { 6496 case RTNLGRP_IPV4_MROUTE_R: 6497 case RTNLGRP_IPV6_MROUTE_R: 6498 if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 6499 return -EPERM; 6500 break; 6501 } 6502 return 0; 6503 } 6504 6505 static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr) 6506 { 6507 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 6508 6509 switch (event) { 6510 case NETDEV_REBOOT: 6511 case NETDEV_CHANGEMTU: 6512 case NETDEV_CHANGEADDR: 6513 case NETDEV_CHANGENAME: 6514 case NETDEV_FEAT_CHANGE: 6515 case NETDEV_BONDING_FAILOVER: 6516 case NETDEV_POST_TYPE_CHANGE: 6517 case NETDEV_NOTIFY_PEERS: 6518 case NETDEV_CHANGEUPPER: 6519 case NETDEV_RESEND_IGMP: 6520 case NETDEV_CHANGEINFODATA: 6521 case NETDEV_CHANGELOWERSTATE: 6522 case NETDEV_CHANGE_TX_QUEUE_LEN: 6523 rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event), 6524 GFP_KERNEL, NULL, 0, 0, NULL); 6525 break; 6526 default: 6527 break; 6528 } 6529 return NOTIFY_DONE; 6530 } 6531 6532 static struct notifier_block rtnetlink_dev_notifier = { 6533 .notifier_call = rtnetlink_event, 6534 }; 6535 6536 6537 static int __net_init rtnetlink_net_init(struct net *net) 6538 { 6539 struct sock *sk; 6540 struct netlink_kernel_cfg cfg = { 6541 .groups = RTNLGRP_MAX, 6542 .input = rtnetlink_rcv, 6543 .cb_mutex = &rtnl_mutex, 6544 .flags = NL_CFG_F_NONROOT_RECV, 6545 .bind = rtnetlink_bind, 6546 }; 6547 6548 sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg); 6549 if (!sk) 6550 return -ENOMEM; 6551 net->rtnl = sk; 6552 return 0; 6553 } 6554 6555 static void __net_exit rtnetlink_net_exit(struct net *net) 6556 { 6557 netlink_kernel_release(net->rtnl); 6558 net->rtnl = NULL; 6559 } 6560 6561 static struct pernet_operations rtnetlink_net_ops = { 6562 .init = rtnetlink_net_init, 6563 .exit = rtnetlink_net_exit, 6564 }; 6565 6566 void __init rtnetlink_init(void) 6567 { 6568 if (register_pernet_subsys(&rtnetlink_net_ops)) 6569 panic("rtnetlink_init: cannot initialize rtnetlink\n"); 6570 6571 register_netdevice_notifier(&rtnetlink_dev_notifier); 6572 6573 rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink, 6574 rtnl_dump_ifinfo, 0); 6575 rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, 0); 6576 rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, 0); 6577 rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, 0); 6578 6579 rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, 0); 6580 rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, 0); 6581 rtnl_register(PF_UNSPEC, RTM_GETNETCONF, NULL, rtnl_dump_all, 0); 6582 6583 rtnl_register(PF_UNSPEC, RTM_NEWLINKPROP, rtnl_newlinkprop, NULL, 0); 6584 rtnl_register(PF_UNSPEC, RTM_DELLINKPROP, rtnl_dellinkprop, NULL, 0); 6585 6586 rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, 0); 6587 rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL, 6588 RTNL_FLAG_BULK_DEL_SUPPORTED); 6589 rtnl_register(PF_BRIDGE, RTM_GETNEIGH, rtnl_fdb_get, rtnl_fdb_dump, 0); 6590 6591 rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, 0); 6592 rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, 0); 6593 rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, 0); 6594 6595 rtnl_register(PF_UNSPEC, RTM_GETSTATS, rtnl_stats_get, rtnl_stats_dump, 6596 0); 6597 rtnl_register(PF_UNSPEC, RTM_SETSTATS, rtnl_stats_set, NULL, 0); 6598 6599 rtnl_register(PF_BRIDGE, RTM_GETMDB, NULL, rtnl_mdb_dump, 0); 6600 rtnl_register(PF_BRIDGE, RTM_NEWMDB, rtnl_mdb_add, NULL, 0); 6601 rtnl_register(PF_BRIDGE, RTM_DELMDB, rtnl_mdb_del, NULL, 0); 6602 } 6603