// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Routing netlink socket interface: protocol independent part.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	Fixes:
 *	Vitaly E. Lavrov		RTA_OK arithmetics was wrong.
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/mutex.h>
#include <linux/if_addr.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/bpf.h>

#include <linux/uaccess.h>

#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/udp.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/fib_rules.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>

#define RTNL_MAX_TYPE		50
#define RTNL_SLAVE_MAX_TYPE	36

struct rtnl_link {
	rtnl_doit_func		doit;
	rtnl_dumpit_func	dumpit;
	struct module		*owner;
	unsigned int		flags;
	struct rcu_head		rcu;
};

static DEFINE_MUTEX(rtnl_mutex);

void rtnl_lock(void)
{
	mutex_lock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_lock);

int rtnl_lock_killable(void)
{
	return mutex_lock_killable(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_lock_killable);

static struct sk_buff *defer_kfree_skb_list;
void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail)
{
	if (head && tail) {
		tail->next = defer_kfree_skb_list;
		defer_kfree_skb_list = head;
	}
}
EXPORT_SYMBOL(rtnl_kfree_skbs);

void __rtnl_unlock(void)
{
	struct sk_buff *head = defer_kfree_skb_list;

	defer_kfree_skb_list = NULL;

	mutex_unlock(&rtnl_mutex);

	while (head) {
		struct sk_buff *next = head->next;

		kfree_skb(head);
		cond_resched();
		head = next;
	}
}

void rtnl_unlock(void)
{
	/* This fellow will unlock it for us. */
	netdev_run_todo();
}
EXPORT_SYMBOL(rtnl_unlock);
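/* Illustrative sketch (not part of the original file): callers serialize
 * control-plane changes by taking the RTNL lock around device mutations.
 * The helper name below is hypothetical; dev_set_mtu() is a real API that
 * expects the caller to hold RTNL.
 *
 *	static int example_set_mtu(struct net_device *dev, int new_mtu)
 *	{
 *		int err;
 *
 *		rtnl_lock();
 *		err = dev_set_mtu(dev, new_mtu);
 *		rtnl_unlock();
 *		return err;
 *	}
 */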
int rtnl_trylock(void)
{
	return mutex_trylock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_trylock);

int rtnl_is_locked(void)
{
	return mutex_is_locked(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_is_locked);

bool refcount_dec_and_rtnl_lock(refcount_t *r)
{
	return refcount_dec_and_mutex_lock(r, &rtnl_mutex);
}
EXPORT_SYMBOL(refcount_dec_and_rtnl_lock);

#ifdef CONFIG_PROVE_LOCKING
bool lockdep_rtnl_is_held(void)
{
	return lockdep_is_held(&rtnl_mutex);
}
EXPORT_SYMBOL(lockdep_rtnl_is_held);
#endif /* #ifdef CONFIG_PROVE_LOCKING */

static struct rtnl_link *__rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];

static inline int rtm_msgindex(int msgtype)
{
	int msgindex = msgtype - RTM_BASE;

	/*
	 * msgindex < 0 implies someone tried to register a netlink
	 * control code. msgindex >= RTM_NR_MSGTYPES may indicate that
	 * the message type has not been added to linux/rtnetlink.h
	 */
	BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES);

	return msgindex;
}

static struct rtnl_link *rtnl_get_link(int protocol, int msgtype)
{
	struct rtnl_link **tab;

	if (protocol >= ARRAY_SIZE(rtnl_msg_handlers))
		protocol = PF_UNSPEC;

	tab = rcu_dereference_rtnl(rtnl_msg_handlers[protocol]);
	if (!tab)
		tab = rcu_dereference_rtnl(rtnl_msg_handlers[PF_UNSPEC]);

	return tab[msgtype];
}

static int rtnl_register_internal(struct module *owner,
				  int protocol, int msgtype,
				  rtnl_doit_func doit, rtnl_dumpit_func dumpit,
				  unsigned int flags)
{
	struct rtnl_link *link, *old;
	struct rtnl_link __rcu **tab;
	int msgindex;
	int ret = -ENOBUFS;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
	msgindex = rtm_msgindex(msgtype);

	rtnl_lock();
	tab = rtnl_msg_handlers[protocol];
	if (tab == NULL) {
		tab = kcalloc(RTM_NR_MSGTYPES, sizeof(void *), GFP_KERNEL);
		if (!tab)
			goto unlock;

		/* ensures we see the 0 stores */
		rcu_assign_pointer(rtnl_msg_handlers[protocol], tab);
	}

	old = rtnl_dereference(tab[msgindex]);
	if (old) {
		link = kmemdup(old, sizeof(*old), GFP_KERNEL);
		if (!link)
			goto unlock;
	} else {
		link = kzalloc(sizeof(*link), GFP_KERNEL);
		if (!link)
			goto unlock;
	}

	WARN_ON(link->owner && link->owner != owner);
	link->owner = owner;

	WARN_ON(doit && link->doit && link->doit != doit);
	if (doit)
		link->doit = doit;
	WARN_ON(dumpit && link->dumpit && link->dumpit != dumpit);
	if (dumpit)
		link->dumpit = dumpit;

	link->flags |= flags;

	/* publish protocol:msgtype */
	rcu_assign_pointer(tab[msgindex], link);
	ret = 0;
	if (old)
		kfree_rcu(old, rcu);
unlock:
	rtnl_unlock();
	return ret;
}

/**
 * rtnl_register_module - Register a rtnetlink message type
 *
 * @owner: module registering the hook (THIS_MODULE)
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 * @doit: Function pointer called for each request message
 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
 *
 * Like rtnl_register, but for use by removable modules.
 */
int rtnl_register_module(struct module *owner,
			 int protocol, int msgtype,
			 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
			 unsigned int flags)
{
	return rtnl_register_internal(owner, protocol, msgtype,
				      doit, dumpit, flags);
}
EXPORT_SYMBOL_GPL(rtnl_register_module);
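/* Illustrative sketch (not part of the original file): a removable module
 * typically pairs rtnl_register_module() in its init path with
 * rtnl_unregister() on exit. The protocol, message type, and handler name
 * below are hypothetical placeholders.
 *
 *	static int __init example_init(void)
 *	{
 *		return rtnl_register_module(THIS_MODULE, PF_BRIDGE,
 *					    RTM_GETMDB, NULL,
 *					    example_dumpit, 0);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		rtnl_unregister(PF_BRIDGE, RTM_GETMDB);
 *	}
 */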
/**
 * rtnl_register - Register a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 * @doit: Function pointer called for each request message
 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
 *
 * Registers the specified function pointers (at least one of them has
 * to be non-NULL) to be called whenever a request message for the
 * specified protocol family and message type is received.
 *
 * The special protocol family PF_UNSPEC may be used to define fallback
 * function pointers for the case when no entry for the specific protocol
 * family exists.
 */
void rtnl_register(int protocol, int msgtype,
		   rtnl_doit_func doit, rtnl_dumpit_func dumpit,
		   unsigned int flags)
{
	int err;

	err = rtnl_register_internal(NULL, protocol, msgtype, doit, dumpit,
				     flags);
	if (err)
		pr_err("Unable to register rtnetlink message handler, "
		       "protocol = %d, message type = %d\n", protocol, msgtype);
}

/**
 * rtnl_unregister - Unregister a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_unregister(int protocol, int msgtype)
{
	struct rtnl_link **tab, *link;
	int msgindex;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
	msgindex = rtm_msgindex(msgtype);

	rtnl_lock();
	tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
	if (!tab) {
		rtnl_unlock();
		return -ENOENT;
	}

	link = tab[msgindex];
	rcu_assign_pointer(tab[msgindex], NULL);
	rtnl_unlock();

	kfree_rcu(link, rcu);

	return 0;
}
EXPORT_SYMBOL_GPL(rtnl_unregister);

/**
 * rtnl_unregister_all - Unregister all rtnetlink message types of a protocol
 * @protocol : Protocol family or PF_UNSPEC
 *
 * Identical to calling rtnl_unregister() for all registered message types
 * of a certain protocol family.
 */
void rtnl_unregister_all(int protocol)
{
	struct rtnl_link **tab, *link;
	int msgindex;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);

	rtnl_lock();
	tab = rtnl_msg_handlers[protocol];
	if (!tab) {
		rtnl_unlock();
		return;
	}
	RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL);
	for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) {
		link = tab[msgindex];
		if (!link)
			continue;

		rcu_assign_pointer(tab[msgindex], NULL);
		kfree_rcu(link, rcu);
	}
	rtnl_unlock();

	synchronize_net();

	kfree(tab);
}
EXPORT_SYMBOL_GPL(rtnl_unregister_all);
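/* Illustrative sketch (not part of the original file): built-in protocol
 * code registers its handlers once at init time with rtnl_register().
 * The handler below is a hypothetical skeleton showing the
 * rtnl_doit_func signature used throughout this file.
 *
 *	static int example_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
 *				struct netlink_ext_ack *extack)
 *	{
 *		return 0;
 *	}
 *
 *	rtnl_register(PF_UNSPEC, RTM_NEWLINK, example_doit, NULL, 0);
 */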
static LIST_HEAD(link_ops);

static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
{
	const struct rtnl_link_ops *ops;

	list_for_each_entry(ops, &link_ops, list) {
		if (!strcmp(ops->kind, kind))
			return ops;
	}
	return NULL;
}

/**
 * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * The caller must hold the rtnl_mutex. This function should be used
 * by drivers that create devices during module initialization. It
 * must be called before registering the devices.
 *
 * Returns 0 on success or a negative error code.
 */
int __rtnl_link_register(struct rtnl_link_ops *ops)
{
	if (rtnl_link_ops_get(ops->kind))
		return -EEXIST;

	/* The check for setup is here because if ops
	 * does not have that filled up, it is not possible
	 * to use the ops for creating device. So do not
	 * fill up dellink as well. That disables rtnl_dellink.
	 */
	if (ops->setup && !ops->dellink)
		ops->dellink = unregister_netdevice_queue;

	list_add_tail(&ops->list, &link_ops);
	return 0;
}
EXPORT_SYMBOL_GPL(__rtnl_link_register);

/**
 * rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_link_register(struct rtnl_link_ops *ops)
{
	int err;

	/* Sanity-check max sizes to avoid stack buffer overflow. */
	if (WARN_ON(ops->maxtype > RTNL_MAX_TYPE ||
		    ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE))
		return -EINVAL;

	rtnl_lock();
	err = __rtnl_link_register(ops);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL_GPL(rtnl_link_register);
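/* Illustrative sketch (not part of the original file): a virtual-device
 * driver declares rtnl_link_ops keyed by its "kind" string and registers
 * them from module init. All names below are hypothetical.
 *
 *	static void example_setup(struct net_device *dev)
 *	{
 *		ether_setup(dev);
 *	}
 *
 *	static struct rtnl_link_ops example_link_ops __read_mostly = {
 *		.kind	= "example",
 *		.setup	= example_setup,
 *	};
 *
 *	err = rtnl_link_register(&example_link_ops);
 */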
static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
{
	struct net_device *dev;
	LIST_HEAD(list_kill);

	for_each_netdev(net, dev) {
		if (dev->rtnl_link_ops == ops)
			ops->dellink(dev, &list_kill);
	}
	unregister_netdevice_many(&list_kill);
}

/**
 * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 *
 * The caller must hold the rtnl_mutex and guarantee net_namespace_list
 * integrity (hold pernet_ops_rwsem for writing to close the race
 * with setup_net() and cleanup_net()).
 */
void __rtnl_link_unregister(struct rtnl_link_ops *ops)
{
	struct net *net;

	for_each_net(net) {
		__rtnl_kill_links(net, ops);
	}
	list_del(&ops->list);
}
EXPORT_SYMBOL_GPL(__rtnl_link_unregister);

/* Return with the rtnl_lock held when there are no network
 * devices unregistering in any network namespace.
 */
static void rtnl_lock_unregistering_all(void)
{
	struct net *net;
	bool unregistering;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&netdev_unregistering_wq, &wait);
	for (;;) {
		unregistering = false;
		rtnl_lock();
		/* We held write locked pernet_ops_rwsem, and parallel
		 * setup_net() and cleanup_net() are not possible.
		 */
		for_each_net(net) {
			if (net->dev_unreg_count > 0) {
				unregistering = true;
				break;
			}
		}
		if (!unregistering)
			break;
		__rtnl_unlock();

		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&netdev_unregistering_wq, &wait);
}

/**
 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 */
void rtnl_link_unregister(struct rtnl_link_ops *ops)
{
	/* Close the race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock_unregistering_all();
	__rtnl_link_unregister(ops);
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(rtnl_link_unregister);

static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
{
	struct net_device *master_dev;
	const struct rtnl_link_ops *ops;
	size_t size = 0;

	rcu_read_lock();

	master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
	if (!master_dev)
		goto out;

	ops = master_dev->rtnl_link_ops;
	if (!ops || !ops->get_slave_size)
		goto out;
	/* IFLA_INFO_SLAVE_DATA + nested data */
	size = nla_total_size(sizeof(struct nlattr)) +
	       ops->get_slave_size(master_dev, dev);

out:
	rcu_read_unlock();
	return size;
}

static size_t rtnl_link_get_size(const struct net_device *dev)
{
	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
	size_t size;

	if (!ops)
		return 0;

	size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
	       nla_total_size(strlen(ops->kind) + 1);  /* IFLA_INFO_KIND */

	if (ops->get_size)
		/* IFLA_INFO_DATA + nested data */
		size += nla_total_size(sizeof(struct nlattr)) +
			ops->get_size(dev);

	if (ops->get_xstats_size)
		/* IFLA_INFO_XSTATS */
		size += nla_total_size(ops->get_xstats_size(dev));

	size += rtnl_link_get_slave_info_data_size(dev);

	return size;
}

static LIST_HEAD(rtnl_af_ops);

static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
{
	const struct rtnl_af_ops *ops;

	list_for_each_entry_rcu(ops, &rtnl_af_ops, list) {
		if (ops->family == family)
			return ops;
	}

	return NULL;
}

/**
 * rtnl_af_register - Register rtnl_af_ops with rtnetlink.
 * @ops: struct rtnl_af_ops * to register
 */
void rtnl_af_register(struct rtnl_af_ops *ops)
{
	rtnl_lock();
	list_add_tail_rcu(&ops->list, &rtnl_af_ops);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(rtnl_af_register);
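/* Illustrative sketch (not part of the original file): an address family
 * registers per-AF link attribute callbacks once at init. The callback
 * names are hypothetical; the fields shown exist in struct rtnl_af_ops.
 *
 *	static struct rtnl_af_ops example_af_ops __read_mostly = {
 *		.family		  = AF_INET,
 *		.fill_link_af	  = example_fill_link_af,
 *		.get_link_af_size = example_get_link_af_size,
 *	};
 *
 *	rtnl_af_register(&example_af_ops);
 */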
/**
 * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
 * @ops: struct rtnl_af_ops * to unregister
 */
void rtnl_af_unregister(struct rtnl_af_ops *ops)
{
	rtnl_lock();
	list_del_rcu(&ops->list);
	rtnl_unlock();

	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(rtnl_af_unregister);

static size_t rtnl_link_get_af_size(const struct net_device *dev,
				    u32 ext_filter_mask)
{
	struct rtnl_af_ops *af_ops;
	size_t size;

	/* IFLA_AF_SPEC */
	size = nla_total_size(sizeof(struct nlattr));

	rcu_read_lock();
	list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
		if (af_ops->get_link_af_size) {
			/* AF_* + nested data */
			size += nla_total_size(sizeof(struct nlattr)) +
				af_ops->get_link_af_size(dev, ext_filter_mask);
		}
	}
	rcu_read_unlock();

	return size;
}

static bool rtnl_have_link_slave_info(const struct net_device *dev)
{
	struct net_device *master_dev;
	bool ret = false;

	rcu_read_lock();

	master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
	if (master_dev && master_dev->rtnl_link_ops)
		ret = true;
	rcu_read_unlock();
	return ret;
}

static int rtnl_link_slave_info_fill(struct sk_buff *skb,
				     const struct net_device *dev)
{
	struct net_device *master_dev;
	const struct rtnl_link_ops *ops;
	struct nlattr *slave_data;
	int err;

	master_dev = netdev_master_upper_dev_get((struct net_device *)dev);
	if (!master_dev)
		return 0;
	ops = master_dev->rtnl_link_ops;
	if (!ops)
		return 0;
	if (nla_put_string(skb, IFLA_INFO_SLAVE_KIND, ops->kind) < 0)
		return -EMSGSIZE;
	if (ops->fill_slave_info) {
		slave_data = nla_nest_start_noflag(skb, IFLA_INFO_SLAVE_DATA);
		if (!slave_data)
			return -EMSGSIZE;
		err = ops->fill_slave_info(skb, master_dev, dev);
		if (err < 0)
			goto err_cancel_slave_data;
		nla_nest_end(skb, slave_data);
	}
	return 0;

err_cancel_slave_data:
	nla_nest_cancel(skb, slave_data);
	return err;
}

static int rtnl_link_info_fill(struct sk_buff *skb,
			       const struct net_device *dev)
{
	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
	struct nlattr *data;
	int err;

	if (!ops)
		return 0;
	if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
		return -EMSGSIZE;
	if (ops->fill_xstats) {
		err = ops->fill_xstats(skb, dev);
		if (err < 0)
			return err;
	}
	if (ops->fill_info) {
		data = nla_nest_start_noflag(skb, IFLA_INFO_DATA);
		if (data == NULL)
			return -EMSGSIZE;
		err = ops->fill_info(skb, dev);
		if (err < 0)
			goto err_cancel_data;
		nla_nest_end(skb, data);
	}
	return 0;

err_cancel_data:
	nla_nest_cancel(skb, data);
	return err;
}

static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
{
	struct nlattr *linkinfo;
	int err = -EMSGSIZE;

	linkinfo = nla_nest_start_noflag(skb, IFLA_LINKINFO);
	if (linkinfo == NULL)
		goto out;

	err = rtnl_link_info_fill(skb, dev);
	if (err < 0)
		goto err_cancel_link;

	err = rtnl_link_slave_info_fill(skb, dev);
	if (err < 0)
		goto err_cancel_link;

	nla_nest_end(skb, linkinfo);
	return 0;

err_cancel_link:
	nla_nest_cancel(skb, linkinfo);
out:
	return err;
}
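/* Illustrative sketch (not part of the original file): a driver's
 * ->fill_info() callback runs inside the IFLA_INFO_DATA nest opened by
 * rtnl_link_info_fill() above, so it only emits its own attributes.
 * The attribute and field names are hypothetical.
 *
 *	static int example_fill_info(struct sk_buff *skb,
 *				     const struct net_device *dev)
 *	{
 *		struct example_priv *priv = netdev_priv(dev);
 *
 *		if (nla_put_u32(skb, IFLA_EXAMPLE_MODE, priv->mode))
 *			return -EMSGSIZE;
 *		return 0;
 *	}
 */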
int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
{
	struct sock *rtnl = net->rtnl;
	int err = 0;

	NETLINK_CB(skb).dst_group = group;
	if (echo)
		refcount_inc(&skb->users);
	netlink_broadcast(rtnl, skb, pid, group, GFP_KERNEL);
	if (echo)
		err = netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT);
	return err;
}

int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
{
	struct sock *rtnl = net->rtnl;

	return nlmsg_unicast(rtnl, skb, pid);
}
EXPORT_SYMBOL(rtnl_unicast);

void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
		 struct nlmsghdr *nlh, gfp_t flags)
{
	struct sock *rtnl = net->rtnl;
	int report = 0;

	if (nlh)
		report = nlmsg_report(nlh);

	nlmsg_notify(rtnl, skb, pid, group, report, flags);
}
EXPORT_SYMBOL(rtnl_notify);

void rtnl_set_sk_err(struct net *net, u32 group, int error)
{
	struct sock *rtnl = net->rtnl;

	netlink_set_err(rtnl, 0, group, error);
}
EXPORT_SYMBOL(rtnl_set_sk_err);
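/* Illustrative sketch (not part of the original file): the usual notifier
 * pattern builds a message, multicasts it with rtnl_notify(), and reports
 * an overrun via rtnl_set_sk_err() when allocation or filling fails.
 * example_fill() and payload_size are hypothetical.
 *
 *	skb = nlmsg_new(payload_size, GFP_KERNEL);
 *	if (!skb)
 *		goto errout;
 *	err = example_fill(skb);
 *	if (err < 0) {
 *		kfree_skb(skb);
 *		goto errout;
 *	}
 *	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
 *	return;
 * errout:
 *	rtnl_set_sk_err(net, RTNLGRP_LINK, err);
 */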
int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
{
	struct nlattr *mx;
	int i, valid = 0;

	/* nothing is dumped for dst_default_metrics, so just skip the loop */
	if (metrics == dst_default_metrics.metrics)
		return 0;

	mx = nla_nest_start_noflag(skb, RTA_METRICS);
	if (mx == NULL)
		return -ENOBUFS;

	for (i = 0; i < RTAX_MAX; i++) {
		if (metrics[i]) {
			if (i == RTAX_CC_ALGO - 1) {
				char tmp[TCP_CA_NAME_MAX], *name;

				name = tcp_ca_get_name_by_key(metrics[i], tmp);
				if (!name)
					continue;
				if (nla_put_string(skb, i + 1, name))
					goto nla_put_failure;
			} else if (i == RTAX_FEATURES - 1) {
				u32 user_features = metrics[i] & RTAX_FEATURE_MASK;

				if (!user_features)
					continue;
				BUILD_BUG_ON(RTAX_FEATURE_MASK & DST_FEATURE_MASK);
				if (nla_put_u32(skb, i + 1, user_features))
					goto nla_put_failure;
			} else {
				if (nla_put_u32(skb, i + 1, metrics[i]))
					goto nla_put_failure;
			}
			valid++;
		}
	}

	if (!valid) {
		nla_nest_cancel(skb, mx);
		return 0;
	}

	return nla_nest_end(skb, mx);

nla_put_failure:
	nla_nest_cancel(skb, mx);
	return -EMSGSIZE;
}
EXPORT_SYMBOL(rtnetlink_put_metrics);

int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
		       long expires, u32 error)
{
	struct rta_cacheinfo ci = {
		.rta_error = error,
		.rta_id = id,
	};

	if (dst) {
		ci.rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse);
		ci.rta_used = dst->__use;
		ci.rta_clntref = atomic_read(&dst->__refcnt);
	}
	if (expires) {
		unsigned long clock;

		clock = jiffies_to_clock_t(abs(expires));
		clock = min_t(unsigned long, clock, INT_MAX);
		ci.rta_expires = (expires > 0) ? clock : -clock;
	}
	return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
}
EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);

static void set_operstate(struct net_device *dev, unsigned char transition)
{
	unsigned char operstate = dev->operstate;

	switch (transition) {
	case IF_OPER_UP:
		if ((operstate == IF_OPER_DORMANT ||
		     operstate == IF_OPER_UNKNOWN) &&
		    !netif_dormant(dev))
			operstate = IF_OPER_UP;
		break;

	case IF_OPER_DORMANT:
		if (operstate == IF_OPER_UP ||
		    operstate == IF_OPER_UNKNOWN)
			operstate = IF_OPER_DORMANT;
		break;
	}

	if (dev->operstate != operstate) {
		write_lock_bh(&dev_base_lock);
		dev->operstate = operstate;
		write_unlock_bh(&dev_base_lock);
		netdev_state_change(dev);
	}
}

static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
{
	return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
	       (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
}

static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
					   const struct ifinfomsg *ifm)
{
	unsigned int flags = ifm->ifi_flags;

	/* bugwards compatibility: ifi_change == 0 is treated as ~0 */
	if (ifm->ifi_change)
		flags = (flags & ifm->ifi_change) |
			(rtnl_dev_get_flags(dev) & ~ifm->ifi_change);

	return flags;
}
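/* Illustrative note (not part of the original file): ifi_change acts as a
 * bit mask selecting which flag bits come from the request. For example,
 * with ifi_change == IFF_UP and ifi_flags == IFF_UP, only IFF_UP is taken
 * from the message and every other flag keeps the device's current value:
 *
 *	flags = (IFF_UP & IFF_UP) | (rtnl_dev_get_flags(dev) & ~IFF_UP);
 *
 * A request with ifi_change == 0 is treated as "change everything" for
 * backwards compatibility, as the comment above notes.
 */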
static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
				 const struct rtnl_link_stats64 *b)
{
	a->rx_packets = b->rx_packets;
	a->tx_packets = b->tx_packets;
	a->rx_bytes = b->rx_bytes;
	a->tx_bytes = b->tx_bytes;
	a->rx_errors = b->rx_errors;
	a->tx_errors = b->tx_errors;
	a->rx_dropped = b->rx_dropped;
	a->tx_dropped = b->tx_dropped;

	a->multicast = b->multicast;
	a->collisions = b->collisions;

	a->rx_length_errors = b->rx_length_errors;
	a->rx_over_errors = b->rx_over_errors;
	a->rx_crc_errors = b->rx_crc_errors;
	a->rx_frame_errors = b->rx_frame_errors;
	a->rx_fifo_errors = b->rx_fifo_errors;
	a->rx_missed_errors = b->rx_missed_errors;

	a->tx_aborted_errors = b->tx_aborted_errors;
	a->tx_carrier_errors = b->tx_carrier_errors;
	a->tx_fifo_errors = b->tx_fifo_errors;
	a->tx_heartbeat_errors = b->tx_heartbeat_errors;
	a->tx_window_errors = b->tx_window_errors;

	a->rx_compressed = b->rx_compressed;
	a->tx_compressed = b->tx_compressed;

	a->rx_nohandler = b->rx_nohandler;
}

/* All VF info */
static inline int rtnl_vfinfo_size(const struct net_device *dev,
				   u32 ext_filter_mask)
{
	if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) {
		int num_vfs = dev_num_vf(dev->dev.parent);
		size_t size = nla_total_size(0);
		size += num_vfs *
			(nla_total_size(0) +
			 nla_total_size(sizeof(struct ifla_vf_mac)) +
			 nla_total_size(sizeof(struct ifla_vf_broadcast)) +
			 nla_total_size(sizeof(struct ifla_vf_vlan)) +
			 nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */
			 nla_total_size(MAX_VLAN_LIST_LEN *
					sizeof(struct ifla_vf_vlan_info)) +
			 nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
			 nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
			 nla_total_size(sizeof(struct ifla_vf_rate)) +
			 nla_total_size(sizeof(struct ifla_vf_link_state)) +
			 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
			 nla_total_size(0) + /* nest IFLA_VF_STATS */
			 /* IFLA_VF_STATS_RX_PACKETS */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_TX_PACKETS */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_RX_BYTES */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_TX_BYTES */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_BROADCAST */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_MULTICAST */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_RX_DROPPED */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_TX_DROPPED */
			 nla_total_size_64bit(sizeof(__u64)) +
			 nla_total_size(sizeof(struct ifla_vf_trust)));
		return size;
	} else
		return 0;
}

static size_t rtnl_port_size(const struct net_device *dev,
			     u32 ext_filter_mask)
{
	size_t port_size = nla_total_size(4)		/* PORT_VF */
		+ nla_total_size(PORT_PROFILE_MAX)	/* PORT_PROFILE */
		+ nla_total_size(PORT_UUID_MAX)		/* PORT_INSTANCE_UUID */
		+ nla_total_size(PORT_UUID_MAX)		/* PORT_HOST_UUID */
		+ nla_total_size(1)			/* PORT_VDP_REQUEST */
		+ nla_total_size(2);			/* PORT_VDP_RESPONSE */
	size_t vf_ports_size = nla_total_size(sizeof(struct nlattr));
	size_t vf_port_size = nla_total_size(sizeof(struct nlattr))
		+ port_size;
	size_t port_self_size = nla_total_size(sizeof(struct nlattr))
		+ port_size;

	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
	    !(ext_filter_mask & RTEXT_FILTER_VF))
		return 0;
	if (dev_num_vf(dev->dev.parent))
		return port_self_size + vf_ports_size +
		       vf_port_size * dev_num_vf(dev->dev.parent);
	else
		return port_self_size;
}

static size_t rtnl_xdp_size(void)
{
	size_t xdp_size = nla_total_size(0) +	/* nest IFLA_XDP */
			  nla_total_size(1) +	/* XDP_ATTACHED */
			  nla_total_size(4) +	/* XDP_PROG_ID (or 1st mode) */
			  nla_total_size(4);	/* XDP_<mode>_PROG_ID */

	return xdp_size;
}

static size_t rtnl_prop_list_size(const struct net_device *dev)
{
	struct netdev_name_node *name_node;
	size_t size;

	if (list_empty(&dev->name_node->list))
		return 0;
	size = nla_total_size(0);
	list_for_each_entry(name_node, &dev->name_node->list, list)
		size += nla_total_size(ALTIFNAMSIZ);
	return size;
}

static noinline size_t if_nlmsg_size(const struct net_device *dev,
				     u32 ext_filter_mask)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
	       + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
	       + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
	       + nla_total_size_64bit(sizeof(struct rtnl_link_ifmap))
	       + nla_total_size(sizeof(struct rtnl_link_stats))
	       + nla_total_size_64bit(sizeof(struct rtnl_link_stats64))
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
	       + nla_total_size(4) /* IFLA_TXQLEN */
	       + nla_total_size(4) /* IFLA_WEIGHT */
	       + nla_total_size(4) /* IFLA_MTU */
	       + nla_total_size(4) /* IFLA_LINK */
	       + nla_total_size(4) /* IFLA_MASTER */
	       + nla_total_size(1) /* IFLA_CARRIER */
	       + nla_total_size(4) /* IFLA_PROMISCUITY */
	       + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
	       + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
	       + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */
	       + nla_total_size(4) /* IFLA_GSO_MAX_SIZE */
	       + nla_total_size(1) /* IFLA_OPERSTATE */
	       + nla_total_size(1) /* IFLA_LINKMODE */
	       + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
	       + nla_total_size(4) /* IFLA_LINK_NETNSID */
	       + nla_total_size(4) /* IFLA_GROUP */
	       + nla_total_size(ext_filter_mask
				& RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
	       + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
	       + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
	       + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
	       + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
	       + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
	       + rtnl_xdp_size() /* IFLA_XDP */
	       + nla_total_size(4) /* IFLA_EVENT */
	       + nla_total_size(4) /* IFLA_NEW_NETNSID */
	       + nla_total_size(4) /* IFLA_NEW_IFINDEX */
	       + nla_total_size(1) /* IFLA_PROTO_DOWN */
	       + nla_total_size(4) /* IFLA_TARGET_NETNSID */
	       + nla_total_size(4) /* IFLA_CARRIER_UP_COUNT */
	       + nla_total_size(4) /* IFLA_CARRIER_DOWN_COUNT */
	       + nla_total_size(4) /* IFLA_MIN_MTU */
	       + nla_total_size(4) /* IFLA_MAX_MTU */
	       + rtnl_prop_list_size(dev)
	       + 0;
}
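/* Illustrative note (not part of the original file): the size estimates
 * above rely on nla_total_size(payload) == NLA_ALIGN(NLA_HDRLEN + payload).
 * With the 4-byte attribute header and 4-byte alignment this gives, e.g.:
 *
 *	nla_total_size(4) == 8	(u32 attribute)
 *	nla_total_size(1) == 8	(u8 attribute, padded)
 *	nla_total_size(0) == 4	(bare nest header)
 */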
static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *vf_ports;
	struct nlattr *vf_port;
	int vf;
	int err;

	vf_ports = nla_nest_start_noflag(skb, IFLA_VF_PORTS);
	if (!vf_ports)
		return -EMSGSIZE;

	for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
		vf_port = nla_nest_start_noflag(skb, IFLA_VF_PORT);
		if (!vf_port)
			goto nla_put_failure;
		if (nla_put_u32(skb, IFLA_PORT_VF, vf))
			goto nla_put_failure;
		err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
		if (err == -EMSGSIZE)
			goto nla_put_failure;
		if (err) {
			nla_nest_cancel(skb, vf_port);
			continue;
		}
		nla_nest_end(skb, vf_port);
	}

	nla_nest_end(skb, vf_ports);

	return 0;

nla_put_failure:
	nla_nest_cancel(skb, vf_ports);
	return -EMSGSIZE;
}

static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *port_self;
	int err;

	port_self = nla_nest_start_noflag(skb, IFLA_PORT_SELF);
	if (!port_self)
		return -EMSGSIZE;

	err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
	if (err) {
		nla_nest_cancel(skb, port_self);
		return (err == -EMSGSIZE) ? err : 0;
	}

	nla_nest_end(skb, port_self);

	return 0;
}

static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
			  u32 ext_filter_mask)
{
	int err;

	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
	    !(ext_filter_mask & RTEXT_FILTER_VF))
		return 0;

	err = rtnl_port_self_fill(skb, dev);
	if (err)
		return err;

	if (dev_num_vf(dev->dev.parent)) {
		err = rtnl_vf_ports_fill(skb, dev);
		if (err)
			return err;
	}

	return 0;
}

static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
{
	int err;
	struct netdev_phys_item_id ppid;

	err = dev_get_phys_port_id(dev, &ppid);
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id))
		return -EMSGSIZE;

	return 0;
}

static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
{
	char name[IFNAMSIZ];
	int err;

	err = dev_get_phys_port_name(dev, name, sizeof(name));
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put_string(skb, IFLA_PHYS_PORT_NAME, name))
		return -EMSGSIZE;

	return 0;
}

static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_phys_item_id ppid = { };
	int err;

	err = dev_get_port_parent_id(dev, &ppid, false);
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put(skb, IFLA_PHYS_SWITCH_ID, ppid.id_len, ppid.id))
		return -EMSGSIZE;

	return 0;
}

static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
					      struct net_device *dev)
{
	struct rtnl_link_stats64 *sp;
	struct nlattr *attr;

	attr = nla_reserve_64bit(skb, IFLA_STATS64,
				 sizeof(struct rtnl_link_stats64), IFLA_PAD);
	if (!attr)
		return -EMSGSIZE;

	sp = nla_data(attr);
	dev_get_stats(dev, sp);

	attr = nla_reserve(skb, IFLA_STATS,
			   sizeof(struct rtnl_link_stats));
	if (!attr)
		return -EMSGSIZE;

	copy_rtnl_link_stats(nla_data(attr), sp);

	return 0;
}
static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
					       struct net_device *dev,
					       int vfs_num,
					       struct nlattr *vfinfo)
{
	struct ifla_vf_rss_query_en vf_rss_query_en;
	struct nlattr *vf, *vfstats, *vfvlanlist;
	struct ifla_vf_link_state vf_linkstate;
	struct ifla_vf_vlan_info vf_vlan_info;
	struct ifla_vf_spoofchk vf_spoofchk;
	struct ifla_vf_tx_rate vf_tx_rate;
	struct ifla_vf_stats vf_stats;
	struct ifla_vf_trust vf_trust;
	struct ifla_vf_vlan vf_vlan;
	struct ifla_vf_rate vf_rate;
	struct ifla_vf_mac vf_mac;
	struct ifla_vf_broadcast vf_broadcast;
	struct ifla_vf_info ivi;
	struct ifla_vf_guid node_guid;
	struct ifla_vf_guid port_guid;

	memset(&ivi, 0, sizeof(ivi));

	/* Not all SR-IOV capable drivers support the
	 * spoofcheck and "RSS query enable" query.  Preset to
	 * -1 so the user space tool can detect that the driver
	 * didn't report anything.
	 */
	ivi.spoofchk = -1;
	ivi.rss_query_en = -1;
	ivi.trusted = -1;
	/* The default value for VF link state is "auto"
	 * IFLA_VF_LINK_STATE_AUTO which equals zero
	 */
	ivi.linkstate = 0;
	/* VLAN Protocol by default is 802.1Q */
	ivi.vlan_proto = htons(ETH_P_8021Q);
	if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
		return 0;

	memset(&vf_vlan_info, 0, sizeof(vf_vlan_info));

	vf_mac.vf =
		vf_vlan.vf =
		vf_vlan_info.vf =
		vf_rate.vf =
		vf_tx_rate.vf =
		vf_spoofchk.vf =
		vf_linkstate.vf =
		vf_rss_query_en.vf =
		vf_trust.vf = ivi.vf;

	memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
	memcpy(vf_broadcast.broadcast, dev->broadcast, dev->addr_len);
	vf_vlan.vlan = ivi.vlan;
	vf_vlan.qos = ivi.qos;
	vf_vlan_info.vlan = ivi.vlan;
	vf_vlan_info.qos = ivi.qos;
	vf_vlan_info.vlan_proto = ivi.vlan_proto;
	vf_tx_rate.rate = ivi.max_tx_rate;
	vf_rate.min_tx_rate = ivi.min_tx_rate;
	vf_rate.max_tx_rate = ivi.max_tx_rate;
	vf_spoofchk.setting = ivi.spoofchk;
	vf_linkstate.link_state = ivi.linkstate;
	vf_rss_query_en.setting = ivi.rss_query_en;
	vf_trust.setting = ivi.trusted;
	vf = nla_nest_start_noflag(skb, IFLA_VF_INFO);
	if (!vf)
		goto nla_put_vfinfo_failure;
	if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
	    nla_put(skb, IFLA_VF_BROADCAST, sizeof(vf_broadcast), &vf_broadcast) ||
	    nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
	    nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
		    &vf_rate) ||
	    nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
		    &vf_tx_rate) ||
	    nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
		    &vf_spoofchk) ||
	    nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
		    &vf_linkstate) ||
	    nla_put(skb, IFLA_VF_RSS_QUERY_EN,
		    sizeof(vf_rss_query_en),
		    &vf_rss_query_en) ||
	    nla_put(skb, IFLA_VF_TRUST,
		    sizeof(vf_trust), &vf_trust))
		goto nla_put_vf_failure;

	memset(&node_guid, 0, sizeof(node_guid));
	memset(&port_guid, 0, sizeof(port_guid));
	if (dev->netdev_ops->ndo_get_vf_guid &&
	    !dev->netdev_ops->ndo_get_vf_guid(dev, vfs_num, &node_guid,
					      &port_guid)) {
		if (nla_put(skb, IFLA_VF_IB_NODE_GUID, sizeof(node_guid),
			    &node_guid) ||
		    nla_put(skb, IFLA_VF_IB_PORT_GUID, sizeof(port_guid),
			    &port_guid))
			goto nla_put_vf_failure;
	}
	vfvlanlist = nla_nest_start_noflag(skb, IFLA_VF_VLAN_LIST);
	if (!vfvlanlist)
		goto nla_put_vf_failure;
	if (nla_put(skb, IFLA_VF_VLAN_INFO, sizeof(vf_vlan_info),
		    &vf_vlan_info)) {
		nla_nest_cancel(skb, vfvlanlist);
		goto nla_put_vf_failure;
	}
	nla_nest_end(skb, vfvlanlist);
	memset(&vf_stats, 0, sizeof(vf_stats));
	if (dev->netdev_ops->ndo_get_vf_stats)
		dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
						  &vf_stats);
	vfstats = nla_nest_start_noflag(skb, IFLA_VF_STATS);
	if (!vfstats)
		goto nla_put_vf_failure;
	if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
			      vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
			      vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
			      vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
			      vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
			      vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
			      vf_stats.multicast, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED,
			      vf_stats.rx_dropped, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED,
			      vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) {
		nla_nest_cancel(skb, vfstats);
		goto nla_put_vf_failure;
	}
	nla_nest_end(skb, vfstats);
	nla_nest_end(skb, vf);
	return 0;

nla_put_vf_failure:
	nla_nest_cancel(skb, vf);
nla_put_vfinfo_failure:
	nla_nest_cancel(skb, vfinfo);
	return -EMSGSIZE;
}
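/* Illustrative sketch (not part of the original file): a minimal
 * ndo_get_vf_config() implementation only needs to fill the fields it
 * supports; everything else keeps the presets applied above. The driver
 * structure and field names are hypothetical.
 *
 *	static int example_get_vf_config(struct net_device *dev, int vf,
 *					 struct ifla_vf_info *ivi)
 *	{
 *		struct example_priv *priv = netdev_priv(dev);
 *
 *		if (vf >= priv->num_vfs)
 *			return -EINVAL;
 *		ivi->vf = vf;
 *		memcpy(ivi->mac, priv->vf[vf].mac, ETH_ALEN);
 *		ivi->vlan = priv->vf[vf].vlan;
 *		return 0;
 *	}
 */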
static noinline_for_stack int rtnl_fill_vf(struct sk_buff *skb,
					   struct net_device *dev,
					   u32 ext_filter_mask)
{
	struct nlattr *vfinfo;
	int i, num_vfs;

	if (!dev->dev.parent || ((ext_filter_mask & RTEXT_FILTER_VF) == 0))
		return 0;

	num_vfs = dev_num_vf(dev->dev.parent);
	if (nla_put_u32(skb, IFLA_NUM_VF, num_vfs))
		return -EMSGSIZE;

	if (!dev->netdev_ops->ndo_get_vf_config)
		return 0;

	vfinfo = nla_nest_start_noflag(skb, IFLA_VFINFO_LIST);
	if (!vfinfo)
		return -EMSGSIZE;

	for (i = 0; i < num_vfs; i++) {
		if (rtnl_fill_vfinfo(skb, dev, i, vfinfo))
			return -EMSGSIZE;
	}

	nla_nest_end(skb, vfinfo);
	return 0;
}

static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
{
	struct rtnl_link_ifmap map;

	memset(&map, 0, sizeof(map));
	map.mem_start = dev->mem_start;
	map.mem_end = dev->mem_end;
	map.base_addr = dev->base_addr;
	map.irq = dev->irq;
	map.dma = dev->dma;
	map.port = dev->if_port;

	if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD))
		return -EMSGSIZE;

	return 0;
}

static u32 rtnl_xdp_prog_skb(struct net_device *dev)
{
	const struct bpf_prog *generic_xdp_prog;

	ASSERT_RTNL();

	generic_xdp_prog = rtnl_dereference(dev->xdp_prog);
	if (!generic_xdp_prog)
		return 0;
	return generic_xdp_prog->aux->id;
}

static u32 rtnl_xdp_prog_drv(struct net_device *dev)
{
	return __dev_xdp_query(dev, dev->netdev_ops->ndo_bpf, XDP_QUERY_PROG);
}

static u32 rtnl_xdp_prog_hw(struct net_device *dev)
{
	return __dev_xdp_query(dev, dev->netdev_ops->ndo_bpf,
			       XDP_QUERY_PROG_HW);
}

static int rtnl_xdp_report_one(struct sk_buff *skb, struct net_device *dev,
			       u32 *prog_id, u8 *mode, u8 tgt_mode, u32 attr,
			       u32 (*get_prog_id)(struct net_device *dev))
{
	u32 curr_id;
	int err;

	curr_id = get_prog_id(dev);
	if (!curr_id)
		return 0;

	*prog_id = curr_id;
	err = nla_put_u32(skb, attr, curr_id);
	if (err)
		return err;

	if (*mode != XDP_ATTACHED_NONE)
		*mode = XDP_ATTACHED_MULTI;
	else
		*mode = tgt_mode;

	return 0;
}

static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *xdp;
	u32 prog_id;
	int err;
	u8 mode;

	xdp = nla_nest_start_noflag(skb, IFLA_XDP);
	if (!xdp)
		return -EMSGSIZE;

	prog_id = 0;
	mode = XDP_ATTACHED_NONE;
	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_SKB,
				  IFLA_XDP_SKB_PROG_ID, rtnl_xdp_prog_skb);
	if (err)
		goto err_cancel;
	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_DRV,
				  IFLA_XDP_DRV_PROG_ID, rtnl_xdp_prog_drv);
	if (err)
		goto err_cancel;
	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_HW,
				  IFLA_XDP_HW_PROG_ID, rtnl_xdp_prog_hw);
	if (err)
		goto err_cancel;

	err = nla_put_u8(skb, IFLA_XDP_ATTACHED, mode);
	if (err)
		goto err_cancel;

	if (prog_id && mode != XDP_ATTACHED_MULTI) {
		err = nla_put_u32(skb, IFLA_XDP_PROG_ID, prog_id);
		if (err)
			goto err_cancel;
	}

	nla_nest_end(skb, xdp);
	return 0;

err_cancel:
	nla_nest_cancel(skb, xdp);
	return err;
}
static u32 rtnl_get_event(unsigned long event)
{
	u32 rtnl_event_type = IFLA_EVENT_NONE;

	switch (event) {
	case NETDEV_REBOOT:
		rtnl_event_type = IFLA_EVENT_REBOOT;
		break;
	case NETDEV_FEAT_CHANGE:
		rtnl_event_type = IFLA_EVENT_FEATURES;
		break;
	case NETDEV_BONDING_FAILOVER:
		rtnl_event_type = IFLA_EVENT_BONDING_FAILOVER;
		break;
	case NETDEV_NOTIFY_PEERS:
		rtnl_event_type = IFLA_EVENT_NOTIFY_PEERS;
		break;
	case NETDEV_RESEND_IGMP:
		rtnl_event_type = IFLA_EVENT_IGMP_RESEND;
		break;
	case NETDEV_CHANGEINFODATA:
		rtnl_event_type = IFLA_EVENT_BONDING_OPTIONS;
		break;
	default:
		break;
	}

	return rtnl_event_type;
}

static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev)
{
	const struct net_device *upper_dev;
	int ret = 0;

	rcu_read_lock();

	upper_dev = netdev_master_upper_dev_get_rcu(dev);
	if (upper_dev)
		ret = nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex);

	rcu_read_unlock();
	return ret;
}

static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev,
			  bool force)
{
	int ifindex = dev_get_iflink(dev);

	if (force || dev->ifindex != ifindex)
		return nla_put_u32(skb, IFLA_LINK, ifindex);

	return 0;
}

static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb,
					      struct net_device *dev)
{
	char buf[IFALIASZ];
	int ret;

	ret = dev_get_alias(dev, buf, sizeof(buf));
	return ret > 0 ? nla_put_string(skb, IFLA_IFALIAS, buf) : 0;
}
static int rtnl_fill_link_netnsid(struct sk_buff *skb,
				  const struct net_device *dev,
				  struct net *src_net, gfp_t gfp)
{
	bool put_iflink = false;

	if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net) {
		struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);

		if (!net_eq(dev_net(dev), link_net)) {
			int id = peernet2id_alloc(src_net, link_net, gfp);

			if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
				return -EMSGSIZE;

			put_iflink = true;
		}
	}

	return nla_put_iflink(skb, dev, put_iflink);
}

static int rtnl_fill_link_af(struct sk_buff *skb,
			     const struct net_device *dev,
			     u32 ext_filter_mask)
{
	const struct rtnl_af_ops *af_ops;
	struct nlattr *af_spec;

	af_spec = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
	if (!af_spec)
		return -EMSGSIZE;

	list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
		struct nlattr *af;
		int err;

		if (!af_ops->fill_link_af)
			continue;

		af = nla_nest_start_noflag(skb, af_ops->family);
		if (!af)
			return -EMSGSIZE;

		err = af_ops->fill_link_af(skb, dev, ext_filter_mask);
		/*
		 * Caller may return ENODATA to indicate that there
		 * was no data to be dumped. This is not an error, it
		 * means we should trim the attribute header and
		 * continue.
		 */
		if (err == -ENODATA)
			nla_nest_cancel(skb, af);
		else if (err < 0)
			return -EMSGSIZE;

		nla_nest_end(skb, af);
	}

	nla_nest_end(skb, af_spec);
	return 0;
}

static int rtnl_fill_alt_ifnames(struct sk_buff *skb,
				 const struct net_device *dev)
{
	struct netdev_name_node *name_node;
	int count = 0;

	list_for_each_entry(name_node, &dev->name_node->list, list) {
		if (nla_put_string(skb, IFLA_ALT_IFNAME, name_node->name))
			return -EMSGSIZE;
		count++;
	}
	return count;
}

static int rtnl_fill_prop_list(struct sk_buff *skb,
			       const struct net_device *dev)
{
	struct nlattr *prop_list;
	int ret;

	prop_list = nla_nest_start(skb, IFLA_PROP_LIST);
	if (!prop_list)
		return -EMSGSIZE;

	ret = rtnl_fill_alt_ifnames(skb, dev);
	if (ret <= 0)
		goto nest_cancel;

	nla_nest_end(skb, prop_list);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, prop_list);
	return ret;
}
static int rtnl_fill_ifinfo(struct sk_buff *skb,
			    struct net_device *dev, struct net *src_net,
			    int type, u32 pid, u32 seq, u32 change,
			    unsigned int flags, u32 ext_filter_mask,
			    u32 event, int *new_nsid, int new_ifindex,
			    int tgt_netnsid, gfp_t gfp)
{
	struct ifinfomsg *ifm;
	struct nlmsghdr *nlh;

	ASSERT_RTNL();
	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ifm = nlmsg_data(nlh);
	ifm->ifi_family = AF_UNSPEC;
	ifm->__ifi_pad = 0;
	ifm->ifi_type = dev->type;
	ifm->ifi_index = dev->ifindex;
	ifm->ifi_flags = dev_get_flags(dev);
	ifm->ifi_change = change;

	if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_TARGET_NETNSID, tgt_netnsid))
		goto nla_put_failure;
	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
	    nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
	    nla_put_u8(skb, IFLA_OPERSTATE,
		       netif_running(dev) ? dev->operstate : IF_OPER_DOWN) ||
	    nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) ||
	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
	    nla_put_u32(skb, IFLA_MIN_MTU, dev->min_mtu) ||
	    nla_put_u32(skb, IFLA_MAX_MTU, dev->max_mtu) ||
	    nla_put_u32(skb, IFLA_GROUP, dev->group) ||
	    nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
	    nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) ||
	    nla_put_u32(skb, IFLA_GSO_MAX_SEGS, dev->gso_max_segs) ||
	    nla_put_u32(skb, IFLA_GSO_MAX_SIZE, dev->gso_max_size) ||
#ifdef CONFIG_RPS
	    nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
#endif
	    put_master_ifindex(skb, dev) ||
	    nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
	    (dev->qdisc &&
	     nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) ||
	    nla_put_ifalias(skb, dev) ||
	    nla_put_u32(skb, IFLA_CARRIER_CHANGES,
			atomic_read(&dev->carrier_up_count) +
			atomic_read(&dev->carrier_down_count)) ||
	    nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down) ||
	    nla_put_u32(skb, IFLA_CARRIER_UP_COUNT,
			atomic_read(&dev->carrier_up_count)) ||
	    nla_put_u32(skb, IFLA_CARRIER_DOWN_COUNT,
			atomic_read(&dev->carrier_down_count)))
		goto nla_put_failure;

	if (event != IFLA_EVENT_NONE) {
		if (nla_put_u32(skb, IFLA_EVENT, event))
			goto nla_put_failure;
	}

	if (rtnl_fill_link_ifmap(skb, dev))
		goto nla_put_failure;

	if (dev->addr_len) {
		if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
		    nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
			goto nla_put_failure;
	}

	if (rtnl_phys_port_id_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_phys_port_name_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_phys_switch_id_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_fill_stats(skb, dev))
		goto nla_put_failure;

	if (rtnl_fill_vf(skb, dev, ext_filter_mask))
		goto nla_put_failure;

	if (rtnl_port_fill(skb, dev, ext_filter_mask))
		goto nla_put_failure;

	if (rtnl_xdp_fill(skb, dev))
		goto nla_put_failure;

	if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
		if (rtnl_link_fill(skb, dev) < 0)
			goto nla_put_failure;
	}

	if (rtnl_fill_link_netnsid(skb, dev, src_net, gfp))
		goto nla_put_failure;

	if (new_nsid &&
	    nla_put_s32(skb, IFLA_NEW_NETNSID, *new_nsid) < 0)
		goto nla_put_failure;
	if (new_ifindex &&
	    nla_put_s32(skb, IFLA_NEW_IFINDEX, new_ifindex) < 0)
		goto nla_put_failure;

	rcu_read_lock();
	if (rtnl_fill_link_af(skb, dev, ext_filter_mask))
		goto nla_put_failure_rcu;
	rcu_read_unlock();

	if (rtnl_fill_prop_list(skb, dev))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure_rcu:
	rcu_read_unlock();
nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
	[IFLA_IFNAME]		= { .type = NLA_STRING, .len = IFNAMSIZ-1 },
	[IFLA_ADDRESS]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[IFLA_BROADCAST]	= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[IFLA_MAP]		= { .len = sizeof(struct rtnl_link_ifmap) },
	[IFLA_MTU]		= { .type = NLA_U32 },
	[IFLA_LINK]		= { .type = NLA_U32 },
	[IFLA_MASTER]		= { .type = NLA_U32 },
	[IFLA_CARRIER]		= { .type = NLA_U8 },
	[IFLA_TXQLEN]		= { .type = NLA_U32 },
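/* Illustrative sketch (not part of the original file): a doit handler
 * validates attributes against ifla_policy before touching them, the same
 * way the real rtnl_setlink/rtnl_getlink handlers in this file do.
 *
 *	struct nlattr *tb[IFLA_MAX + 1];
 *	struct ifinfomsg *ifm = nlmsg_data(nlh);
 *	int err;
 *
 *	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
 *				     ifla_policy, extack);
 *	if (err < 0)
 *		return err;
 *	if (tb[IFLA_IFNAME])
 *		nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
 */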
	[IFLA_WEIGHT]		= { .type = NLA_U32 },
	[IFLA_OPERSTATE]	= { .type = NLA_U8 },
	[IFLA_LINKMODE]		= { .type = NLA_U8 },
	[IFLA_LINKINFO]		= { .type = NLA_NESTED },
	[IFLA_NET_NS_PID]	= { .type = NLA_U32 },
	[IFLA_NET_NS_FD]	= { .type = NLA_U32 },
	/* IFLA_IFALIAS is a string, but policy is set to NLA_BINARY to
	 * allow 0-length string (needed to remove an alias).
	 */
	[IFLA_IFALIAS]		= { .type = NLA_BINARY, .len = IFALIASZ - 1 },
	[IFLA_VFINFO_LIST]	= { .type = NLA_NESTED },
	[IFLA_VF_PORTS]		= { .type = NLA_NESTED },
	[IFLA_PORT_SELF]	= { .type = NLA_NESTED },
	[IFLA_AF_SPEC]		= { .type = NLA_NESTED },
	[IFLA_EXT_MASK]		= { .type = NLA_U32 },
	[IFLA_PROMISCUITY]	= { .type = NLA_U32 },
	[IFLA_NUM_TX_QUEUES]	= { .type = NLA_U32 },
	[IFLA_NUM_RX_QUEUES]	= { .type = NLA_U32 },
	[IFLA_GSO_MAX_SEGS]	= { .type = NLA_U32 },
	[IFLA_GSO_MAX_SIZE]	= { .type = NLA_U32 },
	[IFLA_PHYS_PORT_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
	[IFLA_CARRIER_CHANGES]	= { .type = NLA_U32 },	/* ignored */
	[IFLA_PHYS_SWITCH_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
	[IFLA_LINK_NETNSID]	= { .type = NLA_S32 },
	[IFLA_PROTO_DOWN]	= { .type = NLA_U8 },
	[IFLA_XDP]		= { .type = NLA_NESTED },
	[IFLA_EVENT]		= { .type = NLA_U32 },
	[IFLA_GROUP]		= { .type = NLA_U32 },
	[IFLA_TARGET_NETNSID]	= { .type = NLA_S32 },
	[IFLA_CARRIER_UP_COUNT]	= { .type = NLA_U32 },
	[IFLA_CARRIER_DOWN_COUNT] = { .type = NLA_U32 },
	[IFLA_MIN_MTU]		= { .type = NLA_U32 },
	[IFLA_MAX_MTU]		= { .type = NLA_U32 },
	[IFLA_PROP_LIST]	= { .type = NLA_NESTED },
	[IFLA_ALT_IFNAME]	= { .type = NLA_STRING,
				    .len = ALTIFNAMSIZ - 1 },
};

static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
	[IFLA_INFO_KIND]	= { .type = NLA_STRING },
	[IFLA_INFO_DATA]	= { .type = NLA_NESTED },
	[IFLA_INFO_SLAVE_KIND]	= { .type = NLA_STRING },
	[IFLA_INFO_SLAVE_DATA]	= { .type = NLA_NESTED },
};

static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
	[IFLA_VF_MAC]		= { .len = sizeof(struct ifla_vf_mac) },
	[IFLA_VF_BROADCAST]	= { .type = NLA_REJECT },
	[IFLA_VF_VLAN]		= { .len = sizeof(struct ifla_vf_vlan) },
	[IFLA_VF_VLAN_LIST]	= { .type = NLA_NESTED },
	[IFLA_VF_TX_RATE]	= { .len = sizeof(struct ifla_vf_tx_rate) },
	[IFLA_VF_SPOOFCHK]	= { .len = sizeof(struct ifla_vf_spoofchk) },
	[IFLA_VF_RATE]		= { .len = sizeof(struct ifla_vf_rate) },
	[IFLA_VF_LINK_STATE]	= { .len = sizeof(struct ifla_vf_link_state) },
	[IFLA_VF_RSS_QUERY_EN]	= { .len = sizeof(struct ifla_vf_rss_query_en) },
	[IFLA_VF_STATS]		= { .type = NLA_NESTED },
	[IFLA_VF_TRUST]		= { .len = sizeof(struct ifla_vf_trust) },
	[IFLA_VF_IB_NODE_GUID]	= { .len = sizeof(struct ifla_vf_guid) },
	[IFLA_VF_IB_PORT_GUID]	= { .len = sizeof(struct ifla_vf_guid) },
};

static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
	[IFLA_PORT_VF]		= { .type = NLA_U32 },
	[IFLA_PORT_PROFILE]	= { .type = NLA_STRING,
				    .len = PORT_PROFILE_MAX },
	[IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY,
				      .len = PORT_UUID_MAX },
	[IFLA_PORT_HOST_UUID]	= { .type = NLA_STRING,
				    .len = PORT_UUID_MAX },
	[IFLA_PORT_REQUEST]	= { .type = NLA_U8, },
	[IFLA_PORT_RESPONSE]	= { .type = NLA_U16, },

	/* Unused, but we need to keep it here since user space could
	 * fill it. It's also broken with regard to NLA_BINARY use in
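/* Illustrative sketch (not part of the original file): callers that accept
 * an IFLA_TARGET_NETNSID attribute resolve and later release the namespace
 * like this (mirroring rtnl_dump_ifinfo() below; put_net() drops the
 * reference taken on success).
 *
 *	tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid);
 *	if (IS_ERR(tgt_net))
 *		return PTR_ERR(tgt_net);
 *	...
 *	put_net(tgt_net);
 */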
	 * combination with structs.
	 */
	[IFLA_PORT_VSI_TYPE]	= { .type = NLA_BINARY,
				    .len = sizeof(struct ifla_port_vsi) },
};

static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = {
	[IFLA_XDP_FD]		= { .type = NLA_S32 },
	[IFLA_XDP_ATTACHED]	= { .type = NLA_U8 },
	[IFLA_XDP_FLAGS]	= { .type = NLA_U32 },
	[IFLA_XDP_PROG_ID]	= { .type = NLA_U32 },
};

static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla)
{
	const struct rtnl_link_ops *ops = NULL;
	struct nlattr *linfo[IFLA_INFO_MAX + 1];

	if (nla_parse_nested_deprecated(linfo, IFLA_INFO_MAX, nla, ifla_info_policy, NULL) < 0)
		return NULL;

	if (linfo[IFLA_INFO_KIND]) {
		char kind[MODULE_NAME_LEN];

		nla_strlcpy(kind, linfo[IFLA_INFO_KIND], sizeof(kind));
		ops = rtnl_link_ops_get(kind);
	}

	return ops;
}

static bool link_master_filtered(struct net_device *dev, int master_idx)
{
	struct net_device *master;

	if (!master_idx)
		return false;

	master = netdev_master_upper_dev_get(dev);
	if (!master || master->ifindex != master_idx)
		return true;

	return false;
}

static bool link_kind_filtered(const struct net_device *dev,
			       const struct rtnl_link_ops *kind_ops)
{
	if (kind_ops && dev->rtnl_link_ops != kind_ops)
		return true;

	return false;
}

static bool link_dump_filtered(struct net_device *dev,
			       int master_idx,
			       const struct rtnl_link_ops *kind_ops)
{
	if (link_master_filtered(dev, master_idx) ||
	    link_kind_filtered(dev, kind_ops))
		return true;

	return false;
}

/**
 * rtnl_get_net_ns_capable - Get netns if sufficiently privileged.
 * @sk: netlink socket
 * @netnsid: network namespace identifier
 *
 * Returns the network namespace identified by netnsid on success or an error
 * pointer on failure.
 */
struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid)
{
	struct net *net;

	net = get_net_ns_by_id(sock_net(sk), netnsid);
	if (!net)
		return ERR_PTR(-EINVAL);

	/* For now, the caller is required to have CAP_NET_ADMIN in
	 * the user namespace owning the target net ns.
	 */
	if (!sk_ns_capable(sk, net->user_ns, CAP_NET_ADMIN)) {
		put_net(net);
		return ERR_PTR(-EACCES);
	}
	return net;
}
EXPORT_SYMBOL_GPL(rtnl_get_net_ns_capable);

static int rtnl_valid_dump_ifinfo_req(const struct nlmsghdr *nlh,
				      bool strict_check, struct nlattr **tb,
				      struct netlink_ext_ack *extack)
{
	int hdrlen;

	if (strict_check) {
		struct ifinfomsg *ifm;

		if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
			NL_SET_ERR_MSG(extack, "Invalid header for link dump");
			return -EINVAL;
		}

		ifm = nlmsg_data(nlh);
		if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
		    ifm->ifi_change) {
			NL_SET_ERR_MSG(extack, "Invalid values in header for link dump request");
			return -EINVAL;
		}
		if (ifm->ifi_index) {
			NL_SET_ERR_MSG(extack, "Filter by device index not supported for link dumps");
			return -EINVAL;
		}

		return nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb,
						     IFLA_MAX, ifla_policy,
						     extack);
	}
1984 * The correct header is ifinfomsg. It is consistent with rtnl_getlink. 1985 * However, before Linux v3.9 the code here assumed rtgenmsg and that's 1986 * what iproute2 < v3.9.0 used. 1987 * We can detect the old iproute2. Even including the IFLA_EXT_MASK 1988 * attribute, its netlink message is shorter than struct ifinfomsg. 1989 */ 1990 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ? 1991 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg); 1992 1993 return nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, 1994 extack); 1995 } 1996 1997 static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) 1998 { 1999 struct netlink_ext_ack *extack = cb->extack; 2000 const struct nlmsghdr *nlh = cb->nlh; 2001 struct net *net = sock_net(skb->sk); 2002 struct net *tgt_net = net; 2003 int h, s_h; 2004 int idx = 0, s_idx; 2005 struct net_device *dev; 2006 struct hlist_head *head; 2007 struct nlattr *tb[IFLA_MAX+1]; 2008 u32 ext_filter_mask = 0; 2009 const struct rtnl_link_ops *kind_ops = NULL; 2010 unsigned int flags = NLM_F_MULTI; 2011 int master_idx = 0; 2012 int netnsid = -1; 2013 int err, i; 2014 2015 s_h = cb->args[0]; 2016 s_idx = cb->args[1]; 2017 2018 err = rtnl_valid_dump_ifinfo_req(nlh, cb->strict_check, tb, extack); 2019 if (err < 0) { 2020 if (cb->strict_check) 2021 return err; 2022 2023 goto walk_entries; 2024 } 2025 2026 for (i = 0; i <= IFLA_MAX; ++i) { 2027 if (!tb[i]) 2028 continue; 2029 2030 /* new attributes should only be added with strict checking */ 2031 switch (i) { 2032 case IFLA_TARGET_NETNSID: 2033 netnsid = nla_get_s32(tb[i]); 2034 tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid); 2035 if (IS_ERR(tgt_net)) { 2036 NL_SET_ERR_MSG(extack, "Invalid target network namespace id"); 2037 return PTR_ERR(tgt_net); 2038 } 2039 break; 2040 case IFLA_EXT_MASK: 2041 ext_filter_mask = nla_get_u32(tb[i]); 2042 break; 2043 case IFLA_MASTER: 2044 master_idx = nla_get_u32(tb[i]); 2045 break; 2046 case IFLA_LINKINFO: 2047 kind_ops = linkinfo_to_kind_ops(tb[i]); 2048 break; 2049 default: 2050 if (cb->strict_check) { 2051 NL_SET_ERR_MSG(extack, "Unsupported attribute in link dump request"); 2052 return -EINVAL; 2053 } 2054 } 2055 } 2056 2057 if (master_idx || kind_ops) 2058 flags |= NLM_F_DUMP_FILTERED; 2059 2060 walk_entries: 2061 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { 2062 idx = 0; 2063 head = &tgt_net->dev_index_head[h]; 2064 hlist_for_each_entry(dev, head, index_hlist) { 2065 if (link_dump_filtered(dev, master_idx, kind_ops)) 2066 goto cont; 2067 if (idx < s_idx) 2068 goto cont; 2069 err = rtnl_fill_ifinfo(skb, dev, net, 2070 RTM_NEWLINK, 2071 NETLINK_CB(cb->skb).portid, 2072 nlh->nlmsg_seq, 0, flags, 2073 ext_filter_mask, 0, NULL, 0, 2074 netnsid, GFP_KERNEL); 2075 2076 if (err < 0) { 2077 if (likely(skb->len)) 2078 goto out; 2079 2080 goto out_err; 2081 } 2082 cont: 2083 idx++; 2084 } 2085 } 2086 out: 2087 err = skb->len; 2088 out_err: 2089 cb->args[1] = idx; 2090 cb->args[0] = h; 2091 cb->seq = net->dev_base_seq; 2092 nl_dump_check_consistent(cb, nlmsg_hdr(skb)); 2093 if (netnsid >= 0) 2094 put_net(tgt_net); 2095 2096 return err; 2097 } 2098 2099 int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len, 2100 struct netlink_ext_ack *exterr) 2101 { 2102 return nla_parse_deprecated(tb, IFLA_MAX, head, len, ifla_policy, 2103 exterr); 2104 } 2105 EXPORT_SYMBOL(rtnl_nla_parse_ifla); 2106 2107 struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]) 2108 { 2109 struct net *net; 2110 /* Examine the link 
attributes and figure out which 2111 * network namespace we are talking about. 2112 */ 2113 if (tb[IFLA_NET_NS_PID]) 2114 net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID])); 2115 else if (tb[IFLA_NET_NS_FD]) 2116 net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD])); 2117 else 2118 net = get_net(src_net); 2119 return net; 2120 } 2121 EXPORT_SYMBOL(rtnl_link_get_net); 2122 2123 /* Figure out which network namespace we are talking about by 2124 * examining the link attributes in the following order: 2125 * 2126 * 1. IFLA_NET_NS_PID 2127 * 2. IFLA_NET_NS_FD 2128 * 3. IFLA_TARGET_NETNSID 2129 */ 2130 static struct net *rtnl_link_get_net_by_nlattr(struct net *src_net, 2131 struct nlattr *tb[]) 2132 { 2133 struct net *net; 2134 2135 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]) 2136 return rtnl_link_get_net(src_net, tb); 2137 2138 if (!tb[IFLA_TARGET_NETNSID]) 2139 return get_net(src_net); 2140 2141 net = get_net_ns_by_id(src_net, nla_get_u32(tb[IFLA_TARGET_NETNSID])); 2142 if (!net) 2143 return ERR_PTR(-EINVAL); 2144 2145 return net; 2146 } 2147 2148 static struct net *rtnl_link_get_net_capable(const struct sk_buff *skb, 2149 struct net *src_net, 2150 struct nlattr *tb[], int cap) 2151 { 2152 struct net *net; 2153 2154 net = rtnl_link_get_net_by_nlattr(src_net, tb); 2155 if (IS_ERR(net)) 2156 return net; 2157 2158 if (!netlink_ns_capable(skb, net->user_ns, cap)) { 2159 put_net(net); 2160 return ERR_PTR(-EPERM); 2161 } 2162 2163 return net; 2164 } 2165 2166 /* Verify that rtnetlink requests do not pass additional properties 2167 * potentially referring to different network namespaces. 2168 */ 2169 static int rtnl_ensure_unique_netns(struct nlattr *tb[], 2170 struct netlink_ext_ack *extack, 2171 bool netns_id_only) 2172 { 2173 2174 if (netns_id_only) { 2175 if (!tb[IFLA_NET_NS_PID] && !tb[IFLA_NET_NS_FD]) 2176 return 0; 2177 2178 NL_SET_ERR_MSG(extack, "specified netns attribute not supported"); 2179 return -EOPNOTSUPP; 2180 } 2181 2182 if (tb[IFLA_TARGET_NETNSID] && (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD])) 2183 goto invalid_attr; 2184 2185 if (tb[IFLA_NET_NS_PID] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_FD])) 2186 goto invalid_attr; 2187 2188 if (tb[IFLA_NET_NS_FD] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_PID])) 2189 goto invalid_attr; 2190 2191 return 0; 2192 2193 invalid_attr: 2194 NL_SET_ERR_MSG(extack, "multiple netns identifying attributes specified"); 2195 return -EINVAL; 2196 } 2197 2198 static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[]) 2199 { 2200 if (dev) { 2201 if (tb[IFLA_ADDRESS] && 2202 nla_len(tb[IFLA_ADDRESS]) < dev->addr_len) 2203 return -EINVAL; 2204 2205 if (tb[IFLA_BROADCAST] && 2206 nla_len(tb[IFLA_BROADCAST]) < dev->addr_len) 2207 return -EINVAL; 2208 } 2209 2210 if (tb[IFLA_AF_SPEC]) { 2211 struct nlattr *af; 2212 int rem, err; 2213 2214 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) { 2215 const struct rtnl_af_ops *af_ops; 2216 2217 rcu_read_lock(); 2218 af_ops = rtnl_af_lookup(nla_type(af)); 2219 if (!af_ops) { 2220 rcu_read_unlock(); 2221 return -EAFNOSUPPORT; 2222 } 2223 2224 if (!af_ops->set_link_af) { 2225 rcu_read_unlock(); 2226 return -EOPNOTSUPP; 2227 } 2228 2229 if (af_ops->validate_link_af) { 2230 err = af_ops->validate_link_af(dev, af); 2231 if (err < 0) { 2232 rcu_read_unlock(); 2233 return err; 2234 } 2235 } 2236 2237 rcu_read_unlock(); 2238 } 2239 } 2240 2241 return 0; 2242 } 2243 2244 static int handle_infiniband_guid(struct net_device *dev, struct ifla_vf_guid *ivt, 2245 int guid_type) 2246 { 2247 const struct 
net_device_ops *ops = dev->netdev_ops; 2248 2249 return ops->ndo_set_vf_guid(dev, ivt->vf, ivt->guid, guid_type); 2250 } 2251 2252 static int handle_vf_guid(struct net_device *dev, struct ifla_vf_guid *ivt, int guid_type) 2253 { 2254 if (dev->type != ARPHRD_INFINIBAND) 2255 return -EOPNOTSUPP; 2256 2257 return handle_infiniband_guid(dev, ivt, guid_type); 2258 } 2259 2260 static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) 2261 { 2262 const struct net_device_ops *ops = dev->netdev_ops; 2263 int err = -EINVAL; 2264 2265 if (tb[IFLA_VF_MAC]) { 2266 struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]); 2267 2268 if (ivm->vf >= INT_MAX) 2269 return -EINVAL; 2270 err = -EOPNOTSUPP; 2271 if (ops->ndo_set_vf_mac) 2272 err = ops->ndo_set_vf_mac(dev, ivm->vf, 2273 ivm->mac); 2274 if (err < 0) 2275 return err; 2276 } 2277 2278 if (tb[IFLA_VF_VLAN]) { 2279 struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]); 2280 2281 if (ivv->vf >= INT_MAX) 2282 return -EINVAL; 2283 err = -EOPNOTSUPP; 2284 if (ops->ndo_set_vf_vlan) 2285 err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan, 2286 ivv->qos, 2287 htons(ETH_P_8021Q)); 2288 if (err < 0) 2289 return err; 2290 } 2291 2292 if (tb[IFLA_VF_VLAN_LIST]) { 2293 struct ifla_vf_vlan_info *ivvl[MAX_VLAN_LIST_LEN]; 2294 struct nlattr *attr; 2295 int rem, len = 0; 2296 2297 err = -EOPNOTSUPP; 2298 if (!ops->ndo_set_vf_vlan) 2299 return err; 2300 2301 nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) { 2302 if (nla_type(attr) != IFLA_VF_VLAN_INFO || 2303 nla_len(attr) < NLA_HDRLEN) { 2304 return -EINVAL; 2305 } 2306 if (len >= MAX_VLAN_LIST_LEN) 2307 return -EOPNOTSUPP; 2308 ivvl[len] = nla_data(attr); 2309 2310 len++; 2311 } 2312 if (len == 0) 2313 return -EINVAL; 2314 2315 if (ivvl[0]->vf >= INT_MAX) 2316 return -EINVAL; 2317 err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan, 2318 ivvl[0]->qos, ivvl[0]->vlan_proto); 2319 if (err < 0) 2320 return err; 2321 } 2322 2323 if (tb[IFLA_VF_TX_RATE]) { 2324 struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]); 2325 struct ifla_vf_info ivf; 2326 2327 if (ivt->vf >= INT_MAX) 2328 return -EINVAL; 2329 err = -EOPNOTSUPP; 2330 if (ops->ndo_get_vf_config) 2331 err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf); 2332 if (err < 0) 2333 return err; 2334 2335 err = -EOPNOTSUPP; 2336 if (ops->ndo_set_vf_rate) 2337 err = ops->ndo_set_vf_rate(dev, ivt->vf, 2338 ivf.min_tx_rate, 2339 ivt->rate); 2340 if (err < 0) 2341 return err; 2342 } 2343 2344 if (tb[IFLA_VF_RATE]) { 2345 struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]); 2346 2347 if (ivt->vf >= INT_MAX) 2348 return -EINVAL; 2349 err = -EOPNOTSUPP; 2350 if (ops->ndo_set_vf_rate) 2351 err = ops->ndo_set_vf_rate(dev, ivt->vf, 2352 ivt->min_tx_rate, 2353 ivt->max_tx_rate); 2354 if (err < 0) 2355 return err; 2356 } 2357 2358 if (tb[IFLA_VF_SPOOFCHK]) { 2359 struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]); 2360 2361 if (ivs->vf >= INT_MAX) 2362 return -EINVAL; 2363 err = -EOPNOTSUPP; 2364 if (ops->ndo_set_vf_spoofchk) 2365 err = ops->ndo_set_vf_spoofchk(dev, ivs->vf, 2366 ivs->setting); 2367 if (err < 0) 2368 return err; 2369 } 2370 2371 if (tb[IFLA_VF_LINK_STATE]) { 2372 struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]); 2373 2374 if (ivl->vf >= INT_MAX) 2375 return -EINVAL; 2376 err = -EOPNOTSUPP; 2377 if (ops->ndo_set_vf_link_state) 2378 err = ops->ndo_set_vf_link_state(dev, ivl->vf, 2379 ivl->link_state); 2380 if (err < 0) 2381 return err; 2382 } 2383 2384 if (tb[IFLA_VF_RSS_QUERY_EN]) { 2385 struct ifla_vf_rss_query_en 
*ivrssq_en; 2386 2387 err = -EOPNOTSUPP; 2388 ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]); 2389 if (ivrssq_en->vf >= INT_MAX) 2390 return -EINVAL; 2391 if (ops->ndo_set_vf_rss_query_en) 2392 err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf, 2393 ivrssq_en->setting); 2394 if (err < 0) 2395 return err; 2396 } 2397 2398 if (tb[IFLA_VF_TRUST]) { 2399 struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]); 2400 2401 if (ivt->vf >= INT_MAX) 2402 return -EINVAL; 2403 err = -EOPNOTSUPP; 2404 if (ops->ndo_set_vf_trust) 2405 err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting); 2406 if (err < 0) 2407 return err; 2408 } 2409 2410 if (tb[IFLA_VF_IB_NODE_GUID]) { 2411 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]); 2412 2413 if (ivt->vf >= INT_MAX) 2414 return -EINVAL; 2415 if (!ops->ndo_set_vf_guid) 2416 return -EOPNOTSUPP; 2417 return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID); 2418 } 2419 2420 if (tb[IFLA_VF_IB_PORT_GUID]) { 2421 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]); 2422 2423 if (ivt->vf >= INT_MAX) 2424 return -EINVAL; 2425 if (!ops->ndo_set_vf_guid) 2426 return -EOPNOTSUPP; 2427 2428 return handle_vf_guid(dev, ivt, IFLA_VF_IB_PORT_GUID); 2429 } 2430 2431 return err; 2432 } 2433 2434 static int do_set_master(struct net_device *dev, int ifindex, 2435 struct netlink_ext_ack *extack) 2436 { 2437 struct net_device *upper_dev = netdev_master_upper_dev_get(dev); 2438 const struct net_device_ops *ops; 2439 int err; 2440 2441 if (upper_dev) { 2442 if (upper_dev->ifindex == ifindex) 2443 return 0; 2444 ops = upper_dev->netdev_ops; 2445 if (ops->ndo_del_slave) { 2446 err = ops->ndo_del_slave(upper_dev, dev); 2447 if (err) 2448 return err; 2449 netdev_update_lockdep_key(dev); 2450 } else { 2451 return -EOPNOTSUPP; 2452 } 2453 } 2454 2455 if (ifindex) { 2456 upper_dev = __dev_get_by_index(dev_net(dev), ifindex); 2457 if (!upper_dev) 2458 return -EINVAL; 2459 ops = upper_dev->netdev_ops; 2460 if (ops->ndo_add_slave) { 2461 err = ops->ndo_add_slave(upper_dev, dev, extack); 2462 if (err) 2463 return err; 2464 } else { 2465 return -EOPNOTSUPP; 2466 } 2467 } 2468 return 0; 2469 } 2470 2471 #define DO_SETLINK_MODIFIED 0x01 2472 /* notify flag means notify + modified. 
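 * Since DO_SETLINK_NOTIFY (0x03) also carries the MODIFIED bit,
 * do_setlink() can test (status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY
 * to decide whether netdev_state_change() must be called on the way out.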
*/ 2473 #define DO_SETLINK_NOTIFY 0x03 2474 static int do_setlink(const struct sk_buff *skb, 2475 struct net_device *dev, struct ifinfomsg *ifm, 2476 struct netlink_ext_ack *extack, 2477 struct nlattr **tb, char *ifname, int status) 2478 { 2479 const struct net_device_ops *ops = dev->netdev_ops; 2480 int err; 2481 2482 err = validate_linkmsg(dev, tb); 2483 if (err < 0) 2484 return err; 2485 2486 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_TARGET_NETNSID]) { 2487 struct net *net = rtnl_link_get_net_capable(skb, dev_net(dev), 2488 tb, CAP_NET_ADMIN); 2489 if (IS_ERR(net)) { 2490 err = PTR_ERR(net); 2491 goto errout; 2492 } 2493 2494 err = dev_change_net_namespace(dev, net, ifname); 2495 put_net(net); 2496 if (err) 2497 goto errout; 2498 status |= DO_SETLINK_MODIFIED; 2499 } 2500 2501 if (tb[IFLA_MAP]) { 2502 struct rtnl_link_ifmap *u_map; 2503 struct ifmap k_map; 2504 2505 if (!ops->ndo_set_config) { 2506 err = -EOPNOTSUPP; 2507 goto errout; 2508 } 2509 2510 if (!netif_device_present(dev)) { 2511 err = -ENODEV; 2512 goto errout; 2513 } 2514 2515 u_map = nla_data(tb[IFLA_MAP]); 2516 k_map.mem_start = (unsigned long) u_map->mem_start; 2517 k_map.mem_end = (unsigned long) u_map->mem_end; 2518 k_map.base_addr = (unsigned short) u_map->base_addr; 2519 k_map.irq = (unsigned char) u_map->irq; 2520 k_map.dma = (unsigned char) u_map->dma; 2521 k_map.port = (unsigned char) u_map->port; 2522 2523 err = ops->ndo_set_config(dev, &k_map); 2524 if (err < 0) 2525 goto errout; 2526 2527 status |= DO_SETLINK_NOTIFY; 2528 } 2529 2530 if (tb[IFLA_ADDRESS]) { 2531 struct sockaddr *sa; 2532 int len; 2533 2534 len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len, 2535 sizeof(*sa)); 2536 sa = kmalloc(len, GFP_KERNEL); 2537 if (!sa) { 2538 err = -ENOMEM; 2539 goto errout; 2540 } 2541 sa->sa_family = dev->type; 2542 memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]), 2543 dev->addr_len); 2544 err = dev_set_mac_address(dev, sa, extack); 2545 kfree(sa); 2546 if (err) 2547 goto errout; 2548 status |= DO_SETLINK_MODIFIED; 2549 } 2550 2551 if (tb[IFLA_MTU]) { 2552 err = dev_set_mtu_ext(dev, nla_get_u32(tb[IFLA_MTU]), extack); 2553 if (err < 0) 2554 goto errout; 2555 status |= DO_SETLINK_MODIFIED; 2556 } 2557 2558 if (tb[IFLA_GROUP]) { 2559 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP])); 2560 status |= DO_SETLINK_NOTIFY; 2561 } 2562 2563 /* 2564 * Interface selected by interface index but interface 2565 * name provided implies that a name change has been 2566 * requested. 
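 * For example (illustrative values), a RTM_SETLINK request carrying
 * ifi_index == 2 together with IFLA_IFNAME == "wan0" renames
 * interface 2 to "wan0" rather than selecting the device by name.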
2567 */ 2568 if (ifm->ifi_index > 0 && ifname[0]) { 2569 err = dev_change_name(dev, ifname); 2570 if (err < 0) 2571 goto errout; 2572 status |= DO_SETLINK_MODIFIED; 2573 } 2574 2575 if (tb[IFLA_IFALIAS]) { 2576 err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]), 2577 nla_len(tb[IFLA_IFALIAS])); 2578 if (err < 0) 2579 goto errout; 2580 status |= DO_SETLINK_NOTIFY; 2581 } 2582 2583 if (tb[IFLA_BROADCAST]) { 2584 nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len); 2585 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); 2586 } 2587 2588 if (ifm->ifi_flags || ifm->ifi_change) { 2589 err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm), 2590 extack); 2591 if (err < 0) 2592 goto errout; 2593 } 2594 2595 if (tb[IFLA_MASTER]) { 2596 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack); 2597 if (err) 2598 goto errout; 2599 status |= DO_SETLINK_MODIFIED; 2600 } 2601 2602 if (tb[IFLA_CARRIER]) { 2603 err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER])); 2604 if (err) 2605 goto errout; 2606 status |= DO_SETLINK_MODIFIED; 2607 } 2608 2609 if (tb[IFLA_TXQLEN]) { 2610 unsigned int value = nla_get_u32(tb[IFLA_TXQLEN]); 2611 2612 err = dev_change_tx_queue_len(dev, value); 2613 if (err) 2614 goto errout; 2615 status |= DO_SETLINK_MODIFIED; 2616 } 2617 2618 if (tb[IFLA_GSO_MAX_SIZE]) { 2619 u32 max_size = nla_get_u32(tb[IFLA_GSO_MAX_SIZE]); 2620 2621 if (max_size > GSO_MAX_SIZE) { 2622 err = -EINVAL; 2623 goto errout; 2624 } 2625 2626 if (dev->gso_max_size ^ max_size) { 2627 netif_set_gso_max_size(dev, max_size); 2628 status |= DO_SETLINK_MODIFIED; 2629 } 2630 } 2631 2632 if (tb[IFLA_GSO_MAX_SEGS]) { 2633 u32 max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]); 2634 2635 if (max_segs > GSO_MAX_SEGS) { 2636 err = -EINVAL; 2637 goto errout; 2638 } 2639 2640 if (dev->gso_max_segs ^ max_segs) { 2641 dev->gso_max_segs = max_segs; 2642 status |= DO_SETLINK_MODIFIED; 2643 } 2644 } 2645 2646 if (tb[IFLA_OPERSTATE]) 2647 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE])); 2648 2649 if (tb[IFLA_LINKMODE]) { 2650 unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]); 2651 2652 write_lock_bh(&dev_base_lock); 2653 if (dev->link_mode ^ value) 2654 status |= DO_SETLINK_NOTIFY; 2655 dev->link_mode = value; 2656 write_unlock_bh(&dev_base_lock); 2657 } 2658 2659 if (tb[IFLA_VFINFO_LIST]) { 2660 struct nlattr *vfinfo[IFLA_VF_MAX + 1]; 2661 struct nlattr *attr; 2662 int rem; 2663 2664 nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) { 2665 if (nla_type(attr) != IFLA_VF_INFO || 2666 nla_len(attr) < NLA_HDRLEN) { 2667 err = -EINVAL; 2668 goto errout; 2669 } 2670 err = nla_parse_nested_deprecated(vfinfo, IFLA_VF_MAX, 2671 attr, 2672 ifla_vf_policy, 2673 NULL); 2674 if (err < 0) 2675 goto errout; 2676 err = do_setvfinfo(dev, vfinfo); 2677 if (err < 0) 2678 goto errout; 2679 status |= DO_SETLINK_NOTIFY; 2680 } 2681 } 2682 err = 0; 2683 2684 if (tb[IFLA_VF_PORTS]) { 2685 struct nlattr *port[IFLA_PORT_MAX+1]; 2686 struct nlattr *attr; 2687 int vf; 2688 int rem; 2689 2690 err = -EOPNOTSUPP; 2691 if (!ops->ndo_set_vf_port) 2692 goto errout; 2693 2694 nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) { 2695 if (nla_type(attr) != IFLA_VF_PORT || 2696 nla_len(attr) < NLA_HDRLEN) { 2697 err = -EINVAL; 2698 goto errout; 2699 } 2700 err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX, 2701 attr, 2702 ifla_port_policy, 2703 NULL); 2704 if (err < 0) 2705 goto errout; 2706 if (!port[IFLA_PORT_VF]) { 2707 err = -EOPNOTSUPP; 2708 goto errout; 2709 } 2710 vf = nla_get_u32(port[IFLA_PORT_VF]); 2711 err = 
ops->ndo_set_vf_port(dev, vf, port); 2712 if (err < 0) 2713 goto errout; 2714 status |= DO_SETLINK_NOTIFY; 2715 } 2716 } 2717 err = 0; 2718 2719 if (tb[IFLA_PORT_SELF]) { 2720 struct nlattr *port[IFLA_PORT_MAX+1]; 2721 2722 err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX, 2723 tb[IFLA_PORT_SELF], 2724 ifla_port_policy, NULL); 2725 if (err < 0) 2726 goto errout; 2727 2728 err = -EOPNOTSUPP; 2729 if (ops->ndo_set_vf_port) 2730 err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port); 2731 if (err < 0) 2732 goto errout; 2733 status |= DO_SETLINK_NOTIFY; 2734 } 2735 2736 if (tb[IFLA_AF_SPEC]) { 2737 struct nlattr *af; 2738 int rem; 2739 2740 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) { 2741 const struct rtnl_af_ops *af_ops; 2742 2743 rcu_read_lock(); 2744 2745 BUG_ON(!(af_ops = rtnl_af_lookup(nla_type(af)))); 2746 2747 err = af_ops->set_link_af(dev, af); 2748 if (err < 0) { 2749 rcu_read_unlock(); 2750 goto errout; 2751 } 2752 2753 rcu_read_unlock(); 2754 status |= DO_SETLINK_NOTIFY; 2755 } 2756 } 2757 err = 0; 2758 2759 if (tb[IFLA_PROTO_DOWN]) { 2760 err = dev_change_proto_down(dev, 2761 nla_get_u8(tb[IFLA_PROTO_DOWN])); 2762 if (err) 2763 goto errout; 2764 status |= DO_SETLINK_NOTIFY; 2765 } 2766 2767 if (tb[IFLA_XDP]) { 2768 struct nlattr *xdp[IFLA_XDP_MAX + 1]; 2769 u32 xdp_flags = 0; 2770 2771 err = nla_parse_nested_deprecated(xdp, IFLA_XDP_MAX, 2772 tb[IFLA_XDP], 2773 ifla_xdp_policy, NULL); 2774 if (err < 0) 2775 goto errout; 2776 2777 if (xdp[IFLA_XDP_ATTACHED] || xdp[IFLA_XDP_PROG_ID]) { 2778 err = -EINVAL; 2779 goto errout; 2780 } 2781 2782 if (xdp[IFLA_XDP_FLAGS]) { 2783 xdp_flags = nla_get_u32(xdp[IFLA_XDP_FLAGS]); 2784 if (xdp_flags & ~XDP_FLAGS_MASK) { 2785 err = -EINVAL; 2786 goto errout; 2787 } 2788 if (hweight32(xdp_flags & XDP_FLAGS_MODES) > 1) { 2789 err = -EINVAL; 2790 goto errout; 2791 } 2792 } 2793 2794 if (xdp[IFLA_XDP_FD]) { 2795 err = dev_change_xdp_fd(dev, extack, 2796 nla_get_s32(xdp[IFLA_XDP_FD]), 2797 xdp_flags); 2798 if (err) 2799 goto errout; 2800 status |= DO_SETLINK_NOTIFY; 2801 } 2802 } 2803 2804 errout: 2805 if (status & DO_SETLINK_MODIFIED) { 2806 if ((status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY) 2807 netdev_state_change(dev); 2808 2809 if (err < 0) 2810 net_warn_ratelimited("A link change request failed with some changes committed already. 
Interface %s may have been left with an inconsistent configuration, please check.\n", 2811 dev->name); 2812 } 2813 2814 return err; 2815 } 2816 2817 static struct net_device *rtnl_dev_get(struct net *net, 2818 struct nlattr *ifname_attr, 2819 struct nlattr *altifname_attr, 2820 char *ifname) 2821 { 2822 char buffer[ALTIFNAMSIZ]; 2823 2824 if (!ifname) { 2825 ifname = buffer; 2826 if (ifname_attr) 2827 nla_strlcpy(ifname, ifname_attr, IFNAMSIZ); 2828 else if (altifname_attr) 2829 nla_strlcpy(ifname, altifname_attr, ALTIFNAMSIZ); 2830 else 2831 return NULL; 2832 } 2833 2834 return __dev_get_by_name(net, ifname); 2835 } 2836 2837 static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, 2838 struct netlink_ext_ack *extack) 2839 { 2840 struct net *net = sock_net(skb->sk); 2841 struct ifinfomsg *ifm; 2842 struct net_device *dev; 2843 int err; 2844 struct nlattr *tb[IFLA_MAX+1]; 2845 char ifname[IFNAMSIZ]; 2846 2847 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX, 2848 ifla_policy, extack); 2849 if (err < 0) 2850 goto errout; 2851 2852 err = rtnl_ensure_unique_netns(tb, extack, false); 2853 if (err < 0) 2854 goto errout; 2855 2856 if (tb[IFLA_IFNAME]) 2857 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ); 2858 else 2859 ifname[0] = '\0'; 2860 2861 err = -EINVAL; 2862 ifm = nlmsg_data(nlh); 2863 if (ifm->ifi_index > 0) 2864 dev = __dev_get_by_index(net, ifm->ifi_index); 2865 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) 2866 dev = rtnl_dev_get(net, NULL, tb[IFLA_ALT_IFNAME], ifname); 2867 else 2868 goto errout; 2869 2870 if (dev == NULL) { 2871 err = -ENODEV; 2872 goto errout; 2873 } 2874 2875 err = do_setlink(skb, dev, ifm, extack, tb, ifname, 0); 2876 errout: 2877 return err; 2878 } 2879 2880 static int rtnl_group_dellink(const struct net *net, int group) 2881 { 2882 struct net_device *dev, *aux; 2883 LIST_HEAD(list_kill); 2884 bool found = false; 2885 2886 if (!group) 2887 return -EPERM; 2888 2889 for_each_netdev(net, dev) { 2890 if (dev->group == group) { 2891 const struct rtnl_link_ops *ops; 2892 2893 found = true; 2894 ops = dev->rtnl_link_ops; 2895 if (!ops || !ops->dellink) 2896 return -EOPNOTSUPP; 2897 } 2898 } 2899 2900 if (!found) 2901 return -ENODEV; 2902 2903 for_each_netdev_safe(net, dev, aux) { 2904 if (dev->group == group) { 2905 const struct rtnl_link_ops *ops; 2906 2907 ops = dev->rtnl_link_ops; 2908 ops->dellink(dev, &list_kill); 2909 } 2910 } 2911 unregister_netdevice_many(&list_kill); 2912 2913 return 0; 2914 } 2915 2916 int rtnl_delete_link(struct net_device *dev) 2917 { 2918 const struct rtnl_link_ops *ops; 2919 LIST_HEAD(list_kill); 2920 2921 ops = dev->rtnl_link_ops; 2922 if (!ops || !ops->dellink) 2923 return -EOPNOTSUPP; 2924 2925 ops->dellink(dev, &list_kill); 2926 unregister_netdevice_many(&list_kill); 2927 2928 return 0; 2929 } 2930 EXPORT_SYMBOL_GPL(rtnl_delete_link); 2931 2932 static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, 2933 struct netlink_ext_ack *extack) 2934 { 2935 struct net *net = sock_net(skb->sk); 2936 struct net *tgt_net = net; 2937 struct net_device *dev = NULL; 2938 struct ifinfomsg *ifm; 2939 struct nlattr *tb[IFLA_MAX+1]; 2940 int err; 2941 int netnsid = -1; 2942 2943 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX, 2944 ifla_policy, extack); 2945 if (err < 0) 2946 return err; 2947 2948 err = rtnl_ensure_unique_netns(tb, extack, true); 2949 if (err < 0) 2950 return err; 2951 2952 if (tb[IFLA_TARGET_NETNSID]) { 2953 netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]); 2954 tgt_net = 
rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
2955	if (IS_ERR(tgt_net))
2956		return PTR_ERR(tgt_net);
2957	}
2958
2959	err = -EINVAL;
2960	ifm = nlmsg_data(nlh);
2961	if (ifm->ifi_index > 0)
2962		dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
2963	else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
2964		dev = rtnl_dev_get(tgt_net, tb[IFLA_IFNAME],
2965				   tb[IFLA_ALT_IFNAME], NULL);
2966	else if (tb[IFLA_GROUP])
2967		err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP]));
2968	else
2969		goto out;
2970
2971	if (!dev) {
2972		if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME] || ifm->ifi_index > 0)
2973			err = -ENODEV;
2974
2975		goto out;
2976	}
2977
2978	err = rtnl_delete_link(dev);
2979
2980	out:
2981	if (netnsid >= 0)
2982		put_net(tgt_net);
2983
2984	return err;
2985	}
2986
2987	int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
2988	{
2989	unsigned int old_flags;
2990	int err;
2991
2992	old_flags = dev->flags;
2993	if (ifm && (ifm->ifi_flags || ifm->ifi_change)) {
2994		err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
2995					 NULL);
2996		if (err < 0)
2997			return err;
2998	}
2999
3000	if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
3001		__dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags));
3002	} else {
3003		dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
3004		__dev_notify_flags(dev, old_flags, ~0U);
3005	}
3006	return 0;
3007	}
3008	EXPORT_SYMBOL(rtnl_configure_link);
3009
3010	struct net_device *rtnl_create_link(struct net *net, const char *ifname,
3011					    unsigned char name_assign_type,
3012					    const struct rtnl_link_ops *ops,
3013					    struct nlattr *tb[],
3014					    struct netlink_ext_ack *extack)
3015	{
3016	struct net_device *dev;
3017	unsigned int num_tx_queues = 1;
3018	unsigned int num_rx_queues = 1;
3019
3020	if (tb[IFLA_NUM_TX_QUEUES])
3021		num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]);
3022	else if (ops->get_num_tx_queues)
3023		num_tx_queues = ops->get_num_tx_queues();
3024
3025	if (tb[IFLA_NUM_RX_QUEUES])
3026		num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]);
3027	else if (ops->get_num_rx_queues)
3028		num_rx_queues = ops->get_num_rx_queues();
3029
3030	if (num_tx_queues < 1 || num_tx_queues > 4096) {
3031		NL_SET_ERR_MSG(extack, "Invalid number of transmit queues");
3032		return ERR_PTR(-EINVAL);
3033	}
3034
3035	if (num_rx_queues < 1 || num_rx_queues > 4096) {
3036		NL_SET_ERR_MSG(extack, "Invalid number of receive queues");
3037		return ERR_PTR(-EINVAL);
3038	}
3039
3040	dev = alloc_netdev_mqs(ops->priv_size, ifname, name_assign_type,
3041			       ops->setup, num_tx_queues, num_rx_queues);
3042	if (!dev)
3043		return ERR_PTR(-ENOMEM);
3044
3045	dev_net_set(dev, net);
3046	dev->rtnl_link_ops = ops;
3047	dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
3048
3049	if (tb[IFLA_MTU])
3050		dev->mtu = nla_get_u32(tb[IFLA_MTU]);
3051	if (tb[IFLA_ADDRESS]) {
3052		memcpy(dev->dev_addr, nla_data(tb[IFLA_ADDRESS]),
3053		       nla_len(tb[IFLA_ADDRESS]));
3054		dev->addr_assign_type = NET_ADDR_SET;
3055	}
3056	if (tb[IFLA_BROADCAST])
3057		memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]),
3058		       nla_len(tb[IFLA_BROADCAST]));
3059	if (tb[IFLA_TXQLEN])
3060		dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
3061	if (tb[IFLA_OPERSTATE])
3062		set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
3063	if (tb[IFLA_LINKMODE])
3064		dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
3065	if (tb[IFLA_GROUP])
3066		dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
3067	if (tb[IFLA_GSO_MAX_SIZE])
3068		netif_set_gso_max_size(dev, nla_get_u32(tb[IFLA_GSO_MAX_SIZE]));
3069	if (tb[IFLA_GSO_MAX_SEGS])
3070
dev->gso_max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]); 3071 3072 return dev; 3073 } 3074 EXPORT_SYMBOL(rtnl_create_link); 3075 3076 static int rtnl_group_changelink(const struct sk_buff *skb, 3077 struct net *net, int group, 3078 struct ifinfomsg *ifm, 3079 struct netlink_ext_ack *extack, 3080 struct nlattr **tb) 3081 { 3082 struct net_device *dev, *aux; 3083 int err; 3084 3085 for_each_netdev_safe(net, dev, aux) { 3086 if (dev->group == group) { 3087 err = do_setlink(skb, dev, ifm, extack, tb, NULL, 0); 3088 if (err < 0) 3089 return err; 3090 } 3091 } 3092 3093 return 0; 3094 } 3095 3096 static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, 3097 struct nlattr **attr, struct netlink_ext_ack *extack) 3098 { 3099 struct nlattr *slave_attr[RTNL_SLAVE_MAX_TYPE + 1]; 3100 unsigned char name_assign_type = NET_NAME_USER; 3101 struct nlattr *linkinfo[IFLA_INFO_MAX + 1]; 3102 const struct rtnl_link_ops *m_ops = NULL; 3103 struct net_device *master_dev = NULL; 3104 struct net *net = sock_net(skb->sk); 3105 const struct rtnl_link_ops *ops; 3106 struct nlattr *tb[IFLA_MAX + 1]; 3107 struct net *dest_net, *link_net; 3108 struct nlattr **slave_data; 3109 char kind[MODULE_NAME_LEN]; 3110 struct net_device *dev; 3111 struct ifinfomsg *ifm; 3112 char ifname[IFNAMSIZ]; 3113 struct nlattr **data; 3114 int err; 3115 3116 #ifdef CONFIG_MODULES 3117 replay: 3118 #endif 3119 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX, 3120 ifla_policy, extack); 3121 if (err < 0) 3122 return err; 3123 3124 err = rtnl_ensure_unique_netns(tb, extack, false); 3125 if (err < 0) 3126 return err; 3127 3128 if (tb[IFLA_IFNAME]) 3129 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ); 3130 else 3131 ifname[0] = '\0'; 3132 3133 ifm = nlmsg_data(nlh); 3134 if (ifm->ifi_index > 0) 3135 dev = __dev_get_by_index(net, ifm->ifi_index); 3136 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) 3137 dev = rtnl_dev_get(net, NULL, tb[IFLA_ALT_IFNAME], ifname); 3138 else 3139 dev = NULL; 3140 3141 if (dev) { 3142 master_dev = netdev_master_upper_dev_get(dev); 3143 if (master_dev) 3144 m_ops = master_dev->rtnl_link_ops; 3145 } 3146 3147 err = validate_linkmsg(dev, tb); 3148 if (err < 0) 3149 return err; 3150 3151 if (tb[IFLA_LINKINFO]) { 3152 err = nla_parse_nested_deprecated(linkinfo, IFLA_INFO_MAX, 3153 tb[IFLA_LINKINFO], 3154 ifla_info_policy, NULL); 3155 if (err < 0) 3156 return err; 3157 } else 3158 memset(linkinfo, 0, sizeof(linkinfo)); 3159 3160 if (linkinfo[IFLA_INFO_KIND]) { 3161 nla_strlcpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind)); 3162 ops = rtnl_link_ops_get(kind); 3163 } else { 3164 kind[0] = '\0'; 3165 ops = NULL; 3166 } 3167 3168 data = NULL; 3169 if (ops) { 3170 if (ops->maxtype > RTNL_MAX_TYPE) 3171 return -EINVAL; 3172 3173 if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) { 3174 err = nla_parse_nested_deprecated(attr, ops->maxtype, 3175 linkinfo[IFLA_INFO_DATA], 3176 ops->policy, extack); 3177 if (err < 0) 3178 return err; 3179 data = attr; 3180 } 3181 if (ops->validate) { 3182 err = ops->validate(tb, data, extack); 3183 if (err < 0) 3184 return err; 3185 } 3186 } 3187 3188 slave_data = NULL; 3189 if (m_ops) { 3190 if (m_ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE) 3191 return -EINVAL; 3192 3193 if (m_ops->slave_maxtype && 3194 linkinfo[IFLA_INFO_SLAVE_DATA]) { 3195 err = nla_parse_nested_deprecated(slave_attr, 3196 m_ops->slave_maxtype, 3197 linkinfo[IFLA_INFO_SLAVE_DATA], 3198 m_ops->slave_policy, 3199 extack); 3200 if (err < 0) 3201 return err; 3202 slave_data = slave_attr; 3203 } 3204 } 3205 3206 if 
(dev) { 3207 int status = 0; 3208 3209 if (nlh->nlmsg_flags & NLM_F_EXCL) 3210 return -EEXIST; 3211 if (nlh->nlmsg_flags & NLM_F_REPLACE) 3212 return -EOPNOTSUPP; 3213 3214 if (linkinfo[IFLA_INFO_DATA]) { 3215 if (!ops || ops != dev->rtnl_link_ops || 3216 !ops->changelink) 3217 return -EOPNOTSUPP; 3218 3219 err = ops->changelink(dev, tb, data, extack); 3220 if (err < 0) 3221 return err; 3222 status |= DO_SETLINK_NOTIFY; 3223 } 3224 3225 if (linkinfo[IFLA_INFO_SLAVE_DATA]) { 3226 if (!m_ops || !m_ops->slave_changelink) 3227 return -EOPNOTSUPP; 3228 3229 err = m_ops->slave_changelink(master_dev, dev, tb, 3230 slave_data, extack); 3231 if (err < 0) 3232 return err; 3233 status |= DO_SETLINK_NOTIFY; 3234 } 3235 3236 return do_setlink(skb, dev, ifm, extack, tb, ifname, status); 3237 } 3238 3239 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) { 3240 if (ifm->ifi_index == 0 && tb[IFLA_GROUP]) 3241 return rtnl_group_changelink(skb, net, 3242 nla_get_u32(tb[IFLA_GROUP]), 3243 ifm, extack, tb); 3244 return -ENODEV; 3245 } 3246 3247 if (tb[IFLA_MAP] || tb[IFLA_PROTINFO]) 3248 return -EOPNOTSUPP; 3249 3250 if (!ops) { 3251 #ifdef CONFIG_MODULES 3252 if (kind[0]) { 3253 __rtnl_unlock(); 3254 request_module("rtnl-link-%s", kind); 3255 rtnl_lock(); 3256 ops = rtnl_link_ops_get(kind); 3257 if (ops) 3258 goto replay; 3259 } 3260 #endif 3261 NL_SET_ERR_MSG(extack, "Unknown device type"); 3262 return -EOPNOTSUPP; 3263 } 3264 3265 if (!ops->setup) 3266 return -EOPNOTSUPP; 3267 3268 if (!ifname[0]) { 3269 snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind); 3270 name_assign_type = NET_NAME_ENUM; 3271 } 3272 3273 dest_net = rtnl_link_get_net_capable(skb, net, tb, CAP_NET_ADMIN); 3274 if (IS_ERR(dest_net)) 3275 return PTR_ERR(dest_net); 3276 3277 if (tb[IFLA_LINK_NETNSID]) { 3278 int id = nla_get_s32(tb[IFLA_LINK_NETNSID]); 3279 3280 link_net = get_net_ns_by_id(dest_net, id); 3281 if (!link_net) { 3282 NL_SET_ERR_MSG(extack, "Unknown network namespace id"); 3283 err = -EINVAL; 3284 goto out; 3285 } 3286 err = -EPERM; 3287 if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN)) 3288 goto out; 3289 } else { 3290 link_net = NULL; 3291 } 3292 3293 dev = rtnl_create_link(link_net ? : dest_net, ifname, 3294 name_assign_type, ops, tb, extack); 3295 if (IS_ERR(dev)) { 3296 err = PTR_ERR(dev); 3297 goto out; 3298 } 3299 3300 dev->ifindex = ifm->ifi_index; 3301 3302 if (ops->newlink) { 3303 err = ops->newlink(link_net ? : net, dev, tb, data, extack); 3304 /* Drivers should call free_netdev() in ->destructor 3305 * and unregister it on failure after registration 3306 * so that device could be finally freed in rtnl_unlock. 
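 * Consequently, if register_netdevice() succeeded inside ->newlink(),
 * the error path below must not free the device itself; free_netdev()
 * is only called here for a device that was never registered.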
3307 */ 3308 if (err < 0) { 3309 /* If device is not registered at all, free it now */ 3310 if (dev->reg_state == NETREG_UNINITIALIZED) 3311 free_netdev(dev); 3312 goto out; 3313 } 3314 } else { 3315 err = register_netdevice(dev); 3316 if (err < 0) { 3317 free_netdev(dev); 3318 goto out; 3319 } 3320 } 3321 err = rtnl_configure_link(dev, ifm); 3322 if (err < 0) 3323 goto out_unregister; 3324 if (link_net) { 3325 err = dev_change_net_namespace(dev, dest_net, ifname); 3326 if (err < 0) 3327 goto out_unregister; 3328 } 3329 if (tb[IFLA_MASTER]) { 3330 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack); 3331 if (err) 3332 goto out_unregister; 3333 } 3334 out: 3335 if (link_net) 3336 put_net(link_net); 3337 put_net(dest_net); 3338 return err; 3339 out_unregister: 3340 if (ops->newlink) { 3341 LIST_HEAD(list_kill); 3342 3343 ops->dellink(dev, &list_kill); 3344 unregister_netdevice_many(&list_kill); 3345 } else { 3346 unregister_netdevice(dev); 3347 } 3348 goto out; 3349 } 3350 3351 static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, 3352 struct netlink_ext_ack *extack) 3353 { 3354 struct nlattr **attr; 3355 int ret; 3356 3357 attr = kmalloc_array(RTNL_MAX_TYPE + 1, sizeof(*attr), GFP_KERNEL); 3358 if (!attr) 3359 return -ENOMEM; 3360 3361 ret = __rtnl_newlink(skb, nlh, attr, extack); 3362 kfree(attr); 3363 return ret; 3364 } 3365 3366 static int rtnl_valid_getlink_req(struct sk_buff *skb, 3367 const struct nlmsghdr *nlh, 3368 struct nlattr **tb, 3369 struct netlink_ext_ack *extack) 3370 { 3371 struct ifinfomsg *ifm; 3372 int i, err; 3373 3374 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) { 3375 NL_SET_ERR_MSG(extack, "Invalid header for get link"); 3376 return -EINVAL; 3377 } 3378 3379 if (!netlink_strict_get_check(skb)) 3380 return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX, 3381 ifla_policy, extack); 3382 3383 ifm = nlmsg_data(nlh); 3384 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags || 3385 ifm->ifi_change) { 3386 NL_SET_ERR_MSG(extack, "Invalid values in header for get link request"); 3387 return -EINVAL; 3388 } 3389 3390 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFLA_MAX, 3391 ifla_policy, extack); 3392 if (err) 3393 return err; 3394 3395 for (i = 0; i <= IFLA_MAX; i++) { 3396 if (!tb[i]) 3397 continue; 3398 3399 switch (i) { 3400 case IFLA_IFNAME: 3401 case IFLA_ALT_IFNAME: 3402 case IFLA_EXT_MASK: 3403 case IFLA_TARGET_NETNSID: 3404 break; 3405 default: 3406 NL_SET_ERR_MSG(extack, "Unsupported attribute in get link request"); 3407 return -EINVAL; 3408 } 3409 } 3410 3411 return 0; 3412 } 3413 3414 static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh, 3415 struct netlink_ext_ack *extack) 3416 { 3417 struct net *net = sock_net(skb->sk); 3418 struct net *tgt_net = net; 3419 struct ifinfomsg *ifm; 3420 struct nlattr *tb[IFLA_MAX+1]; 3421 struct net_device *dev = NULL; 3422 struct sk_buff *nskb; 3423 int netnsid = -1; 3424 int err; 3425 u32 ext_filter_mask = 0; 3426 3427 err = rtnl_valid_getlink_req(skb, nlh, tb, extack); 3428 if (err < 0) 3429 return err; 3430 3431 err = rtnl_ensure_unique_netns(tb, extack, true); 3432 if (err < 0) 3433 return err; 3434 3435 if (tb[IFLA_TARGET_NETNSID]) { 3436 netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]); 3437 tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid); 3438 if (IS_ERR(tgt_net)) 3439 return PTR_ERR(tgt_net); 3440 } 3441 3442 if (tb[IFLA_EXT_MASK]) 3443 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); 3444 3445 err = -EINVAL; 3446 ifm = nlmsg_data(nlh); 3447 
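	/* Device lookup order matches rtnl_setlink(): a positive ifi_index
	 * takes precedence, then IFLA_IFNAME / IFLA_ALT_IFNAME.
	 */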
if (ifm->ifi_index > 0) 3448 dev = __dev_get_by_index(tgt_net, ifm->ifi_index); 3449 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) 3450 dev = rtnl_dev_get(tgt_net, tb[IFLA_IFNAME], 3451 tb[IFLA_ALT_IFNAME], NULL); 3452 else 3453 goto out; 3454 3455 err = -ENODEV; 3456 if (dev == NULL) 3457 goto out; 3458 3459 err = -ENOBUFS; 3460 nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL); 3461 if (nskb == NULL) 3462 goto out; 3463 3464 err = rtnl_fill_ifinfo(nskb, dev, net, 3465 RTM_NEWLINK, NETLINK_CB(skb).portid, 3466 nlh->nlmsg_seq, 0, 0, ext_filter_mask, 3467 0, NULL, 0, netnsid, GFP_KERNEL); 3468 if (err < 0) { 3469 /* -EMSGSIZE implies BUG in if_nlmsg_size */ 3470 WARN_ON(err == -EMSGSIZE); 3471 kfree_skb(nskb); 3472 } else 3473 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid); 3474 out: 3475 if (netnsid >= 0) 3476 put_net(tgt_net); 3477 3478 return err; 3479 } 3480 3481 static int rtnl_alt_ifname(int cmd, struct net_device *dev, struct nlattr *attr, 3482 bool *changed, struct netlink_ext_ack *extack) 3483 { 3484 char *alt_ifname; 3485 int err; 3486 3487 err = nla_validate(attr, attr->nla_len, IFLA_MAX, ifla_policy, extack); 3488 if (err) 3489 return err; 3490 3491 alt_ifname = nla_data(attr); 3492 if (cmd == RTM_NEWLINKPROP) { 3493 alt_ifname = kstrdup(alt_ifname, GFP_KERNEL); 3494 if (!alt_ifname) 3495 return -ENOMEM; 3496 err = netdev_name_node_alt_create(dev, alt_ifname); 3497 if (err) { 3498 kfree(alt_ifname); 3499 return err; 3500 } 3501 } else if (cmd == RTM_DELLINKPROP) { 3502 err = netdev_name_node_alt_destroy(dev, alt_ifname); 3503 if (err) 3504 return err; 3505 } else { 3506 WARN_ON(1); 3507 return 0; 3508 } 3509 3510 *changed = true; 3511 return 0; 3512 } 3513 3514 static int rtnl_linkprop(int cmd, struct sk_buff *skb, struct nlmsghdr *nlh, 3515 struct netlink_ext_ack *extack) 3516 { 3517 struct net *net = sock_net(skb->sk); 3518 struct nlattr *tb[IFLA_MAX + 1]; 3519 struct net_device *dev; 3520 struct ifinfomsg *ifm; 3521 bool changed = false; 3522 struct nlattr *attr; 3523 int err, rem; 3524 3525 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack); 3526 if (err) 3527 return err; 3528 3529 err = rtnl_ensure_unique_netns(tb, extack, true); 3530 if (err) 3531 return err; 3532 3533 ifm = nlmsg_data(nlh); 3534 if (ifm->ifi_index > 0) 3535 dev = __dev_get_by_index(net, ifm->ifi_index); 3536 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) 3537 dev = rtnl_dev_get(net, tb[IFLA_IFNAME], 3538 tb[IFLA_ALT_IFNAME], NULL); 3539 else 3540 return -EINVAL; 3541 3542 if (!dev) 3543 return -ENODEV; 3544 3545 if (!tb[IFLA_PROP_LIST]) 3546 return 0; 3547 3548 nla_for_each_nested(attr, tb[IFLA_PROP_LIST], rem) { 3549 switch (nla_type(attr)) { 3550 case IFLA_ALT_IFNAME: 3551 err = rtnl_alt_ifname(cmd, dev, attr, &changed, extack); 3552 if (err) 3553 return err; 3554 break; 3555 } 3556 } 3557 3558 if (changed) 3559 netdev_state_change(dev); 3560 return 0; 3561 } 3562 3563 static int rtnl_newlinkprop(struct sk_buff *skb, struct nlmsghdr *nlh, 3564 struct netlink_ext_ack *extack) 3565 { 3566 return rtnl_linkprop(RTM_NEWLINKPROP, skb, nlh, extack); 3567 } 3568 3569 static int rtnl_dellinkprop(struct sk_buff *skb, struct nlmsghdr *nlh, 3570 struct netlink_ext_ack *extack) 3571 { 3572 return rtnl_linkprop(RTM_DELLINKPROP, skb, nlh, extack); 3573 } 3574 3575 static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh) 3576 { 3577 struct net *net = sock_net(skb->sk); 3578 struct net_device *dev; 3579 struct nlattr *tb[IFLA_MAX+1]; 3580 u32 ext_filter_mask = 0; 
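	/* Worst-case if_nlmsg_size() across all devices; only computed
	 * below when userspace supplies an IFLA_EXT_MASK filter.
	 */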
3581 u16 min_ifinfo_dump_size = 0; 3582 int hdrlen; 3583 3584 /* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */ 3585 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ? 3586 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg); 3587 3588 if (nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, NULL) >= 0) { 3589 if (tb[IFLA_EXT_MASK]) 3590 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); 3591 } 3592 3593 if (!ext_filter_mask) 3594 return NLMSG_GOODSIZE; 3595 /* 3596 * traverse the list of net devices and compute the minimum 3597 * buffer size based upon the filter mask. 3598 */ 3599 rcu_read_lock(); 3600 for_each_netdev_rcu(net, dev) { 3601 min_ifinfo_dump_size = max_t(u16, min_ifinfo_dump_size, 3602 if_nlmsg_size(dev, 3603 ext_filter_mask)); 3604 } 3605 rcu_read_unlock(); 3606 3607 return nlmsg_total_size(min_ifinfo_dump_size); 3608 } 3609 3610 static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb) 3611 { 3612 int idx; 3613 int s_idx = cb->family; 3614 int type = cb->nlh->nlmsg_type - RTM_BASE; 3615 int ret = 0; 3616 3617 if (s_idx == 0) 3618 s_idx = 1; 3619 3620 for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) { 3621 struct rtnl_link **tab; 3622 struct rtnl_link *link; 3623 rtnl_dumpit_func dumpit; 3624 3625 if (idx < s_idx || idx == PF_PACKET) 3626 continue; 3627 3628 if (type < 0 || type >= RTM_NR_MSGTYPES) 3629 continue; 3630 3631 tab = rcu_dereference_rtnl(rtnl_msg_handlers[idx]); 3632 if (!tab) 3633 continue; 3634 3635 link = tab[type]; 3636 if (!link) 3637 continue; 3638 3639 dumpit = link->dumpit; 3640 if (!dumpit) 3641 continue; 3642 3643 if (idx > s_idx) { 3644 memset(&cb->args[0], 0, sizeof(cb->args)); 3645 cb->prev_seq = 0; 3646 cb->seq = 0; 3647 } 3648 ret = dumpit(skb, cb); 3649 if (ret) 3650 break; 3651 } 3652 cb->family = idx; 3653 3654 return skb->len ? 
: ret; 3655 } 3656 3657 struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev, 3658 unsigned int change, 3659 u32 event, gfp_t flags, int *new_nsid, 3660 int new_ifindex) 3661 { 3662 struct net *net = dev_net(dev); 3663 struct sk_buff *skb; 3664 int err = -ENOBUFS; 3665 size_t if_info_size; 3666 3667 skb = nlmsg_new((if_info_size = if_nlmsg_size(dev, 0)), flags); 3668 if (skb == NULL) 3669 goto errout; 3670 3671 err = rtnl_fill_ifinfo(skb, dev, dev_net(dev), 3672 type, 0, 0, change, 0, 0, event, 3673 new_nsid, new_ifindex, -1, flags); 3674 if (err < 0) { 3675 /* -EMSGSIZE implies BUG in if_nlmsg_size() */ 3676 WARN_ON(err == -EMSGSIZE); 3677 kfree_skb(skb); 3678 goto errout; 3679 } 3680 return skb; 3681 errout: 3682 if (err < 0) 3683 rtnl_set_sk_err(net, RTNLGRP_LINK, err); 3684 return NULL; 3685 } 3686 3687 void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags) 3688 { 3689 struct net *net = dev_net(dev); 3690 3691 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, flags); 3692 } 3693 3694 static void rtmsg_ifinfo_event(int type, struct net_device *dev, 3695 unsigned int change, u32 event, 3696 gfp_t flags, int *new_nsid, int new_ifindex) 3697 { 3698 struct sk_buff *skb; 3699 3700 if (dev->reg_state != NETREG_REGISTERED) 3701 return; 3702 3703 skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid, 3704 new_ifindex); 3705 if (skb) 3706 rtmsg_ifinfo_send(skb, dev, flags); 3707 } 3708 3709 void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change, 3710 gfp_t flags) 3711 { 3712 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags, 3713 NULL, 0); 3714 } 3715 3716 void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change, 3717 gfp_t flags, int *new_nsid, int new_ifindex) 3718 { 3719 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags, 3720 new_nsid, new_ifindex); 3721 } 3722 3723 static int nlmsg_populate_fdb_fill(struct sk_buff *skb, 3724 struct net_device *dev, 3725 u8 *addr, u16 vid, u32 pid, u32 seq, 3726 int type, unsigned int flags, 3727 int nlflags, u16 ndm_state) 3728 { 3729 struct nlmsghdr *nlh; 3730 struct ndmsg *ndm; 3731 3732 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags); 3733 if (!nlh) 3734 return -EMSGSIZE; 3735 3736 ndm = nlmsg_data(nlh); 3737 ndm->ndm_family = AF_BRIDGE; 3738 ndm->ndm_pad1 = 0; 3739 ndm->ndm_pad2 = 0; 3740 ndm->ndm_flags = flags; 3741 ndm->ndm_type = 0; 3742 ndm->ndm_ifindex = dev->ifindex; 3743 ndm->ndm_state = ndm_state; 3744 3745 if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr)) 3746 goto nla_put_failure; 3747 if (vid) 3748 if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid)) 3749 goto nla_put_failure; 3750 3751 nlmsg_end(skb, nlh); 3752 return 0; 3753 3754 nla_put_failure: 3755 nlmsg_cancel(skb, nlh); 3756 return -EMSGSIZE; 3757 } 3758 3759 static inline size_t rtnl_fdb_nlmsg_size(void) 3760 { 3761 return NLMSG_ALIGN(sizeof(struct ndmsg)) + 3762 nla_total_size(ETH_ALEN) + /* NDA_LLADDR */ 3763 nla_total_size(sizeof(u16)) + /* NDA_VLAN */ 3764 0; 3765 } 3766 3767 static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type, 3768 u16 ndm_state) 3769 { 3770 struct net *net = dev_net(dev); 3771 struct sk_buff *skb; 3772 int err = -ENOBUFS; 3773 3774 skb = nlmsg_new(rtnl_fdb_nlmsg_size(), GFP_ATOMIC); 3775 if (!skb) 3776 goto errout; 3777 3778 err = nlmsg_populate_fdb_fill(skb, dev, addr, vid, 3779 0, 0, type, NTF_SELF, 0, ndm_state); 3780 if (err < 0) { 3781 kfree_skb(skb); 3782 goto errout; 3783 } 3784 3785 rtnl_notify(skb, net, 
0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC); 3786 return; 3787 errout: 3788 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err); 3789 } 3790 3791 /* 3792 * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry 3793 */ 3794 int ndo_dflt_fdb_add(struct ndmsg *ndm, 3795 struct nlattr *tb[], 3796 struct net_device *dev, 3797 const unsigned char *addr, u16 vid, 3798 u16 flags) 3799 { 3800 int err = -EINVAL; 3801 3802 /* If aging addresses are supported device will need to 3803 * implement its own handler for this. 3804 */ 3805 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { 3806 pr_info("%s: FDB only supports static addresses\n", dev->name); 3807 return err; 3808 } 3809 3810 if (vid) { 3811 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name); 3812 return err; 3813 } 3814 3815 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) 3816 err = dev_uc_add_excl(dev, addr); 3817 else if (is_multicast_ether_addr(addr)) 3818 err = dev_mc_add_excl(dev, addr); 3819 3820 /* Only return duplicate errors if NLM_F_EXCL is set */ 3821 if (err == -EEXIST && !(flags & NLM_F_EXCL)) 3822 err = 0; 3823 3824 return err; 3825 } 3826 EXPORT_SYMBOL(ndo_dflt_fdb_add); 3827 3828 static int fdb_vid_parse(struct nlattr *vlan_attr, u16 *p_vid, 3829 struct netlink_ext_ack *extack) 3830 { 3831 u16 vid = 0; 3832 3833 if (vlan_attr) { 3834 if (nla_len(vlan_attr) != sizeof(u16)) { 3835 NL_SET_ERR_MSG(extack, "invalid vlan attribute size"); 3836 return -EINVAL; 3837 } 3838 3839 vid = nla_get_u16(vlan_attr); 3840 3841 if (!vid || vid >= VLAN_VID_MASK) { 3842 NL_SET_ERR_MSG(extack, "invalid vlan id"); 3843 return -EINVAL; 3844 } 3845 } 3846 *p_vid = vid; 3847 return 0; 3848 } 3849 3850 static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, 3851 struct netlink_ext_ack *extack) 3852 { 3853 struct net *net = sock_net(skb->sk); 3854 struct ndmsg *ndm; 3855 struct nlattr *tb[NDA_MAX+1]; 3856 struct net_device *dev; 3857 u8 *addr; 3858 u16 vid; 3859 int err; 3860 3861 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, 3862 extack); 3863 if (err < 0) 3864 return err; 3865 3866 ndm = nlmsg_data(nlh); 3867 if (ndm->ndm_ifindex == 0) { 3868 NL_SET_ERR_MSG(extack, "invalid ifindex"); 3869 return -EINVAL; 3870 } 3871 3872 dev = __dev_get_by_index(net, ndm->ndm_ifindex); 3873 if (dev == NULL) { 3874 NL_SET_ERR_MSG(extack, "unknown ifindex"); 3875 return -ENODEV; 3876 } 3877 3878 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) { 3879 NL_SET_ERR_MSG(extack, "invalid address"); 3880 return -EINVAL; 3881 } 3882 3883 if (dev->type != ARPHRD_ETHER) { 3884 NL_SET_ERR_MSG(extack, "FDB add only supported for Ethernet devices"); 3885 return -EINVAL; 3886 } 3887 3888 addr = nla_data(tb[NDA_LLADDR]); 3889 3890 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack); 3891 if (err) 3892 return err; 3893 3894 err = -EOPNOTSUPP; 3895 3896 /* Support fdb on master device the net/bridge default case */ 3897 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) && 3898 (dev->priv_flags & IFF_BRIDGE_PORT)) { 3899 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 3900 const struct net_device_ops *ops = br_dev->netdev_ops; 3901 3902 err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid, 3903 nlh->nlmsg_flags, extack); 3904 if (err) 3905 goto out; 3906 else 3907 ndm->ndm_flags &= ~NTF_MASTER; 3908 } 3909 3910 /* Embedded bridge, macvlan, and any other device support */ 3911 if ((ndm->ndm_flags & NTF_SELF)) { 3912 if (dev->netdev_ops->ndo_fdb_add) 3913 err = dev->netdev_ops->ndo_fdb_add(ndm, 
tb, dev, addr, 3914 vid, 3915 nlh->nlmsg_flags, 3916 extack); 3917 else 3918 err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, 3919 nlh->nlmsg_flags); 3920 3921 if (!err) { 3922 rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH, 3923 ndm->ndm_state); 3924 ndm->ndm_flags &= ~NTF_SELF; 3925 } 3926 } 3927 out: 3928 return err; 3929 } 3930 3931 /* 3932 * ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry 3933 */ 3934 int ndo_dflt_fdb_del(struct ndmsg *ndm, 3935 struct nlattr *tb[], 3936 struct net_device *dev, 3937 const unsigned char *addr, u16 vid) 3938 { 3939 int err = -EINVAL; 3940 3941 /* If aging addresses are supported device will need to 3942 * implement its own handler for this. 3943 */ 3944 if (!(ndm->ndm_state & NUD_PERMANENT)) { 3945 pr_info("%s: FDB only supports static addresses\n", dev->name); 3946 return err; 3947 } 3948 3949 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) 3950 err = dev_uc_del(dev, addr); 3951 else if (is_multicast_ether_addr(addr)) 3952 err = dev_mc_del(dev, addr); 3953 3954 return err; 3955 } 3956 EXPORT_SYMBOL(ndo_dflt_fdb_del); 3957 3958 static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, 3959 struct netlink_ext_ack *extack) 3960 { 3961 struct net *net = sock_net(skb->sk); 3962 struct ndmsg *ndm; 3963 struct nlattr *tb[NDA_MAX+1]; 3964 struct net_device *dev; 3965 int err = -EINVAL; 3966 __u8 *addr; 3967 u16 vid; 3968 3969 if (!netlink_capable(skb, CAP_NET_ADMIN)) 3970 return -EPERM; 3971 3972 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, 3973 extack); 3974 if (err < 0) 3975 return err; 3976 3977 ndm = nlmsg_data(nlh); 3978 if (ndm->ndm_ifindex == 0) { 3979 NL_SET_ERR_MSG(extack, "invalid ifindex"); 3980 return -EINVAL; 3981 } 3982 3983 dev = __dev_get_by_index(net, ndm->ndm_ifindex); 3984 if (dev == NULL) { 3985 NL_SET_ERR_MSG(extack, "unknown ifindex"); 3986 return -ENODEV; 3987 } 3988 3989 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) { 3990 NL_SET_ERR_MSG(extack, "invalid address"); 3991 return -EINVAL; 3992 } 3993 3994 if (dev->type != ARPHRD_ETHER) { 3995 NL_SET_ERR_MSG(extack, "FDB delete only supported for Ethernet devices"); 3996 return -EINVAL; 3997 } 3998 3999 addr = nla_data(tb[NDA_LLADDR]); 4000 4001 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack); 4002 if (err) 4003 return err; 4004 4005 err = -EOPNOTSUPP; 4006 4007 /* Support fdb on master device the net/bridge default case */ 4008 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) && 4009 (dev->priv_flags & IFF_BRIDGE_PORT)) { 4010 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 4011 const struct net_device_ops *ops = br_dev->netdev_ops; 4012 4013 if (ops->ndo_fdb_del) 4014 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid); 4015 4016 if (err) 4017 goto out; 4018 else 4019 ndm->ndm_flags &= ~NTF_MASTER; 4020 } 4021 4022 /* Embedded bridge, macvlan, and any other device support */ 4023 if (ndm->ndm_flags & NTF_SELF) { 4024 if (dev->netdev_ops->ndo_fdb_del) 4025 err = dev->netdev_ops->ndo_fdb_del(ndm, tb, dev, addr, 4026 vid); 4027 else 4028 err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid); 4029 4030 if (!err) { 4031 rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH, 4032 ndm->ndm_state); 4033 ndm->ndm_flags &= ~NTF_SELF; 4034 } 4035 } 4036 out: 4037 return err; 4038 } 4039 4040 static int nlmsg_populate_fdb(struct sk_buff *skb, 4041 struct netlink_callback *cb, 4042 struct net_device *dev, 4043 int *idx, 4044 struct netdev_hw_addr_list *list) 4045 { 4046 struct netdev_hw_addr *ha; 4047 int err; 4048 u32 
portid, seq; 4049 4050 portid = NETLINK_CB(cb->skb).portid; 4051 seq = cb->nlh->nlmsg_seq; 4052 4053 list_for_each_entry(ha, &list->list, list) { 4054 if (*idx < cb->args[2]) 4055 goto skip; 4056 4057 err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0, 4058 portid, seq, 4059 RTM_NEWNEIGH, NTF_SELF, 4060 NLM_F_MULTI, NUD_PERMANENT); 4061 if (err < 0) 4062 return err; 4063 skip: 4064 *idx += 1; 4065 } 4066 return 0; 4067 } 4068 4069 /** 4070 * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table. 4071 * @skb: socket buffer to store message in 4072 * @cb: netlink callback 4073 * @dev: netdevice 4074 * @filter_dev: ignored 4075 * @idx: the number of FDB table entries dumped is added to *@idx 4076 * 4077 * Default netdevice operation to dump the existing unicast address list. 4078 * Returns number of addresses from list put in skb. 4079 */ 4080 int ndo_dflt_fdb_dump(struct sk_buff *skb, 4081 struct netlink_callback *cb, 4082 struct net_device *dev, 4083 struct net_device *filter_dev, 4084 int *idx) 4085 { 4086 int err; 4087 4088 if (dev->type != ARPHRD_ETHER) 4089 return -EINVAL; 4090 4091 netif_addr_lock_bh(dev); 4092 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc); 4093 if (err) 4094 goto out; 4095 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->mc); 4096 out: 4097 netif_addr_unlock_bh(dev); 4098 return err; 4099 } 4100 EXPORT_SYMBOL(ndo_dflt_fdb_dump); 4101 4102 static int valid_fdb_dump_strict(const struct nlmsghdr *nlh, 4103 int *br_idx, int *brport_idx, 4104 struct netlink_ext_ack *extack) 4105 { 4106 struct nlattr *tb[NDA_MAX + 1]; 4107 struct ndmsg *ndm; 4108 int err, i; 4109 4110 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) { 4111 NL_SET_ERR_MSG(extack, "Invalid header for fdb dump request"); 4112 return -EINVAL; 4113 } 4114 4115 ndm = nlmsg_data(nlh); 4116 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state || 4117 ndm->ndm_flags || ndm->ndm_type) { 4118 NL_SET_ERR_MSG(extack, "Invalid values in header for fdb dump request"); 4119 return -EINVAL; 4120 } 4121 4122 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb, 4123 NDA_MAX, NULL, extack); 4124 if (err < 0) 4125 return err; 4126 4127 *brport_idx = ndm->ndm_ifindex; 4128 for (i = 0; i <= NDA_MAX; ++i) { 4129 if (!tb[i]) 4130 continue; 4131 4132 switch (i) { 4133 case NDA_IFINDEX: 4134 if (nla_len(tb[i]) != sizeof(u32)) { 4135 NL_SET_ERR_MSG(extack, "Invalid IFINDEX attribute in fdb dump request"); 4136 return -EINVAL; 4137 } 4138 *brport_idx = nla_get_u32(tb[NDA_IFINDEX]); 4139 break; 4140 case NDA_MASTER: 4141 if (nla_len(tb[i]) != sizeof(u32)) { 4142 NL_SET_ERR_MSG(extack, "Invalid MASTER attribute in fdb dump request"); 4143 return -EINVAL; 4144 } 4145 *br_idx = nla_get_u32(tb[NDA_MASTER]); 4146 break; 4147 default: 4148 NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb dump request"); 4149 return -EINVAL; 4150 } 4151 } 4152 4153 return 0; 4154 } 4155 4156 static int valid_fdb_dump_legacy(const struct nlmsghdr *nlh, 4157 int *br_idx, int *brport_idx, 4158 struct netlink_ext_ack *extack) 4159 { 4160 struct nlattr *tb[IFLA_MAX+1]; 4161 int err; 4162 4163 /* A hack to preserve kernel<->userspace interface. 4164 * Before Linux v4.12 this code accepted ndmsg since iproute2 v3.3.0. 4165 * However, ndmsg is shorter than ifinfomsg thus nlmsg_parse() bails. 4166 * So, check for ndmsg with an optional u32 attribute (not used here). 4167 * Fortunately these sizes don't conflict with the size of ifinfomsg 4168 * with an optional attribute. 
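 * (Concretely, struct ndmsg is 12 bytes and ndmsg plus one u32
 * attribute is 20, while bare struct ifinfomsg is 16 bytes, so the
 * two legacy layouts are distinguishable by length alone.)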
4169 */ 4170 if (nlmsg_len(nlh) != sizeof(struct ndmsg) && 4171 (nlmsg_len(nlh) != sizeof(struct ndmsg) + 4172 nla_attr_size(sizeof(u32)))) { 4173 struct ifinfomsg *ifm; 4174 4175 err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg), 4176 tb, IFLA_MAX, ifla_policy, 4177 extack); 4178 if (err < 0) { 4179 return -EINVAL; 4180 } else if (err == 0) { 4181 if (tb[IFLA_MASTER]) 4182 *br_idx = nla_get_u32(tb[IFLA_MASTER]); 4183 } 4184 4185 ifm = nlmsg_data(nlh); 4186 *brport_idx = ifm->ifi_index; 4187 } 4188 return 0; 4189 } 4190 4191 static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb) 4192 { 4193 struct net_device *dev; 4194 struct net_device *br_dev = NULL; 4195 const struct net_device_ops *ops = NULL; 4196 const struct net_device_ops *cops = NULL; 4197 struct net *net = sock_net(skb->sk); 4198 struct hlist_head *head; 4199 int brport_idx = 0; 4200 int br_idx = 0; 4201 int h, s_h; 4202 int idx = 0, s_idx; 4203 int err = 0; 4204 int fidx = 0; 4205 4206 if (cb->strict_check) 4207 err = valid_fdb_dump_strict(cb->nlh, &br_idx, &brport_idx, 4208 cb->extack); 4209 else 4210 err = valid_fdb_dump_legacy(cb->nlh, &br_idx, &brport_idx, 4211 cb->extack); 4212 if (err < 0) 4213 return err; 4214 4215 if (br_idx) { 4216 br_dev = __dev_get_by_index(net, br_idx); 4217 if (!br_dev) 4218 return -ENODEV; 4219 4220 ops = br_dev->netdev_ops; 4221 } 4222 4223 s_h = cb->args[0]; 4224 s_idx = cb->args[1]; 4225 4226 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { 4227 idx = 0; 4228 head = &net->dev_index_head[h]; 4229 hlist_for_each_entry(dev, head, index_hlist) { 4230 4231 if (brport_idx && (dev->ifindex != brport_idx)) 4232 continue; 4233 4234 if (!br_idx) { /* user did not specify a specific bridge */ 4235 if (dev->priv_flags & IFF_BRIDGE_PORT) { 4236 br_dev = netdev_master_upper_dev_get(dev); 4237 cops = br_dev->netdev_ops; 4238 } 4239 } else { 4240 if (dev != br_dev && 4241 !(dev->priv_flags & IFF_BRIDGE_PORT)) 4242 continue; 4243 4244 if (br_dev != netdev_master_upper_dev_get(dev) && 4245 !(dev->priv_flags & IFF_EBRIDGE)) 4246 continue; 4247 cops = ops; 4248 } 4249 4250 if (idx < s_idx) 4251 goto cont; 4252 4253 if (dev->priv_flags & IFF_BRIDGE_PORT) { 4254 if (cops && cops->ndo_fdb_dump) { 4255 err = cops->ndo_fdb_dump(skb, cb, 4256 br_dev, dev, 4257 &fidx); 4258 if (err == -EMSGSIZE) 4259 goto out; 4260 } 4261 } 4262 4263 if (dev->netdev_ops->ndo_fdb_dump) 4264 err = dev->netdev_ops->ndo_fdb_dump(skb, cb, 4265 dev, NULL, 4266 &fidx); 4267 else 4268 err = ndo_dflt_fdb_dump(skb, cb, dev, NULL, 4269 &fidx); 4270 if (err == -EMSGSIZE) 4271 goto out; 4272 4273 cops = NULL; 4274 4275 /* reset fdb offset to 0 for rest of the interfaces */ 4276 cb->args[2] = 0; 4277 fidx = 0; 4278 cont: 4279 idx++; 4280 } 4281 } 4282 4283 out: 4284 cb->args[0] = h; 4285 cb->args[1] = idx; 4286 cb->args[2] = fidx; 4287 4288 return skb->len; 4289 } 4290 4291 static int valid_fdb_get_strict(const struct nlmsghdr *nlh, 4292 struct nlattr **tb, u8 *ndm_flags, 4293 int *br_idx, int *brport_idx, u8 **addr, 4294 u16 *vid, struct netlink_ext_ack *extack) 4295 { 4296 struct ndmsg *ndm; 4297 int err, i; 4298 4299 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) { 4300 NL_SET_ERR_MSG(extack, "Invalid header for fdb get request"); 4301 return -EINVAL; 4302 } 4303 4304 ndm = nlmsg_data(nlh); 4305 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state || 4306 ndm->ndm_type) { 4307 NL_SET_ERR_MSG(extack, "Invalid values in header for fdb get request"); 4308 return -EINVAL; 4309 } 4310 4311 if (ndm->ndm_flags & 
~(NTF_MASTER | NTF_SELF)) { 4312 NL_SET_ERR_MSG(extack, "Invalid flags in header for fdb get request"); 4313 return -EINVAL; 4314 } 4315 4316 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb, 4317 NDA_MAX, nda_policy, extack); 4318 if (err < 0) 4319 return err; 4320 4321 *ndm_flags = ndm->ndm_flags; 4322 *brport_idx = ndm->ndm_ifindex; 4323 for (i = 0; i <= NDA_MAX; ++i) { 4324 if (!tb[i]) 4325 continue; 4326 4327 switch (i) { 4328 case NDA_MASTER: 4329 *br_idx = nla_get_u32(tb[i]); 4330 break; 4331 case NDA_LLADDR: 4332 if (nla_len(tb[i]) != ETH_ALEN) { 4333 NL_SET_ERR_MSG(extack, "Invalid address in fdb get request"); 4334 return -EINVAL; 4335 } 4336 *addr = nla_data(tb[i]); 4337 break; 4338 case NDA_VLAN: 4339 err = fdb_vid_parse(tb[i], vid, extack); 4340 if (err) 4341 return err; 4342 break; 4343 case NDA_VNI: 4344 break; 4345 default: 4346 NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb get request"); 4347 return -EINVAL; 4348 } 4349 } 4350 4351 return 0; 4352 } 4353 4354 static int rtnl_fdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh, 4355 struct netlink_ext_ack *extack) 4356 { 4357 struct net_device *dev = NULL, *br_dev = NULL; 4358 const struct net_device_ops *ops = NULL; 4359 struct net *net = sock_net(in_skb->sk); 4360 struct nlattr *tb[NDA_MAX + 1]; 4361 struct sk_buff *skb; 4362 int brport_idx = 0; 4363 u8 ndm_flags = 0; 4364 int br_idx = 0; 4365 u8 *addr = NULL; 4366 u16 vid = 0; 4367 int err; 4368 4369 err = valid_fdb_get_strict(nlh, tb, &ndm_flags, &br_idx, 4370 &brport_idx, &addr, &vid, extack); 4371 if (err < 0) 4372 return err; 4373 4374 if (!addr) { 4375 NL_SET_ERR_MSG(extack, "Missing lookup address for fdb get request"); 4376 return -EINVAL; 4377 } 4378 4379 if (brport_idx) { 4380 dev = __dev_get_by_index(net, brport_idx); 4381 if (!dev) { 4382 NL_SET_ERR_MSG(extack, "Unknown device ifindex"); 4383 return -ENODEV; 4384 } 4385 } 4386 4387 if (br_idx) { 4388 if (dev) { 4389 NL_SET_ERR_MSG(extack, "Master and device are mutually exclusive"); 4390 return -EINVAL; 4391 } 4392 4393 br_dev = __dev_get_by_index(net, br_idx); 4394 if (!br_dev) { 4395 NL_SET_ERR_MSG(extack, "Invalid master ifindex"); 4396 return -EINVAL; 4397 } 4398 ops = br_dev->netdev_ops; 4399 } 4400 4401 if (dev) { 4402 if (!ndm_flags || (ndm_flags & NTF_MASTER)) { 4403 if (!(dev->priv_flags & IFF_BRIDGE_PORT)) { 4404 NL_SET_ERR_MSG(extack, "Device is not a bridge port"); 4405 return -EINVAL; 4406 } 4407 br_dev = netdev_master_upper_dev_get(dev); 4408 if (!br_dev) { 4409 NL_SET_ERR_MSG(extack, "Master of device not found"); 4410 return -EINVAL; 4411 } 4412 ops = br_dev->netdev_ops; 4413 } else { 4414 if (!(ndm_flags & NTF_SELF)) { 4415 NL_SET_ERR_MSG(extack, "Missing NTF_SELF"); 4416 return -EINVAL; 4417 } 4418 ops = dev->netdev_ops; 4419 } 4420 } 4421 4422 if (!br_dev && !dev) { 4423 NL_SET_ERR_MSG(extack, "No device specified"); 4424 return -ENODEV; 4425 } 4426 4427 if (!ops || !ops->ndo_fdb_get) { 4428 NL_SET_ERR_MSG(extack, "Fdb get operation not supported by device"); 4429 return -EOPNOTSUPP; 4430 } 4431 4432 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 4433 if (!skb) 4434 return -ENOBUFS; 4435 4436 if (br_dev) 4437 dev = br_dev; 4438 err = ops->ndo_fdb_get(skb, tb, dev, addr, vid, 4439 NETLINK_CB(in_skb).portid, 4440 nlh->nlmsg_seq, extack); 4441 if (err) 4442 goto out; 4443 4444 return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid); 4445 out: 4446 kfree_skb(skb); 4447 return err; 4448 } 4449 4450 static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask, 
4451 unsigned int attrnum, unsigned int flag) 4452 { 4453 if (mask & flag) 4454 return nla_put_u8(skb, attrnum, !!(flags & flag)); 4455 return 0; 4456 } 4457 4458 int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 4459 struct net_device *dev, u16 mode, 4460 u32 flags, u32 mask, int nlflags, 4461 u32 filter_mask, 4462 int (*vlan_fill)(struct sk_buff *skb, 4463 struct net_device *dev, 4464 u32 filter_mask)) 4465 { 4466 struct nlmsghdr *nlh; 4467 struct ifinfomsg *ifm; 4468 struct nlattr *br_afspec; 4469 struct nlattr *protinfo; 4470 u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN; 4471 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 4472 int err = 0; 4473 4474 nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags); 4475 if (nlh == NULL) 4476 return -EMSGSIZE; 4477 4478 ifm = nlmsg_data(nlh); 4479 ifm->ifi_family = AF_BRIDGE; 4480 ifm->__ifi_pad = 0; 4481 ifm->ifi_type = dev->type; 4482 ifm->ifi_index = dev->ifindex; 4483 ifm->ifi_flags = dev_get_flags(dev); 4484 ifm->ifi_change = 0; 4485 4486 4487 if (nla_put_string(skb, IFLA_IFNAME, dev->name) || 4488 nla_put_u32(skb, IFLA_MTU, dev->mtu) || 4489 nla_put_u8(skb, IFLA_OPERSTATE, operstate) || 4490 (br_dev && 4491 nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) || 4492 (dev->addr_len && 4493 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) || 4494 (dev->ifindex != dev_get_iflink(dev) && 4495 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev)))) 4496 goto nla_put_failure; 4497 4498 br_afspec = nla_nest_start_noflag(skb, IFLA_AF_SPEC); 4499 if (!br_afspec) 4500 goto nla_put_failure; 4501 4502 if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF)) { 4503 nla_nest_cancel(skb, br_afspec); 4504 goto nla_put_failure; 4505 } 4506 4507 if (mode != BRIDGE_MODE_UNDEF) { 4508 if (nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) { 4509 nla_nest_cancel(skb, br_afspec); 4510 goto nla_put_failure; 4511 } 4512 } 4513 if (vlan_fill) { 4514 err = vlan_fill(skb, dev, filter_mask); 4515 if (err) { 4516 nla_nest_cancel(skb, br_afspec); 4517 goto nla_put_failure; 4518 } 4519 } 4520 nla_nest_end(skb, br_afspec); 4521 4522 protinfo = nla_nest_start(skb, IFLA_PROTINFO); 4523 if (!protinfo) 4524 goto nla_put_failure; 4525 4526 if (brport_nla_put_flag(skb, flags, mask, 4527 IFLA_BRPORT_MODE, BR_HAIRPIN_MODE) || 4528 brport_nla_put_flag(skb, flags, mask, 4529 IFLA_BRPORT_GUARD, BR_BPDU_GUARD) || 4530 brport_nla_put_flag(skb, flags, mask, 4531 IFLA_BRPORT_FAST_LEAVE, 4532 BR_MULTICAST_FAST_LEAVE) || 4533 brport_nla_put_flag(skb, flags, mask, 4534 IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK) || 4535 brport_nla_put_flag(skb, flags, mask, 4536 IFLA_BRPORT_LEARNING, BR_LEARNING) || 4537 brport_nla_put_flag(skb, flags, mask, 4538 IFLA_BRPORT_LEARNING_SYNC, BR_LEARNING_SYNC) || 4539 brport_nla_put_flag(skb, flags, mask, 4540 IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD) || 4541 brport_nla_put_flag(skb, flags, mask, 4542 IFLA_BRPORT_PROXYARP, BR_PROXYARP)) { 4543 nla_nest_cancel(skb, protinfo); 4544 goto nla_put_failure; 4545 } 4546 4547 nla_nest_end(skb, protinfo); 4548 4549 nlmsg_end(skb, nlh); 4550 return 0; 4551 nla_put_failure: 4552 nlmsg_cancel(skb, nlh); 4553 return err ? 
err : -EMSGSIZE; 4554 } 4555 EXPORT_SYMBOL_GPL(ndo_dflt_bridge_getlink); 4556 4557 static int valid_bridge_getlink_req(const struct nlmsghdr *nlh, 4558 bool strict_check, u32 *filter_mask, 4559 struct netlink_ext_ack *extack) 4560 { 4561 struct nlattr *tb[IFLA_MAX+1]; 4562 int err, i; 4563 4564 if (strict_check) { 4565 struct ifinfomsg *ifm; 4566 4567 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) { 4568 NL_SET_ERR_MSG(extack, "Invalid header for bridge link dump"); 4569 return -EINVAL; 4570 } 4571 4572 ifm = nlmsg_data(nlh); 4573 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags || 4574 ifm->ifi_change || ifm->ifi_index) { 4575 NL_SET_ERR_MSG(extack, "Invalid values in header for bridge link dump request"); 4576 return -EINVAL; 4577 } 4578 4579 err = nlmsg_parse_deprecated_strict(nlh, 4580 sizeof(struct ifinfomsg), 4581 tb, IFLA_MAX, ifla_policy, 4582 extack); 4583 } else { 4584 err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg), 4585 tb, IFLA_MAX, ifla_policy, 4586 extack); 4587 } 4588 if (err < 0) 4589 return err; 4590 4591 /* new attributes should only be added with strict checking */ 4592 for (i = 0; i <= IFLA_MAX; ++i) { 4593 if (!tb[i]) 4594 continue; 4595 4596 switch (i) { 4597 case IFLA_EXT_MASK: 4598 *filter_mask = nla_get_u32(tb[i]); 4599 break; 4600 default: 4601 if (strict_check) { 4602 NL_SET_ERR_MSG(extack, "Unsupported attribute in bridge link dump request"); 4603 return -EINVAL; 4604 } 4605 } 4606 } 4607 4608 return 0; 4609 } 4610 4611 static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb) 4612 { 4613 const struct nlmsghdr *nlh = cb->nlh; 4614 struct net *net = sock_net(skb->sk); 4615 struct net_device *dev; 4616 int idx = 0; 4617 u32 portid = NETLINK_CB(cb->skb).portid; 4618 u32 seq = nlh->nlmsg_seq; 4619 u32 filter_mask = 0; 4620 int err; 4621 4622 err = valid_bridge_getlink_req(nlh, cb->strict_check, &filter_mask, 4623 cb->extack); 4624 if (err < 0 && cb->strict_check) 4625 return err; 4626 4627 rcu_read_lock(); 4628 for_each_netdev_rcu(net, dev) { 4629 const struct net_device_ops *ops = dev->netdev_ops; 4630 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 4631 4632 if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) { 4633 if (idx >= cb->args[0]) { 4634 err = br_dev->netdev_ops->ndo_bridge_getlink( 4635 skb, portid, seq, dev, 4636 filter_mask, NLM_F_MULTI); 4637 if (err < 0 && err != -EOPNOTSUPP) { 4638 if (likely(skb->len)) 4639 break; 4640 4641 goto out_err; 4642 } 4643 } 4644 idx++; 4645 } 4646 4647 if (ops->ndo_bridge_getlink) { 4648 if (idx >= cb->args[0]) { 4649 err = ops->ndo_bridge_getlink(skb, portid, 4650 seq, dev, 4651 filter_mask, 4652 NLM_F_MULTI); 4653 if (err < 0 && err != -EOPNOTSUPP) { 4654 if (likely(skb->len)) 4655 break; 4656 4657 goto out_err; 4658 } 4659 } 4660 idx++; 4661 } 4662 } 4663 err = skb->len; 4664 out_err: 4665 rcu_read_unlock(); 4666 cb->args[0] = idx; 4667 4668 return err; 4669 } 4670 4671 static inline size_t bridge_nlmsg_size(void) 4672 { 4673 return NLMSG_ALIGN(sizeof(struct ifinfomsg)) 4674 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */ 4675 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */ 4676 + nla_total_size(sizeof(u32)) /* IFLA_MASTER */ 4677 + nla_total_size(sizeof(u32)) /* IFLA_MTU */ 4678 + nla_total_size(sizeof(u32)) /* IFLA_LINK */ 4679 + nla_total_size(sizeof(u32)) /* IFLA_OPERSTATE */ 4680 + nla_total_size(sizeof(u8)) /* IFLA_PROTINFO */ 4681 + nla_total_size(sizeof(struct nlattr)) /* IFLA_AF_SPEC */ 4682 + nla_total_size(sizeof(u16)) /* IFLA_BRIDGE_FLAGS */ 4683 + 
nla_total_size(sizeof(u16)); /* IFLA_BRIDGE_MODE */ 4684 } 4685 4686 static int rtnl_bridge_notify(struct net_device *dev) 4687 { 4688 struct net *net = dev_net(dev); 4689 struct sk_buff *skb; 4690 int err = -EOPNOTSUPP; 4691 4692 if (!dev->netdev_ops->ndo_bridge_getlink) 4693 return 0; 4694 4695 skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC); 4696 if (!skb) { 4697 err = -ENOMEM; 4698 goto errout; 4699 } 4700 4701 err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0); 4702 if (err < 0) 4703 goto errout; 4704 4705 if (!skb->len) 4706 goto errout; 4707 4708 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC); 4709 return 0; 4710 errout: 4711 WARN_ON(err == -EMSGSIZE); 4712 kfree_skb(skb); 4713 if (err) 4714 rtnl_set_sk_err(net, RTNLGRP_LINK, err); 4715 return err; 4716 } 4717 4718 static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, 4719 struct netlink_ext_ack *extack) 4720 { 4721 struct net *net = sock_net(skb->sk); 4722 struct ifinfomsg *ifm; 4723 struct net_device *dev; 4724 struct nlattr *br_spec, *attr = NULL; 4725 int rem, err = -EOPNOTSUPP; 4726 u16 flags = 0; 4727 bool have_flags = false; 4728 4729 if (nlmsg_len(nlh) < sizeof(*ifm)) 4730 return -EINVAL; 4731 4732 ifm = nlmsg_data(nlh); 4733 if (ifm->ifi_family != AF_BRIDGE) 4734 return -EPFNOSUPPORT; 4735 4736 dev = __dev_get_by_index(net, ifm->ifi_index); 4737 if (!dev) { 4738 NL_SET_ERR_MSG(extack, "unknown ifindex"); 4739 return -ENODEV; 4740 } 4741 4742 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 4743 if (br_spec) { 4744 nla_for_each_nested(attr, br_spec, rem) { 4745 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) { 4746 if (nla_len(attr) < sizeof(flags)) 4747 return -EINVAL; 4748 4749 have_flags = true; 4750 flags = nla_get_u16(attr); 4751 break; 4752 } 4753 } 4754 } 4755 4756 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) { 4757 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 4758 4759 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_setlink) { 4760 err = -EOPNOTSUPP; 4761 goto out; 4762 } 4763 4764 err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags, 4765 extack); 4766 if (err) 4767 goto out; 4768 4769 flags &= ~BRIDGE_FLAGS_MASTER; 4770 } 4771 4772 if ((flags & BRIDGE_FLAGS_SELF)) { 4773 if (!dev->netdev_ops->ndo_bridge_setlink) 4774 err = -EOPNOTSUPP; 4775 else 4776 err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh, 4777 flags, 4778 extack); 4779 if (!err) { 4780 flags &= ~BRIDGE_FLAGS_SELF; 4781 4782 /* Generate event to notify upper layer of bridge 4783 * change 4784 */ 4785 err = rtnl_bridge_notify(dev); 4786 } 4787 } 4788 4789 if (have_flags) 4790 memcpy(nla_data(attr), &flags, sizeof(flags)); 4791 out: 4792 return err; 4793 } 4794 4795 static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, 4796 struct netlink_ext_ack *extack) 4797 { 4798 struct net *net = sock_net(skb->sk); 4799 struct ifinfomsg *ifm; 4800 struct net_device *dev; 4801 struct nlattr *br_spec, *attr = NULL; 4802 int rem, err = -EOPNOTSUPP; 4803 u16 flags = 0; 4804 bool have_flags = false; 4805 4806 if (nlmsg_len(nlh) < sizeof(*ifm)) 4807 return -EINVAL; 4808 4809 ifm = nlmsg_data(nlh); 4810 if (ifm->ifi_family != AF_BRIDGE) 4811 return -EPFNOSUPPORT; 4812 4813 dev = __dev_get_by_index(net, ifm->ifi_index); 4814 if (!dev) { 4815 NL_SET_ERR_MSG(extack, "unknown ifindex"); 4816 return -ENODEV; 4817 } 4818 4819 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 4820 if (br_spec) { 4821 nla_for_each_nested(attr, br_spec, rem) { 4822 if 
(nla_type(attr) == IFLA_BRIDGE_FLAGS) { 4823 if (nla_len(attr) < sizeof(flags)) 4824 return -EINVAL; 4825 4826 have_flags = true; 4827 flags = nla_get_u16(attr); 4828 break; 4829 } 4830 } 4831 } 4832 4833 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) { 4834 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 4835 4836 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) { 4837 err = -EOPNOTSUPP; 4838 goto out; 4839 } 4840 4841 err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh, flags); 4842 if (err) 4843 goto out; 4844 4845 flags &= ~BRIDGE_FLAGS_MASTER; 4846 } 4847 4848 if ((flags & BRIDGE_FLAGS_SELF)) { 4849 if (!dev->netdev_ops->ndo_bridge_dellink) 4850 err = -EOPNOTSUPP; 4851 else 4852 err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh, 4853 flags); 4854 4855 if (!err) { 4856 flags &= ~BRIDGE_FLAGS_SELF; 4857 4858 /* Generate event to notify upper layer of bridge 4859 * change 4860 */ 4861 err = rtnl_bridge_notify(dev); 4862 } 4863 } 4864 4865 if (have_flags) 4866 memcpy(nla_data(attr), &flags, sizeof(flags)); 4867 out: 4868 return err; 4869 } 4870 4871 static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr) 4872 { 4873 return (mask & IFLA_STATS_FILTER_BIT(attrid)) && 4874 (!idxattr || idxattr == attrid); 4875 } 4876 4877 #define IFLA_OFFLOAD_XSTATS_FIRST (IFLA_OFFLOAD_XSTATS_UNSPEC + 1) 4878 static int rtnl_get_offload_stats_attr_size(int attr_id) 4879 { 4880 switch (attr_id) { 4881 case IFLA_OFFLOAD_XSTATS_CPU_HIT: 4882 return sizeof(struct rtnl_link_stats64); 4883 } 4884 4885 return 0; 4886 } 4887 4888 static int rtnl_get_offload_stats(struct sk_buff *skb, struct net_device *dev, 4889 int *prividx) 4890 { 4891 struct nlattr *attr = NULL; 4892 int attr_id, size; 4893 void *attr_data; 4894 int err; 4895 4896 if (!(dev->netdev_ops && dev->netdev_ops->ndo_has_offload_stats && 4897 dev->netdev_ops->ndo_get_offload_stats)) 4898 return -ENODATA; 4899 4900 for (attr_id = IFLA_OFFLOAD_XSTATS_FIRST; 4901 attr_id <= IFLA_OFFLOAD_XSTATS_MAX; attr_id++) { 4902 if (attr_id < *prividx) 4903 continue; 4904 4905 size = rtnl_get_offload_stats_attr_size(attr_id); 4906 if (!size) 4907 continue; 4908 4909 if (!dev->netdev_ops->ndo_has_offload_stats(dev, attr_id)) 4910 continue; 4911 4912 attr = nla_reserve_64bit(skb, attr_id, size, 4913 IFLA_OFFLOAD_XSTATS_UNSPEC); 4914 if (!attr) 4915 goto nla_put_failure; 4916 4917 attr_data = nla_data(attr); 4918 memset(attr_data, 0, size); 4919 err = dev->netdev_ops->ndo_get_offload_stats(attr_id, dev, 4920 attr_data); 4921 if (err) 4922 goto get_offload_stats_failure; 4923 } 4924 4925 if (!attr) 4926 return -ENODATA; 4927 4928 *prividx = 0; 4929 return 0; 4930 4931 nla_put_failure: 4932 err = -EMSGSIZE; 4933 get_offload_stats_failure: 4934 *prividx = attr_id; 4935 return err; 4936 } 4937 4938 static int rtnl_get_offload_stats_size(const struct net_device *dev) 4939 { 4940 int nla_size = 0; 4941 int attr_id; 4942 int size; 4943 4944 if (!(dev->netdev_ops && dev->netdev_ops->ndo_has_offload_stats && 4945 dev->netdev_ops->ndo_get_offload_stats)) 4946 return 0; 4947 4948 for (attr_id = IFLA_OFFLOAD_XSTATS_FIRST; 4949 attr_id <= IFLA_OFFLOAD_XSTATS_MAX; attr_id++) { 4950 if (!dev->netdev_ops->ndo_has_offload_stats(dev, attr_id)) 4951 continue; 4952 size = rtnl_get_offload_stats_attr_size(attr_id); 4953 nla_size += nla_total_size_64bit(size); 4954 } 4955 4956 if (nla_size != 0) 4957 nla_size += nla_total_size(0); 4958 4959 return nla_size; 4960 } 4961 4962 static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev, 4963 int 
type, u32 pid, u32 seq, u32 change, 4964 unsigned int flags, unsigned int filter_mask, 4965 int *idxattr, int *prividx) 4966 { 4967 struct if_stats_msg *ifsm; 4968 struct nlmsghdr *nlh; 4969 struct nlattr *attr; 4970 int s_prividx = *prividx; 4971 int err; 4972 4973 ASSERT_RTNL(); 4974 4975 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifsm), flags); 4976 if (!nlh) 4977 return -EMSGSIZE; 4978 4979 ifsm = nlmsg_data(nlh); 4980 ifsm->family = PF_UNSPEC; 4981 ifsm->pad1 = 0; 4982 ifsm->pad2 = 0; 4983 ifsm->ifindex = dev->ifindex; 4984 ifsm->filter_mask = filter_mask; 4985 4986 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, *idxattr)) { 4987 struct rtnl_link_stats64 *sp; 4988 4989 attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64, 4990 sizeof(struct rtnl_link_stats64), 4991 IFLA_STATS_UNSPEC); 4992 if (!attr) 4993 goto nla_put_failure; 4994 4995 sp = nla_data(attr); 4996 dev_get_stats(dev, sp); 4997 } 4998 4999 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, *idxattr)) { 5000 const struct rtnl_link_ops *ops = dev->rtnl_link_ops; 5001 5002 if (ops && ops->fill_linkxstats) { 5003 *idxattr = IFLA_STATS_LINK_XSTATS; 5004 attr = nla_nest_start_noflag(skb, 5005 IFLA_STATS_LINK_XSTATS); 5006 if (!attr) 5007 goto nla_put_failure; 5008 5009 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr); 5010 nla_nest_end(skb, attr); 5011 if (err) 5012 goto nla_put_failure; 5013 *idxattr = 0; 5014 } 5015 } 5016 5017 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 5018 *idxattr)) { 5019 const struct rtnl_link_ops *ops = NULL; 5020 const struct net_device *master; 5021 5022 master = netdev_master_upper_dev_get(dev); 5023 if (master) 5024 ops = master->rtnl_link_ops; 5025 if (ops && ops->fill_linkxstats) { 5026 *idxattr = IFLA_STATS_LINK_XSTATS_SLAVE; 5027 attr = nla_nest_start_noflag(skb, 5028 IFLA_STATS_LINK_XSTATS_SLAVE); 5029 if (!attr) 5030 goto nla_put_failure; 5031 5032 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr); 5033 nla_nest_end(skb, attr); 5034 if (err) 5035 goto nla_put_failure; 5036 *idxattr = 0; 5037 } 5038 } 5039 5040 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 5041 *idxattr)) { 5042 *idxattr = IFLA_STATS_LINK_OFFLOAD_XSTATS; 5043 attr = nla_nest_start_noflag(skb, 5044 IFLA_STATS_LINK_OFFLOAD_XSTATS); 5045 if (!attr) 5046 goto nla_put_failure; 5047 5048 err = rtnl_get_offload_stats(skb, dev, prividx); 5049 if (err == -ENODATA) 5050 nla_nest_cancel(skb, attr); 5051 else 5052 nla_nest_end(skb, attr); 5053 5054 if (err && err != -ENODATA) 5055 goto nla_put_failure; 5056 *idxattr = 0; 5057 } 5058 5059 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, *idxattr)) { 5060 struct rtnl_af_ops *af_ops; 5061 5062 *idxattr = IFLA_STATS_AF_SPEC; 5063 attr = nla_nest_start_noflag(skb, IFLA_STATS_AF_SPEC); 5064 if (!attr) 5065 goto nla_put_failure; 5066 5067 rcu_read_lock(); 5068 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) { 5069 if (af_ops->fill_stats_af) { 5070 struct nlattr *af; 5071 int err; 5072 5073 af = nla_nest_start_noflag(skb, 5074 af_ops->family); 5075 if (!af) { 5076 rcu_read_unlock(); 5077 goto nla_put_failure; 5078 } 5079 err = af_ops->fill_stats_af(skb, dev); 5080 5081 if (err == -ENODATA) { 5082 nla_nest_cancel(skb, af); 5083 } else if (err < 0) { 5084 rcu_read_unlock(); 5085 goto nla_put_failure; 5086 } 5087 5088 nla_nest_end(skb, af); 5089 } 5090 } 5091 rcu_read_unlock(); 5092 5093 nla_nest_end(skb, attr); 5094 5095 *idxattr = 0; 5096 } 5097 5098 nlmsg_end(skb, nlh); 5099 5100 return 0; 5101 5102 nla_put_failure: 5103 /* 
not a multi message or no progress means a real error */ 5104 if (!(flags & NLM_F_MULTI) || s_prividx == *prividx) 5105 nlmsg_cancel(skb, nlh); 5106 else 5107 nlmsg_end(skb, nlh); 5108 5109 return -EMSGSIZE; 5110 } 5111 5112 static size_t if_nlmsg_stats_size(const struct net_device *dev, 5113 u32 filter_mask) 5114 { 5115 size_t size = 0; 5116 5117 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0)) 5118 size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64)); 5119 5120 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, 0)) { 5121 const struct rtnl_link_ops *ops = dev->rtnl_link_ops; 5122 int attr = IFLA_STATS_LINK_XSTATS; 5123 5124 if (ops && ops->get_linkxstats_size) { 5125 size += nla_total_size(ops->get_linkxstats_size(dev, 5126 attr)); 5127 /* for IFLA_STATS_LINK_XSTATS */ 5128 size += nla_total_size(0); 5129 } 5130 } 5131 5132 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 0)) { 5133 struct net_device *_dev = (struct net_device *)dev; 5134 const struct rtnl_link_ops *ops = NULL; 5135 const struct net_device *master; 5136 5137 /* netdev_master_upper_dev_get can't take const */ 5138 master = netdev_master_upper_dev_get(_dev); 5139 if (master) 5140 ops = master->rtnl_link_ops; 5141 if (ops && ops->get_linkxstats_size) { 5142 int attr = IFLA_STATS_LINK_XSTATS_SLAVE; 5143 5144 size += nla_total_size(ops->get_linkxstats_size(dev, 5145 attr)); 5146 /* for IFLA_STATS_LINK_XSTATS_SLAVE */ 5147 size += nla_total_size(0); 5148 } 5149 } 5150 5151 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0)) 5152 size += rtnl_get_offload_stats_size(dev); 5153 5154 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, 0)) { 5155 struct rtnl_af_ops *af_ops; 5156 5157 /* for IFLA_STATS_AF_SPEC */ 5158 size += nla_total_size(0); 5159 5160 rcu_read_lock(); 5161 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) { 5162 if (af_ops->get_stats_af_size) { 5163 size += nla_total_size( 5164 af_ops->get_stats_af_size(dev)); 5165 5166 /* for AF_* */ 5167 size += nla_total_size(0); 5168 } 5169 } 5170 rcu_read_unlock(); 5171 } 5172 5173 return size; 5174 } 5175 5176 static int rtnl_valid_stats_req(const struct nlmsghdr *nlh, bool strict_check, 5177 bool is_dump, struct netlink_ext_ack *extack) 5178 { 5179 struct if_stats_msg *ifsm; 5180 5181 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifsm))) { 5182 NL_SET_ERR_MSG(extack, "Invalid header for stats dump"); 5183 return -EINVAL; 5184 } 5185 5186 if (!strict_check) 5187 return 0; 5188 5189 ifsm = nlmsg_data(nlh); 5190 5191 /* only requests using strict checks can pass data to influence 5192 * the dump. The legacy exception is filter_mask.
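* (pad fields must be zero, a dump request may not pin an ifindex, no attributes may follow the header, and filter_mask itself is bounds-checked below)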
5193 */ 5194 if (ifsm->pad1 || ifsm->pad2 || (is_dump && ifsm->ifindex)) { 5195 NL_SET_ERR_MSG(extack, "Invalid values in header for stats dump request"); 5196 return -EINVAL; 5197 } 5198 if (nlmsg_attrlen(nlh, sizeof(*ifsm))) { 5199 NL_SET_ERR_MSG(extack, "Invalid attributes after stats header"); 5200 return -EINVAL; 5201 } 5202 if (ifsm->filter_mask >= IFLA_STATS_FILTER_BIT(IFLA_STATS_MAX + 1)) { 5203 NL_SET_ERR_MSG(extack, "Invalid stats requested through filter mask"); 5204 return -EINVAL; 5205 } 5206 5207 return 0; 5208 } 5209 5210 static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh, 5211 struct netlink_ext_ack *extack) 5212 { 5213 struct net *net = sock_net(skb->sk); 5214 struct net_device *dev = NULL; 5215 int idxattr = 0, prividx = 0; 5216 struct if_stats_msg *ifsm; 5217 struct sk_buff *nskb; 5218 u32 filter_mask; 5219 int err; 5220 5221 err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb), 5222 false, extack); 5223 if (err) 5224 return err; 5225 5226 ifsm = nlmsg_data(nlh); 5227 if (ifsm->ifindex > 0) 5228 dev = __dev_get_by_index(net, ifsm->ifindex); 5229 else 5230 return -EINVAL; 5231 5232 if (!dev) 5233 return -ENODEV; 5234 5235 filter_mask = ifsm->filter_mask; 5236 if (!filter_mask) 5237 return -EINVAL; 5238 5239 nskb = nlmsg_new(if_nlmsg_stats_size(dev, filter_mask), GFP_KERNEL); 5240 if (!nskb) 5241 return -ENOBUFS; 5242 5243 err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS, 5244 NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0, 5245 0, filter_mask, &idxattr, &prividx); 5246 if (err < 0) { 5247 /* -EMSGSIZE implies BUG in if_nlmsg_stats_size */ 5248 WARN_ON(err == -EMSGSIZE); 5249 kfree_skb(nskb); 5250 } else { 5251 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid); 5252 } 5253 5254 return err; 5255 } 5256 5257 static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb) 5258 { 5259 struct netlink_ext_ack *extack = cb->extack; 5260 int h, s_h, err, s_idx, s_idxattr, s_prividx; 5261 struct net *net = sock_net(skb->sk); 5262 unsigned int flags = NLM_F_MULTI; 5263 struct if_stats_msg *ifsm; 5264 struct hlist_head *head; 5265 struct net_device *dev; 5266 u32 filter_mask = 0; 5267 int idx = 0; 5268 5269 s_h = cb->args[0]; 5270 s_idx = cb->args[1]; 5271 s_idxattr = cb->args[2]; 5272 s_prividx = cb->args[3]; 5273 5274 cb->seq = net->dev_base_seq; 5275 5276 err = rtnl_valid_stats_req(cb->nlh, cb->strict_check, true, extack); 5277 if (err) 5278 return err; 5279 5280 ifsm = nlmsg_data(cb->nlh); 5281 filter_mask = ifsm->filter_mask; 5282 if (!filter_mask) { 5283 NL_SET_ERR_MSG(extack, "Filter mask must be set for stats dump"); 5284 return -EINVAL; 5285 } 5286 5287 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { 5288 idx = 0; 5289 head = &net->dev_index_head[h]; 5290 hlist_for_each_entry(dev, head, index_hlist) { 5291 if (idx < s_idx) 5292 goto cont; 5293 err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS, 5294 NETLINK_CB(cb->skb).portid, 5295 cb->nlh->nlmsg_seq, 0, 5296 flags, filter_mask, 5297 &s_idxattr, &s_prividx); 5298 /* If we ran out of room on the first message, 5299 * we're in trouble 5300 */ 5301 WARN_ON((err == -EMSGSIZE) && (skb->len == 0)); 5302 5303 if (err < 0) 5304 goto out; 5305 s_prividx = 0; 5306 s_idxattr = 0; 5307 nl_dump_check_consistent(cb, nlmsg_hdr(skb)); 5308 cont: 5309 idx++; 5310 } 5311 } 5312 out: 5313 cb->args[3] = s_prividx; 5314 cb->args[2] = s_idxattr; 5315 cb->args[1] = idx; 5316 cb->args[0] = h; 5317 5318 return skb->len; 5319 } 5320 5321 /* Process one rtnetlink message. 
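* Dumps (GET requests with NLM_F_DUMP set) are handed to netlink_dump_start() with the registered dumpit callback; all other requests are routed to the registered doit handler, holding rtnl_mutex unless the handler was registered with RTNL_FLAG_DOIT_UNLOCKED.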
*/ 5322 5323 static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, 5324 struct netlink_ext_ack *extack) 5325 { 5326 struct net *net = sock_net(skb->sk); 5327 struct rtnl_link *link; 5328 struct module *owner; 5329 int err = -EOPNOTSUPP; 5330 rtnl_doit_func doit; 5331 unsigned int flags; 5332 int kind; 5333 int family; 5334 int type; 5335 5336 type = nlh->nlmsg_type; 5337 if (type > RTM_MAX) 5338 return -EOPNOTSUPP; 5339 5340 type -= RTM_BASE; 5341 5342 /* All messages must carry at least 1 byte of payload (a struct rtgenmsg) */ 5343 if (nlmsg_len(nlh) < sizeof(struct rtgenmsg)) 5344 return 0; 5345 5346 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family; 5347 kind = type & 3; 5348 5349 if (kind != 2 && !netlink_net_capable(skb, CAP_NET_ADMIN)) 5350 return -EPERM; 5351 5352 rcu_read_lock(); 5353 if (kind == 2 && nlh->nlmsg_flags & NLM_F_DUMP) { 5354 struct sock *rtnl; 5355 rtnl_dumpit_func dumpit; 5356 u16 min_dump_alloc = 0; 5357 5358 link = rtnl_get_link(family, type); 5359 if (!link || !link->dumpit) { 5360 family = PF_UNSPEC; 5361 link = rtnl_get_link(family, type); 5362 if (!link || !link->dumpit) 5363 goto err_unlock; 5364 } 5365 owner = link->owner; 5366 dumpit = link->dumpit; 5367 5368 if (type == RTM_GETLINK - RTM_BASE) 5369 min_dump_alloc = rtnl_calcit(skb, nlh); 5370 5371 err = 0; 5372 /* need to do this before rcu_read_unlock() */ 5373 if (!try_module_get(owner)) 5374 err = -EPROTONOSUPPORT; 5375 5376 rcu_read_unlock(); 5377 5378 rtnl = net->rtnl; 5379 if (err == 0) { 5380 struct netlink_dump_control c = { 5381 .dump = dumpit, 5382 .min_dump_alloc = min_dump_alloc, 5383 .module = owner, 5384 }; 5385 err = netlink_dump_start(rtnl, skb, nlh, &c); 5386 /* netlink_dump_start() will keep a reference on 5387 * the module if the dump is still in progress.
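* (netlink_dump_start() takes its own module reference, so the module_put() below only drops the reference acquired via try_module_get() above)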
5388 */ 5389 module_put(owner); 5390 } 5391 return err; 5392 } 5393 5394 link = rtnl_get_link(family, type); 5395 if (!link || !link->doit) { 5396 family = PF_UNSPEC; 5397 link = rtnl_get_link(PF_UNSPEC, type); 5398 if (!link || !link->doit) 5399 goto out_unlock; 5400 } 5401 5402 owner = link->owner; 5403 if (!try_module_get(owner)) { 5404 err = -EPROTONOSUPPORT; 5405 goto out_unlock; 5406 } 5407 5408 flags = link->flags; 5409 if (flags & RTNL_FLAG_DOIT_UNLOCKED) { 5410 doit = link->doit; 5411 rcu_read_unlock(); 5412 if (doit) 5413 err = doit(skb, nlh, extack); 5414 module_put(owner); 5415 return err; 5416 } 5417 rcu_read_unlock(); 5418 5419 rtnl_lock(); 5420 link = rtnl_get_link(family, type); 5421 if (link && link->doit) 5422 err = link->doit(skb, nlh, extack); 5423 rtnl_unlock(); 5424 5425 module_put(owner); 5426 5427 return err; 5428 5429 out_unlock: 5430 rcu_read_unlock(); 5431 return err; 5432 5433 err_unlock: 5434 rcu_read_unlock(); 5435 return -EOPNOTSUPP; 5436 } 5437 5438 static void rtnetlink_rcv(struct sk_buff *skb) 5439 { 5440 netlink_rcv_skb(skb, &rtnetlink_rcv_msg); 5441 } 5442 5443 static int rtnetlink_bind(struct net *net, int group) 5444 { 5445 switch (group) { 5446 case RTNLGRP_IPV4_MROUTE_R: 5447 case RTNLGRP_IPV6_MROUTE_R: 5448 if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 5449 return -EPERM; 5450 break; 5451 } 5452 return 0; 5453 } 5454 5455 static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr) 5456 { 5457 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 5458 5459 switch (event) { 5460 case NETDEV_REBOOT: 5461 case NETDEV_CHANGEMTU: 5462 case NETDEV_CHANGEADDR: 5463 case NETDEV_CHANGENAME: 5464 case NETDEV_FEAT_CHANGE: 5465 case NETDEV_BONDING_FAILOVER: 5466 case NETDEV_POST_TYPE_CHANGE: 5467 case NETDEV_NOTIFY_PEERS: 5468 case NETDEV_CHANGEUPPER: 5469 case NETDEV_RESEND_IGMP: 5470 case NETDEV_CHANGEINFODATA: 5471 case NETDEV_CHANGELOWERSTATE: 5472 case NETDEV_CHANGE_TX_QUEUE_LEN: 5473 rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event), 5474 GFP_KERNEL, NULL, 0); 5475 break; 5476 default: 5477 break; 5478 } 5479 return NOTIFY_DONE; 5480 } 5481 5482 static struct notifier_block rtnetlink_dev_notifier = { 5483 .notifier_call = rtnetlink_event, 5484 }; 5485 5486 5487 static int __net_init rtnetlink_net_init(struct net *net) 5488 { 5489 struct sock *sk; 5490 struct netlink_kernel_cfg cfg = { 5491 .groups = RTNLGRP_MAX, 5492 .input = rtnetlink_rcv, 5493 .cb_mutex = &rtnl_mutex, 5494 .flags = NL_CFG_F_NONROOT_RECV, 5495 .bind = rtnetlink_bind, 5496 }; 5497 5498 sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg); 5499 if (!sk) 5500 return -ENOMEM; 5501 net->rtnl = sk; 5502 return 0; 5503 } 5504 5505 static void __net_exit rtnetlink_net_exit(struct net *net) 5506 { 5507 netlink_kernel_release(net->rtnl); 5508 net->rtnl = NULL; 5509 } 5510 5511 static struct pernet_operations rtnetlink_net_ops = { 5512 .init = rtnetlink_net_init, 5513 .exit = rtnetlink_net_exit, 5514 }; 5515 5516 void __init rtnetlink_init(void) 5517 { 5518 if (register_pernet_subsys(&rtnetlink_net_ops)) 5519 panic("rtnetlink_init: cannot initialize rtnetlink\n"); 5520 5521 register_netdevice_notifier(&rtnetlink_dev_notifier); 5522 5523 rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink, 5524 rtnl_dump_ifinfo, 0); 5525 rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, 0); 5526 rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, 0); 5527 rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, 0); 5528 5529 rtnl_register(PF_UNSPEC, 
RTM_GETADDR, NULL, rtnl_dump_all, 0); 5530 rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, 0); 5531 rtnl_register(PF_UNSPEC, RTM_GETNETCONF, NULL, rtnl_dump_all, 0); 5532 5533 rtnl_register(PF_UNSPEC, RTM_NEWLINKPROP, rtnl_newlinkprop, NULL, 0); 5534 rtnl_register(PF_UNSPEC, RTM_DELLINKPROP, rtnl_dellinkprop, NULL, 0); 5535 5536 rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, 0); 5537 rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL, 0); 5538 rtnl_register(PF_BRIDGE, RTM_GETNEIGH, rtnl_fdb_get, rtnl_fdb_dump, 0); 5539 5540 rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, 0); 5541 rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, 0); 5542 rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, 0); 5543 5544 rtnl_register(PF_UNSPEC, RTM_GETSTATS, rtnl_stats_get, rtnl_stats_dump, 5545 0); 5546 } 5547
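/* Editor's note: a minimal usage sketch of the registration interface
 * exercised above, for module code that cannot use rtnl_register().
 * The names foo_doit, foo_dumpit and RTM_GETFOO are hypothetical
 * placeholders, not real kernel symbols; a real handler would of course
 * do actual message processing:
 *
 *	static int foo_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
 *			    struct netlink_ext_ack *extack)
 *	{
 *		return 0;
 *	}
 *
 *	static int foo_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
 *	{
 *		return skb->len;
 *	}
 *
 *	err = rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETFOO,
 *				   foo_doit, foo_dumpit, 0);
 */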