/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Routing netlink socket interface: protocol independent part.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Vitaly E. Lavrov		RTA_OK arithmetics was wrong.
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/mutex.h>
#include <linux/if_addr.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/string.h>

#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/udp.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/fib_rules.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>

struct rtnl_link
{
	rtnl_doit_func		doit;
	rtnl_dumpit_func	dumpit;
};

static DEFINE_MUTEX(rtnl_mutex);

void rtnl_lock(void)
{
	mutex_lock(&rtnl_mutex);
}

void __rtnl_unlock(void)
{
	mutex_unlock(&rtnl_mutex);
}

void rtnl_unlock(void)
{
	/* This fellow will unlock it for us. */
	netdev_run_todo();
}

int rtnl_trylock(void)
{
	return mutex_trylock(&rtnl_mutex);
}

int rtnl_is_locked(void)
{
	return mutex_is_locked(&rtnl_mutex);
}

static struct rtnl_link *rtnl_msg_handlers[NPROTO];

static inline int rtm_msgindex(int msgtype)
{
	int msgindex = msgtype - RTM_BASE;

	/*
	 * msgindex < 0 implies someone tried to register a netlink
	 * control code. msgindex >= RTM_NR_MSGTYPES may indicate that
	 * the message type has not been added to linux/rtnetlink.h
	 */
	BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES);

	return msgindex;
}

static rtnl_doit_func rtnl_get_doit(int protocol, int msgindex)
{
	struct rtnl_link *tab;

	tab = rtnl_msg_handlers[protocol];
	if (tab == NULL || tab[msgindex].doit == NULL)
		tab = rtnl_msg_handlers[PF_UNSPEC];

	return tab ? tab[msgindex].doit : NULL;
}

static rtnl_dumpit_func rtnl_get_dumpit(int protocol, int msgindex)
{
	struct rtnl_link *tab;

	tab = rtnl_msg_handlers[protocol];
	if (tab == NULL || tab[msgindex].dumpit == NULL)
		tab = rtnl_msg_handlers[PF_UNSPEC];

	return tab ? tab[msgindex].dumpit : NULL;
}
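
/*
 * Illustrative sketch (not built): the usual calling pattern for the
 * locking helpers above when some other piece of kernel code needs to
 * reconfigure a device. example_set_mtu() is hypothetical; rtnl_lock(),
 * ASSERT_RTNL(), dev_set_mtu() and rtnl_unlock() are real interfaces.
 */
#if 0
static int example_set_mtu(struct net_device *dev, int new_mtu)
{
	int err;

	rtnl_lock();
	ASSERT_RTNL();		/* device changes expect the RTNL lock */
	err = dev_set_mtu(dev, new_mtu);
	rtnl_unlock();		/* netdev_run_todo() drops the mutex for us */

	return err;
}
#endif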

/**
 * __rtnl_register - Register a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 * @doit: Function pointer called for each request message
 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
 *
 * Registers the specified function pointers (at least one of them has
 * to be non-NULL) to be called whenever a request message for the
 * specified protocol family and message type is received.
 *
 * The special protocol family PF_UNSPEC may be used to define fallback
 * function pointers for the case when no entry for the specific protocol
 * family exists.
 *
 * Returns 0 on success or a negative error code.
 */
int __rtnl_register(int protocol, int msgtype,
		    rtnl_doit_func doit, rtnl_dumpit_func dumpit)
{
	struct rtnl_link *tab;
	int msgindex;

	BUG_ON(protocol < 0 || protocol >= NPROTO);
	msgindex = rtm_msgindex(msgtype);

	tab = rtnl_msg_handlers[protocol];
	if (tab == NULL) {
		tab = kcalloc(RTM_NR_MSGTYPES, sizeof(*tab), GFP_KERNEL);
		if (tab == NULL)
			return -ENOBUFS;

		rtnl_msg_handlers[protocol] = tab;
	}

	if (doit)
		tab[msgindex].doit = doit;

	if (dumpit)
		tab[msgindex].dumpit = dumpit;

	return 0;
}

EXPORT_SYMBOL_GPL(__rtnl_register);

/**
 * rtnl_register - Register a rtnetlink message type
 *
 * Identical to __rtnl_register() but panics on failure. This is useful
 * as failure of this function is very unlikely, it can only happen due
 * to lack of memory when allocating the chain to store all message
 * handlers for a protocol. Meant for use in init functions where lack
 * of memory implies no sense in continuing.
 */
void rtnl_register(int protocol, int msgtype,
		   rtnl_doit_func doit, rtnl_dumpit_func dumpit)
{
	if (__rtnl_register(protocol, msgtype, doit, dumpit) < 0)
		panic("Unable to register rtnetlink message handler, "
		      "protocol = %d, message type = %d\n",
		      protocol, msgtype);
}

EXPORT_SYMBOL_GPL(rtnl_register);

/**
 * rtnl_unregister - Unregister a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_unregister(int protocol, int msgtype)
{
	int msgindex;

	BUG_ON(protocol < 0 || protocol >= NPROTO);
	msgindex = rtm_msgindex(msgtype);

	if (rtnl_msg_handlers[protocol] == NULL)
		return -ENOENT;

	rtnl_msg_handlers[protocol][msgindex].doit = NULL;
	rtnl_msg_handlers[protocol][msgindex].dumpit = NULL;

	return 0;
}

EXPORT_SYMBOL_GPL(rtnl_unregister);

/**
 * rtnl_unregister_all - Unregister all rtnetlink message types of a protocol
 * @protocol: Protocol family or PF_UNSPEC
 *
 * Identical to calling rtnl_unregister() for all registered message types
 * of a certain protocol family.
 */
void rtnl_unregister_all(int protocol)
{
	BUG_ON(protocol < 0 || protocol >= NPROTO);

	kfree(rtnl_msg_handlers[protocol]);
	rtnl_msg_handlers[protocol] = NULL;
}

EXPORT_SYMBOL_GPL(rtnl_unregister_all);
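
/*
 * Illustrative sketch (not built): how a protocol module might hook into
 * the handler tables above. PF_EXAMPLE and the example_* functions are
 * hypothetical; rtnl_register() and rtnl_unregister_all() are the
 * interfaces defined in this file.
 */
#if 0
static int example_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
			    void *arg)
{
	/* validate the request and install the route */
	return 0;
}

static int example_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
	/* fill skb with RTM_NEWROUTE replies, resuming from cb->args[] */
	return skb->len;
}

static int __init example_init(void)
{
	rtnl_register(PF_EXAMPLE, RTM_NEWROUTE, example_newroute, NULL);
	rtnl_register(PF_EXAMPLE, RTM_GETROUTE, NULL, example_dumproute);
	return 0;
}

static void __exit example_exit(void)
{
	rtnl_unregister_all(PF_EXAMPLE);
}
#endif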

static LIST_HEAD(link_ops);

/**
 * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * The caller must hold the rtnl_mutex. This function should be used
 * by drivers that create devices during module initialization. It
 * must be called before registering the devices.
 *
 * Returns 0 on success or a negative error code.
 */
int __rtnl_link_register(struct rtnl_link_ops *ops)
{
	if (!ops->dellink)
		ops->dellink = unregister_netdevice;

	list_add_tail(&ops->list, &link_ops);
	return 0;
}

EXPORT_SYMBOL_GPL(__rtnl_link_register);

/**
 * rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_link_register(struct rtnl_link_ops *ops)
{
	int err;

	rtnl_lock();
	err = __rtnl_link_register(ops);
	rtnl_unlock();
	return err;
}

EXPORT_SYMBOL_GPL(rtnl_link_register);
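
/*
 * Illustrative sketch (not built): a minimal rtnl_link_ops for a purely
 * virtual device registered from module init. The "example" kind and
 * example_setup() are hypothetical; the fields shown are the ones
 * consumed by rtnl_newlink() further down in this file.
 */
#if 0
static void example_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->tx_queue_len = 0;
}

static struct rtnl_link_ops example_link_ops __read_mostly = {
	.kind		= "example",
	.setup		= example_setup,
	/* no .newlink: rtnl_newlink() falls back to register_netdevice() */
};

static int __init example_link_init(void)
{
	return rtnl_link_register(&example_link_ops);
}

static void __exit example_link_exit(void)
{
	rtnl_link_unregister(&example_link_ops);
}
#endif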

static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
{
	struct net_device *dev;
restart:
	for_each_netdev(net, dev) {
		if (dev->rtnl_link_ops == ops) {
			ops->dellink(dev);
			goto restart;
		}
	}
}

void rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
{
	rtnl_lock();
	__rtnl_kill_links(net, ops);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(rtnl_kill_links);

/**
 * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 *
 * The caller must hold the rtnl_mutex.
 */
void __rtnl_link_unregister(struct rtnl_link_ops *ops)
{
	struct net *net;

	for_each_net(net) {
		__rtnl_kill_links(net, ops);
	}
	list_del(&ops->list);
}

EXPORT_SYMBOL_GPL(__rtnl_link_unregister);

/**
 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 */
void rtnl_link_unregister(struct rtnl_link_ops *ops)
{
	rtnl_lock();
	__rtnl_link_unregister(ops);
	rtnl_unlock();
}

EXPORT_SYMBOL_GPL(rtnl_link_unregister);

static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
{
	const struct rtnl_link_ops *ops;

	list_for_each_entry(ops, &link_ops, list) {
		if (!strcmp(ops->kind, kind))
			return ops;
	}
	return NULL;
}

static size_t rtnl_link_get_size(const struct net_device *dev)
{
	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
	size_t size;

	if (!ops)
		return 0;

	size = nlmsg_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
	       nlmsg_total_size(strlen(ops->kind) + 1);	 /* IFLA_INFO_KIND */

	if (ops->get_size)
		/* IFLA_INFO_DATA + nested data */
		size += nlmsg_total_size(sizeof(struct nlattr)) +
			ops->get_size(dev);

	if (ops->get_xstats_size)
		size += ops->get_xstats_size(dev);	/* IFLA_INFO_XSTATS */

	return size;
}

static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
{
	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
	struct nlattr *linkinfo, *data;
	int err = -EMSGSIZE;

	linkinfo = nla_nest_start(skb, IFLA_LINKINFO);
	if (linkinfo == NULL)
		goto out;

	if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
		goto err_cancel_link;
	if (ops->fill_xstats) {
		err = ops->fill_xstats(skb, dev);
		if (err < 0)
			goto err_cancel_link;
	}
	if (ops->fill_info) {
		data = nla_nest_start(skb, IFLA_INFO_DATA);
		if (data == NULL)
			goto err_cancel_link;
		err = ops->fill_info(skb, dev);
		if (err < 0)
			goto err_cancel_data;
		nla_nest_end(skb, data);
	}

	nla_nest_end(skb, linkinfo);
	return 0;

err_cancel_data:
	nla_nest_cancel(skb, data);
err_cancel_link:
	nla_nest_cancel(skb, linkinfo);
out:
	return err;
}

static const int rtm_min[RTM_NR_FAMILIES] =
{
	[RTM_FAM(RTM_NEWLINK)]      = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
	[RTM_FAM(RTM_NEWADDR)]      = NLMSG_LENGTH(sizeof(struct ifaddrmsg)),
	[RTM_FAM(RTM_NEWROUTE)]     = NLMSG_LENGTH(sizeof(struct rtmsg)),
	[RTM_FAM(RTM_NEWRULE)]      = NLMSG_LENGTH(sizeof(struct fib_rule_hdr)),
	[RTM_FAM(RTM_NEWQDISC)]     = NLMSG_LENGTH(sizeof(struct tcmsg)),
	[RTM_FAM(RTM_NEWTCLASS)]    = NLMSG_LENGTH(sizeof(struct tcmsg)),
	[RTM_FAM(RTM_NEWTFILTER)]   = NLMSG_LENGTH(sizeof(struct tcmsg)),
	[RTM_FAM(RTM_NEWACTION)]    = NLMSG_LENGTH(sizeof(struct tcamsg)),
	[RTM_FAM(RTM_GETMULTICAST)] = NLMSG_LENGTH(sizeof(struct rtgenmsg)),
	[RTM_FAM(RTM_GETANYCAST)]   = NLMSG_LENGTH(sizeof(struct rtgenmsg)),
};

static const int rta_max[RTM_NR_FAMILIES] =
{
	[RTM_FAM(RTM_NEWLINK)]      = IFLA_MAX,
	[RTM_FAM(RTM_NEWADDR)]      = IFA_MAX,
	[RTM_FAM(RTM_NEWROUTE)]     = RTA_MAX,
	[RTM_FAM(RTM_NEWRULE)]      = FRA_MAX,
	[RTM_FAM(RTM_NEWQDISC)]     = TCA_MAX,
	[RTM_FAM(RTM_NEWTCLASS)]    = TCA_MAX,
	[RTM_FAM(RTM_NEWTFILTER)]   = TCA_MAX,
	[RTM_FAM(RTM_NEWACTION)]    = TCAA_MAX,
};

void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const void *data)
{
	struct rtattr *rta;
	int size = RTA_LENGTH(attrlen);

	rta = (struct rtattr*)skb_put(skb, RTA_ALIGN(size));
	rta->rta_type = attrtype;
	rta->rta_len = size;
	memcpy(RTA_DATA(rta), data, attrlen);
	memset(RTA_DATA(rta) + attrlen, 0, RTA_ALIGN(size) - size);
}
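
/*
 * Illustrative sketch (not built): __rta_fill() is the legacy helper
 * behind the RTA_PUT() family of macros and assumes the caller has
 * already sized the skb; example_append_u32() and its attrtype are
 * placeholders.
 */
#if 0
static void example_append_u32(struct sk_buff *skb, int attrtype, u32 value)
{
	/* caller must have left RTA_SPACE(sizeof(value)) bytes of tailroom */
	__rta_fill(skb, attrtype, sizeof(value), &value);
}
#endif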

int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned group, int echo)
{
	struct sock *rtnl = net->rtnl;
	int err = 0;

	NETLINK_CB(skb).dst_group = group;
	if (echo)
		atomic_inc(&skb->users);
	netlink_broadcast(rtnl, skb, pid, group, GFP_KERNEL);
	if (echo)
		err = netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT);
	return err;
}

int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
{
	struct sock *rtnl = net->rtnl;

	return nlmsg_unicast(rtnl, skb, pid);
}

void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
		 struct nlmsghdr *nlh, gfp_t flags)
{
	struct sock *rtnl = net->rtnl;
	int report = 0;

	if (nlh)
		report = nlmsg_report(nlh);

	nlmsg_notify(rtnl, skb, pid, group, report, flags);
}

void rtnl_set_sk_err(struct net *net, u32 group, int error)
{
	struct sock *rtnl = net->rtnl;

	netlink_set_err(rtnl, 0, group, error);
}

int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
{
	struct nlattr *mx;
	int i, valid = 0;

	mx = nla_nest_start(skb, RTA_METRICS);
	if (mx == NULL)
		return -ENOBUFS;

	for (i = 0; i < RTAX_MAX; i++) {
		if (metrics[i]) {
			valid++;
			NLA_PUT_U32(skb, i+1, metrics[i]);
		}
	}

	if (!valid) {
		nla_nest_cancel(skb, mx);
		return 0;
	}

	return nla_nest_end(skb, mx);

nla_put_failure:
	nla_nest_cancel(skb, mx);
	return -EMSGSIZE;
}

int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
		       u32 ts, u32 tsage, long expires, u32 error)
{
	struct rta_cacheinfo ci = {
		.rta_lastuse = jiffies_to_clock_t(jiffies - dst->lastuse),
		.rta_used = dst->__use,
		.rta_clntref = atomic_read(&(dst->__refcnt)),
		.rta_error = error,
		.rta_id = id,
		.rta_ts = ts,
		.rta_tsage = tsage,
	};

	if (expires)
		ci.rta_expires = jiffies_to_clock_t(expires);

	return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
}

EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
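
/*
 * Illustrative sketch (not built): how a route dump function might use
 * the two helpers above when filling an RTM_NEWROUTE message. The
 * example_fill_route() wrapper and its arguments are schematic only;
 * rtnetlink_put_metrics() and rtnl_put_cacheinfo() are the real helpers.
 */
#if 0
static int example_fill_route(struct sk_buff *skb, struct dst_entry *dst,
			      u32 *metrics, long expires)
{
	if (rtnetlink_put_metrics(skb, metrics) < 0)
		return -EMSGSIZE;

	return rtnl_put_cacheinfo(skb, dst, 0, 0, 0, expires, 0);
}
#endif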

static void set_operstate(struct net_device *dev, unsigned char transition)
{
	unsigned char operstate = dev->operstate;

	switch(transition) {
	case IF_OPER_UP:
		if ((operstate == IF_OPER_DORMANT ||
		     operstate == IF_OPER_UNKNOWN) &&
		    !netif_dormant(dev))
			operstate = IF_OPER_UP;
		break;

	case IF_OPER_DORMANT:
		if (operstate == IF_OPER_UP ||
		    operstate == IF_OPER_UNKNOWN)
			operstate = IF_OPER_DORMANT;
		break;
	}

	if (dev->operstate != operstate) {
		write_lock_bh(&dev_base_lock);
		dev->operstate = operstate;
		write_unlock_bh(&dev_base_lock);
		netdev_state_change(dev);
	}
}

static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
				 const struct net_device_stats *b)
{
	a->rx_packets = b->rx_packets;
	a->tx_packets = b->tx_packets;
	a->rx_bytes = b->rx_bytes;
	a->tx_bytes = b->tx_bytes;
	a->rx_errors = b->rx_errors;
	a->tx_errors = b->tx_errors;
	a->rx_dropped = b->rx_dropped;
	a->tx_dropped = b->tx_dropped;

	a->multicast = b->multicast;
	a->collisions = b->collisions;

	a->rx_length_errors = b->rx_length_errors;
	a->rx_over_errors = b->rx_over_errors;
	a->rx_crc_errors = b->rx_crc_errors;
	a->rx_frame_errors = b->rx_frame_errors;
	a->rx_fifo_errors = b->rx_fifo_errors;
	a->rx_missed_errors = b->rx_missed_errors;

	a->tx_aborted_errors = b->tx_aborted_errors;
	a->tx_carrier_errors = b->tx_carrier_errors;
	a->tx_fifo_errors = b->tx_fifo_errors;
	a->tx_heartbeat_errors = b->tx_heartbeat_errors;
	a->tx_window_errors = b->tx_window_errors;

	a->rx_compressed = b->rx_compressed;
	a->tx_compressed = b->tx_compressed;
}

static inline size_t if_nlmsg_size(const struct net_device *dev)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
	       + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
	       + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
	       + nla_total_size(sizeof(struct rtnl_link_ifmap))
	       + nla_total_size(sizeof(struct rtnl_link_stats))
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
	       + nla_total_size(4) /* IFLA_TXQLEN */
	       + nla_total_size(4) /* IFLA_WEIGHT */
	       + nla_total_size(4) /* IFLA_MTU */
	       + nla_total_size(4) /* IFLA_LINK */
	       + nla_total_size(4) /* IFLA_MASTER */
	       + nla_total_size(1) /* IFLA_OPERSTATE */
	       + nla_total_size(1) /* IFLA_LINKMODE */
	       + rtnl_link_get_size(dev); /* IFLA_LINKINFO */
}
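
/*
 * Note: every attribute emitted by rtnl_fill_ifinfo() below must be
 * accounted for in if_nlmsg_size() above, since rtnl_getlink() and
 * rtmsg_ifinfo() size their skb with nlmsg_new(if_nlmsg_size(dev), ...).
 * A missing nla_total_size() term shows up as the -EMSGSIZE warning in
 * those callers. For example, a hypothetical new 32-bit IFLA attribute
 * would add nla_total_size(4) here and a matching NLA_PUT_U32() below.
 */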

static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
			    int type, u32 pid, u32 seq, u32 change,
			    unsigned int flags)
{
	struct ifinfomsg *ifm;
	struct nlmsghdr *nlh;
	const struct net_device_stats *stats;
	struct nlattr *attr;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ifm = nlmsg_data(nlh);
	ifm->ifi_family = AF_UNSPEC;
	ifm->__ifi_pad = 0;
	ifm->ifi_type = dev->type;
	ifm->ifi_index = dev->ifindex;
	ifm->ifi_flags = dev_get_flags(dev);
	ifm->ifi_change = change;

	NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name);
	NLA_PUT_U32(skb, IFLA_TXQLEN, dev->tx_queue_len);
	NLA_PUT_U8(skb, IFLA_OPERSTATE,
		   netif_running(dev) ? dev->operstate : IF_OPER_DOWN);
	NLA_PUT_U8(skb, IFLA_LINKMODE, dev->link_mode);
	NLA_PUT_U32(skb, IFLA_MTU, dev->mtu);

	if (dev->ifindex != dev->iflink)
		NLA_PUT_U32(skb, IFLA_LINK, dev->iflink);

	if (dev->master)
		NLA_PUT_U32(skb, IFLA_MASTER, dev->master->ifindex);

	if (dev->qdisc)
		NLA_PUT_STRING(skb, IFLA_QDISC, dev->qdisc->ops->id);

	if (dev->ifalias)
		NLA_PUT_STRING(skb, IFLA_IFALIAS, dev->ifalias);

	if (1) {
		struct rtnl_link_ifmap map = {
			.mem_start   = dev->mem_start,
			.mem_end     = dev->mem_end,
			.base_addr   = dev->base_addr,
			.irq         = dev->irq,
			.dma         = dev->dma,
			.port        = dev->if_port,
		};
		NLA_PUT(skb, IFLA_MAP, sizeof(map), &map);
	}

	if (dev->addr_len) {
		NLA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr);
		NLA_PUT(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast);
	}

	attr = nla_reserve(skb, IFLA_STATS,
			   sizeof(struct rtnl_link_stats));
	if (attr == NULL)
		goto nla_put_failure;

	stats = dev_get_stats(dev);
	copy_rtnl_link_stats(nla_data(attr), stats);

	if (dev->rtnl_link_ops) {
		if (rtnl_link_fill(skb, dev) < 0)
			goto nla_put_failure;
	}

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int h, s_h;
	int idx = 0, s_idx;
	struct net_device *dev;
	struct hlist_head *head;
	struct hlist_node *node;

	s_h = cb->args[0];
	s_idx = cb->args[1];

	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
		idx = 0;
		head = &net->dev_index_head[h];
		hlist_for_each_entry(dev, node, head, index_hlist) {
			if (idx < s_idx)
				goto cont;
			if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
					     NETLINK_CB(cb->skb).pid,
					     cb->nlh->nlmsg_seq, 0,
					     NLM_F_MULTI) <= 0)
				goto out;
cont:
			idx++;
		}
	}
out:
	cb->args[1] = idx;
	cb->args[0] = h;

	return skb->len;
}

const struct nla_policy ifla_policy[IFLA_MAX+1] = {
	[IFLA_IFNAME]		= { .type = NLA_STRING, .len = IFNAMSIZ-1 },
	[IFLA_ADDRESS]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[IFLA_BROADCAST]	= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[IFLA_MAP]		= { .len = sizeof(struct rtnl_link_ifmap) },
	[IFLA_MTU]		= { .type = NLA_U32 },
	[IFLA_LINK]		= { .type = NLA_U32 },
	[IFLA_TXQLEN]		= { .type = NLA_U32 },
	[IFLA_WEIGHT]		= { .type = NLA_U32 },
	[IFLA_OPERSTATE]	= { .type = NLA_U8 },
	[IFLA_LINKMODE]		= { .type = NLA_U8 },
	[IFLA_LINKINFO]		= { .type = NLA_NESTED },
	[IFLA_NET_NS_PID]	= { .type = NLA_U32 },
	[IFLA_IFALIAS]		= { .type = NLA_STRING, .len = IFALIASZ-1 },
};

static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
	[IFLA_INFO_KIND]	= { .type = NLA_STRING },
	[IFLA_INFO_DATA]	= { .type = NLA_NESTED },
};
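
/*
 * Illustrative sketch (not built): a driver that accepts IFLA_INFO_DATA
 * supplies its own attribute policy and maxtype through rtnl_link_ops;
 * rtnl_newlink() below parses the nested attributes with that policy
 * before calling ->newlink() or ->changelink(). The IFLA_EXAMPLE_*
 * names and example_policy are hypothetical.
 */
#if 0
enum {
	IFLA_EXAMPLE_UNSPEC,
	IFLA_EXAMPLE_FLAGS,
	__IFLA_EXAMPLE_MAX
};
#define IFLA_EXAMPLE_MAX	(__IFLA_EXAMPLE_MAX - 1)

static const struct nla_policy example_policy[IFLA_EXAMPLE_MAX + 1] = {
	[IFLA_EXAMPLE_FLAGS]	= { .type = NLA_U32 },
};

/* wired up in the driver's rtnl_link_ops:
 *	.maxtype	= IFLA_EXAMPLE_MAX,
 *	.policy		= example_policy,
 */
#endif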

static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
{
	if (dev) {
		if (tb[IFLA_ADDRESS] &&
		    nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
			return -EINVAL;

		if (tb[IFLA_BROADCAST] &&
		    nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)
			return -EINVAL;
	}

	return 0;
}

static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
		      struct nlattr **tb, char *ifname, int modified)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int send_addr_notify = 0;
	int err;

	if (tb[IFLA_NET_NS_PID]) {
		struct net *net;
		net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
		if (IS_ERR(net)) {
			err = PTR_ERR(net);
			goto errout;
		}
		err = dev_change_net_namespace(dev, net, ifname);
		put_net(net);
		if (err)
			goto errout;
		modified = 1;
	}

	if (tb[IFLA_MAP]) {
		struct rtnl_link_ifmap *u_map;
		struct ifmap k_map;

		if (!ops->ndo_set_config) {
			err = -EOPNOTSUPP;
			goto errout;
		}

		if (!netif_device_present(dev)) {
			err = -ENODEV;
			goto errout;
		}

		u_map = nla_data(tb[IFLA_MAP]);
		k_map.mem_start = (unsigned long) u_map->mem_start;
		k_map.mem_end = (unsigned long) u_map->mem_end;
		k_map.base_addr = (unsigned short) u_map->base_addr;
		k_map.irq = (unsigned char) u_map->irq;
		k_map.dma = (unsigned char) u_map->dma;
		k_map.port = (unsigned char) u_map->port;

		err = ops->ndo_set_config(dev, &k_map);
		if (err < 0)
			goto errout;

		modified = 1;
	}

	if (tb[IFLA_ADDRESS]) {
		struct sockaddr *sa;
		int len;

		if (!ops->ndo_set_mac_address) {
			err = -EOPNOTSUPP;
			goto errout;
		}

		if (!netif_device_present(dev)) {
			err = -ENODEV;
			goto errout;
		}

		len = sizeof(sa_family_t) + dev->addr_len;
		sa = kmalloc(len, GFP_KERNEL);
		if (!sa) {
			err = -ENOMEM;
			goto errout;
		}
		sa->sa_family = dev->type;
		memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]),
		       dev->addr_len);
		err = ops->ndo_set_mac_address(dev, sa);
		kfree(sa);
		if (err)
			goto errout;
		send_addr_notify = 1;
		modified = 1;
	}

	if (tb[IFLA_MTU]) {
		err = dev_set_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
		if (err < 0)
			goto errout;
		modified = 1;
	}

	/*
	 * Interface selected by interface index but interface
	 * name provided implies that a name change has been
	 * requested.
	 */
	if (ifm->ifi_index > 0 && ifname[0]) {
		err = dev_change_name(dev, ifname);
		if (err < 0)
			goto errout;
		modified = 1;
	}

	if (tb[IFLA_IFALIAS]) {
		err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]),
				    nla_len(tb[IFLA_IFALIAS]));
		if (err < 0)
			goto errout;
		modified = 1;
	}

	if (tb[IFLA_BROADCAST]) {
		nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
		send_addr_notify = 1;
	}

	if (ifm->ifi_flags || ifm->ifi_change) {
		unsigned int flags = ifm->ifi_flags;

		/* bugwards compatibility: ifi_change == 0 is treated as ~0 */
		if (ifm->ifi_change)
			flags = (flags & ifm->ifi_change) |
				(dev->flags & ~ifm->ifi_change);
		err = dev_change_flags(dev, flags);
		if (err < 0)
			goto errout;
	}

	if (tb[IFLA_TXQLEN])
		dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);

	if (tb[IFLA_OPERSTATE])
		set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));

	if (tb[IFLA_LINKMODE]) {
		write_lock_bh(&dev_base_lock);
		dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
		write_unlock_bh(&dev_base_lock);
	}

	err = 0;

errout:
	if (err < 0 && modified && net_ratelimit())
		printk(KERN_WARNING "A link change request failed with "
		       "some changes committed already. Interface %s may "
		       "have been left with an inconsistent configuration, "
		       "please check.\n", dev->name);

	if (send_addr_notify)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	return err;
}
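
/*
 * Worked example (illustrative) of the ifi_flags/ifi_change handling in
 * do_setlink() above: with dev->flags = IFF_UP|IFF_BROADCAST, a request
 * carrying ifi_flags = 0 and ifi_change = IFF_UP clears only IFF_UP:
 * (0 & IFF_UP) | (dev->flags & ~IFF_UP) == IFF_BROADCAST.  An ifi_change
 * of 0 keeps the historical behaviour of treating the mask as ~0, i.e.
 * the flags are taken verbatim.
 */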

static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct ifinfomsg *ifm;
	struct net_device *dev;
	int err;
	struct nlattr *tb[IFLA_MAX+1];
	char ifname[IFNAMSIZ];

	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
	if (err < 0)
		goto errout;

	if (tb[IFLA_IFNAME])
		nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
	else
		ifname[0] = '\0';

	err = -EINVAL;
	ifm = nlmsg_data(nlh);
	if (ifm->ifi_index > 0)
		dev = __dev_get_by_index(net, ifm->ifi_index);
	else if (tb[IFLA_IFNAME])
		dev = __dev_get_by_name(net, ifname);
	else
		goto errout;

	if (dev == NULL) {
		err = -ENODEV;
		goto errout;
	}

	if ((err = validate_linkmsg(dev, tb)) < 0)
		goto errout;

	err = do_setlink(dev, ifm, tb, ifname, 0);
errout:
	return err;
}

static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	const struct rtnl_link_ops *ops;
	struct net_device *dev;
	struct ifinfomsg *ifm;
	char ifname[IFNAMSIZ];
	struct nlattr *tb[IFLA_MAX+1];
	int err;

	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
	if (err < 0)
		return err;

	if (tb[IFLA_IFNAME])
		nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);

	ifm = nlmsg_data(nlh);
	if (ifm->ifi_index > 0)
		dev = __dev_get_by_index(net, ifm->ifi_index);
	else if (tb[IFLA_IFNAME])
		dev = __dev_get_by_name(net, ifname);
	else
		return -EINVAL;

	if (!dev)
		return -ENODEV;

	ops = dev->rtnl_link_ops;
	if (!ops)
		return -EOPNOTSUPP;

	ops->dellink(dev);
	return 0;
}

struct net_device *rtnl_create_link(struct net *net, char *ifname,
		const struct rtnl_link_ops *ops, struct nlattr *tb[])
{
	int err;
	struct net_device *dev;
	unsigned int num_queues = 1;
	unsigned int real_num_queues = 1;

	if (ops->get_tx_queues) {
		err = ops->get_tx_queues(net, tb, &num_queues,
					 &real_num_queues);
		if (err)
			goto err;
	}
	err = -ENOMEM;
	dev = alloc_netdev_mq(ops->priv_size, ifname, ops->setup, num_queues);
	if (!dev)
		goto err;

	dev->real_num_tx_queues = real_num_queues;
	if (strchr(dev->name, '%')) {
		err = dev_alloc_name(dev, dev->name);
		if (err < 0)
			goto err_free;
	}

	dev_net_set(dev, net);
	dev->rtnl_link_ops = ops;

	if (tb[IFLA_MTU])
		dev->mtu = nla_get_u32(tb[IFLA_MTU]);
	if (tb[IFLA_ADDRESS])
		memcpy(dev->dev_addr, nla_data(tb[IFLA_ADDRESS]),
		       nla_len(tb[IFLA_ADDRESS]));
	if (tb[IFLA_BROADCAST])
		memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]),
		       nla_len(tb[IFLA_BROADCAST]));
	if (tb[IFLA_TXQLEN])
		dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
	if (tb[IFLA_OPERSTATE])
		set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
	if (tb[IFLA_LINKMODE])
		dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);

	return dev;

err_free:
	free_netdev(dev);
err:
	return ERR_PTR(err);
}
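
/*
 * Illustrative sketch (not built): rtnl_create_link() can also be called
 * by drivers that need an extra device (the veth driver creates its peer
 * this way). example_create_peer(), peer_ops and tbp are placeholders
 * for whatever the caller prepared; the RTNL lock is already held inside
 * a ->newlink() handler.
 */
#if 0
static int example_create_peer(struct net *net, char *ifname,
			       const struct rtnl_link_ops *peer_ops,
			       struct nlattr *tbp[])
{
	struct net_device *peer;
	int err;

	peer = rtnl_create_link(net, ifname, peer_ops, tbp);
	if (IS_ERR(peer))
		return PTR_ERR(peer);

	err = register_netdevice(peer);
	if (err < 0)
		free_netdev(peer);
	return err;
}
#endif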

static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	const struct rtnl_link_ops *ops;
	struct net_device *dev;
	struct ifinfomsg *ifm;
	char kind[MODULE_NAME_LEN];
	char ifname[IFNAMSIZ];
	struct nlattr *tb[IFLA_MAX+1];
	struct nlattr *linkinfo[IFLA_INFO_MAX+1];
	int err;

#ifdef CONFIG_MODULES
replay:
#endif
	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
	if (err < 0)
		return err;

	if (tb[IFLA_IFNAME])
		nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
	else
		ifname[0] = '\0';

	ifm = nlmsg_data(nlh);
	if (ifm->ifi_index > 0)
		dev = __dev_get_by_index(net, ifm->ifi_index);
	else if (ifname[0])
		dev = __dev_get_by_name(net, ifname);
	else
		dev = NULL;

	if ((err = validate_linkmsg(dev, tb)) < 0)
		return err;

	if (tb[IFLA_LINKINFO]) {
		err = nla_parse_nested(linkinfo, IFLA_INFO_MAX,
				       tb[IFLA_LINKINFO], ifla_info_policy);
		if (err < 0)
			return err;
	} else
		memset(linkinfo, 0, sizeof(linkinfo));

	if (linkinfo[IFLA_INFO_KIND]) {
		nla_strlcpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind));
		ops = rtnl_link_ops_get(kind);
	} else {
		kind[0] = '\0';
		ops = NULL;
	}

	if (1) {
		struct nlattr *attr[ops ? ops->maxtype + 1 : 0], **data = NULL;

		if (ops) {
			if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
				err = nla_parse_nested(attr, ops->maxtype,
						       linkinfo[IFLA_INFO_DATA],
						       ops->policy);
				if (err < 0)
					return err;
				data = attr;
			}
			if (ops->validate) {
				err = ops->validate(tb, data);
				if (err < 0)
					return err;
			}
		}

		if (dev) {
			int modified = 0;

			if (nlh->nlmsg_flags & NLM_F_EXCL)
				return -EEXIST;
			if (nlh->nlmsg_flags & NLM_F_REPLACE)
				return -EOPNOTSUPP;

			if (linkinfo[IFLA_INFO_DATA]) {
				if (!ops || ops != dev->rtnl_link_ops ||
				    !ops->changelink)
					return -EOPNOTSUPP;

				err = ops->changelink(dev, tb, data);
				if (err < 0)
					return err;
				modified = 1;
			}

			return do_setlink(dev, ifm, tb, ifname, modified);
		}

		if (!(nlh->nlmsg_flags & NLM_F_CREATE))
			return -ENODEV;

		if (ifm->ifi_index || ifm->ifi_flags || ifm->ifi_change)
			return -EOPNOTSUPP;
		if (tb[IFLA_MAP] || tb[IFLA_MASTER] || tb[IFLA_PROTINFO])
			return -EOPNOTSUPP;

		if (!ops) {
#ifdef CONFIG_MODULES
			if (kind[0]) {
				__rtnl_unlock();
				request_module("rtnl-link-%s", kind);
				rtnl_lock();
				ops = rtnl_link_ops_get(kind);
				if (ops)
					goto replay;
			}
#endif
			return -EOPNOTSUPP;
		}

		if (!ifname[0])
			snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);

		dev = rtnl_create_link(net, ifname, ops, tb);

		if (IS_ERR(dev))
			err = PTR_ERR(dev);
		else if (ops->newlink)
			err = ops->newlink(dev, tb, data);
		else
			err = register_netdevice(dev);

		if (err < 0 && !IS_ERR(dev))
			free_netdev(dev);
		return err;
	}
}

static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct ifinfomsg *ifm;
	char ifname[IFNAMSIZ];
	struct nlattr *tb[IFLA_MAX+1];
	struct net_device *dev = NULL;
	struct sk_buff *nskb;
	int err;

	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
	if (err < 0)
		return err;

	if (tb[IFLA_IFNAME])
		nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);

	ifm = nlmsg_data(nlh);
	if (ifm->ifi_index > 0)
		dev = __dev_get_by_index(net, ifm->ifi_index);
	else if (tb[IFLA_IFNAME])
		dev = __dev_get_by_name(net, ifname);
	else
		return -EINVAL;

	if (dev == NULL)
		return -ENODEV;

	nskb = nlmsg_new(if_nlmsg_size(dev), GFP_KERNEL);
	if (nskb == NULL)
		return -ENOBUFS;

	err = rtnl_fill_ifinfo(nskb, dev, RTM_NEWLINK, NETLINK_CB(skb).pid,
			       nlh->nlmsg_seq, 0, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in if_nlmsg_size */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(nskb);
	} else
		err = rtnl_unicast(nskb, net, NETLINK_CB(skb).pid);

	return err;
}

static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
{
	int idx;
	int s_idx = cb->family;

	if (s_idx == 0)
		s_idx = 1;
	for (idx=1; idx<NPROTO; idx++) {
		int type = cb->nlh->nlmsg_type-RTM_BASE;
		if (idx < s_idx || idx == PF_PACKET)
			continue;
		if (rtnl_msg_handlers[idx] == NULL ||
		    rtnl_msg_handlers[idx][type].dumpit == NULL)
			continue;
		if (idx > s_idx)
			memset(&cb->args[0], 0, sizeof(cb->args));
		if (rtnl_msg_handlers[idx][type].dumpit(skb, cb))
			break;
	}
	cb->family = idx;

	return skb->len;
}

void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(if_nlmsg_size(dev), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = rtnl_fill_ifinfo(skb, dev, type, 0, 0, change, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in if_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_LINK, err);
}
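
/*
 * Note: rtmsg_ifinfo() is how link state changes reach the RTNLGRP_LINK
 * multicast group. Drivers normally trigger it indirectly, e.g. via
 * netdev_state_change() or the register/unregister notifier events
 * handled by rtnetlink_event() below, rather than calling it themselves.
 */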

/* Protected by RTNL semaphore. */
static struct rtattr **rta_buf;
static int rtattr_max;

/* Process one rtnetlink message. */

static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	rtnl_doit_func doit;
	int sz_idx, kind;
	int min_len;
	int family;
	int type;
	int err;

	type = nlh->nlmsg_type;
	if (type > RTM_MAX)
		return -EOPNOTSUPP;

	type -= RTM_BASE;

	/* All the messages must have at least 1 byte length */
	if (nlh->nlmsg_len < NLMSG_LENGTH(sizeof(struct rtgenmsg)))
		return 0;

	family = ((struct rtgenmsg*)NLMSG_DATA(nlh))->rtgen_family;
	if (family >= NPROTO)
		return -EAFNOSUPPORT;

	sz_idx = type>>2;
	kind = type&3;

	if (kind != 2 && security_netlink_recv(skb, CAP_NET_ADMIN))
		return -EPERM;

	if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
		struct sock *rtnl;
		rtnl_dumpit_func dumpit;

		dumpit = rtnl_get_dumpit(family, type);
		if (dumpit == NULL)
			return -EOPNOTSUPP;

		__rtnl_unlock();
		rtnl = net->rtnl;
		err = netlink_dump_start(rtnl, skb, nlh, dumpit, NULL);
		rtnl_lock();
		return err;
	}

	memset(rta_buf, 0, (rtattr_max * sizeof(struct rtattr *)));

	min_len = rtm_min[sz_idx];
	if (nlh->nlmsg_len < min_len)
		return -EINVAL;

	if (nlh->nlmsg_len > min_len) {
		int attrlen = nlh->nlmsg_len - NLMSG_ALIGN(min_len);
		struct rtattr *attr = (void*)nlh + NLMSG_ALIGN(min_len);

		while (RTA_OK(attr, attrlen)) {
			unsigned flavor = attr->rta_type;
			if (flavor) {
				if (flavor > rta_max[sz_idx])
					return -EINVAL;
				rta_buf[flavor-1] = attr;
			}
			attr = RTA_NEXT(attr, attrlen);
		}
	}

	doit = rtnl_get_doit(family, type);
	if (doit == NULL)
		return -EOPNOTSUPP;

	return doit(skb, nlh, (void *)&rta_buf[0]);
}

static void rtnetlink_rcv(struct sk_buff *skb)
{
	rtnl_lock();
	netlink_rcv_skb(skb, &rtnetlink_rcv_msg);
	rtnl_unlock();
}
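
/*
 * Worked example (illustrative) of the dispatch arithmetic in
 * rtnetlink_rcv_msg() above: message types come in groups of four
 * (NEW, DEL, GET, SET), so for RTM_GETLINK type - RTM_BASE == 2,
 * sz_idx = 2 >> 2 == 0 (the RTM_NEWLINK slot of rtm_min[]/rta_max[])
 * and kind = 2 & 3 == 2, i.e. a GET. GET is the only kind accepted
 * without CAP_NET_ADMIN and the one that may be handled as a dump.
 */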

static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	switch (event) {
	case NETDEV_UNREGISTER:
		rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
		break;
	case NETDEV_REGISTER:
		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
		break;
	case NETDEV_UP:
	case NETDEV_DOWN:
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
		break;
	case NETDEV_CHANGE:
	case NETDEV_GOING_DOWN:
		break;
	default:
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block rtnetlink_dev_notifier = {
	.notifier_call	= rtnetlink_event,
};


static int rtnetlink_net_init(struct net *net)
{
	struct sock *sk;
	sk = netlink_kernel_create(net, NETLINK_ROUTE, RTNLGRP_MAX,
				   rtnetlink_rcv, &rtnl_mutex, THIS_MODULE);
	if (!sk)
		return -ENOMEM;
	net->rtnl = sk;
	return 0;
}

static void rtnetlink_net_exit(struct net *net)
{
	netlink_kernel_release(net->rtnl);
	net->rtnl = NULL;
}

static struct pernet_operations rtnetlink_net_ops = {
	.init = rtnetlink_net_init,
	.exit = rtnetlink_net_exit,
};

void __init rtnetlink_init(void)
{
	int i;

	rtattr_max = 0;
	for (i = 0; i < ARRAY_SIZE(rta_max); i++)
		if (rta_max[i] > rtattr_max)
			rtattr_max = rta_max[i];
	rta_buf = kmalloc(rtattr_max * sizeof(struct rtattr *), GFP_KERNEL);
	if (!rta_buf)
		panic("rtnetlink_init: cannot allocate rta_buf\n");

	if (register_pernet_subsys(&rtnetlink_net_ops))
		panic("rtnetlink_init: cannot initialize rtnetlink\n");

	netlink_set_nonroot(NETLINK_ROUTE, NL_NONROOT_RECV);
	register_netdevice_notifier(&rtnetlink_dev_notifier);

	rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink, rtnl_dump_ifinfo);
	rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL);
	rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL);

	rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all);
	rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all);
}

EXPORT_SYMBOL(__rta_fill);
EXPORT_SYMBOL(rtnetlink_put_metrics);
EXPORT_SYMBOL(rtnl_lock);
EXPORT_SYMBOL(rtnl_trylock);
EXPORT_SYMBOL(rtnl_unlock);
EXPORT_SYMBOL(rtnl_is_locked);
EXPORT_SYMBOL(rtnl_unicast);
EXPORT_SYMBOL(rtnl_notify);
EXPORT_SYMBOL(rtnl_set_sk_err);
EXPORT_SYMBOL(rtnl_create_link);
EXPORT_SYMBOL(ifla_policy);