/*
 * drivers/net/bond/bond_netlink.c - Netlink interface for bonding
 * Copyright (c) 2013 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2013 Scott Feldman <sfeldma@cumulusnetworks.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_link.h>
#include <linux/if_ether.h>
#include <net/netlink.h>
#include <net/rtnetlink.h>
#include "bonding.h"

static size_t bond_get_slave_size(const struct net_device *bond_dev,
				  const struct net_device *slave_dev)
{
	return nla_total_size(sizeof(u8)) +	/* IFLA_BOND_SLAVE_STATE */
		nla_total_size(sizeof(u8)) +	/* IFLA_BOND_SLAVE_MII_STATUS */
		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_SLAVE_LINK_FAILURE_COUNT */
		nla_total_size(MAX_ADDR_LEN) +	/* IFLA_BOND_SLAVE_PERM_HWADDR */
		nla_total_size(sizeof(u16)) +	/* IFLA_BOND_SLAVE_QUEUE_ID */
		nla_total_size(sizeof(u16)) +	/* IFLA_BOND_SLAVE_AD_AGGREGATOR_ID */
		0;
}

static int bond_fill_slave_info(struct sk_buff *skb,
				const struct net_device *bond_dev,
				const struct net_device *slave_dev)
{
	struct slave *slave = bond_slave_get_rtnl(slave_dev);

	if (nla_put_u8(skb, IFLA_BOND_SLAVE_STATE, bond_slave_state(slave)))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_BOND_SLAVE_MII_STATUS, slave->link))
		goto nla_put_failure;

	if (nla_put_u32(skb, IFLA_BOND_SLAVE_LINK_FAILURE_COUNT,
			slave->link_failure_count))
		goto nla_put_failure;

	if (nla_put(skb, IFLA_BOND_SLAVE_PERM_HWADDR,
		    slave_dev->addr_len, slave->perm_hwaddr))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_BOND_SLAVE_QUEUE_ID, slave->queue_id))
		goto nla_put_failure;

	if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
		const struct aggregator *agg;

		agg = SLAVE_AD_INFO(slave)->port.aggregator;
		if (agg)
			if (nla_put_u16(skb, IFLA_BOND_SLAVE_AD_AGGREGATOR_ID,
					agg->aggregator_identifier))
				goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
	[IFLA_BOND_MODE]		= { .type = NLA_U8 },
	[IFLA_BOND_ACTIVE_SLAVE]	= { .type = NLA_U32 },
	[IFLA_BOND_MIIMON]		= { .type = NLA_U32 },
	[IFLA_BOND_UPDELAY]		= { .type = NLA_U32 },
	[IFLA_BOND_DOWNDELAY]		= { .type = NLA_U32 },
	[IFLA_BOND_USE_CARRIER]		= { .type = NLA_U8 },
	[IFLA_BOND_ARP_INTERVAL]	= { .type = NLA_U32 },
	[IFLA_BOND_ARP_IP_TARGET]	= { .type = NLA_NESTED },
	[IFLA_BOND_ARP_VALIDATE]	= { .type = NLA_U32 },
	[IFLA_BOND_ARP_ALL_TARGETS]	= { .type = NLA_U32 },
	[IFLA_BOND_PRIMARY]		= { .type = NLA_U32 },
	[IFLA_BOND_PRIMARY_RESELECT]	= { .type = NLA_U8 },
	[IFLA_BOND_FAIL_OVER_MAC]	= { .type = NLA_U8 },
	[IFLA_BOND_XMIT_HASH_POLICY]	= { .type = NLA_U8 },
	[IFLA_BOND_RESEND_IGMP]		= { .type = NLA_U32 },
	[IFLA_BOND_NUM_PEER_NOTIF]	= { .type = NLA_U8 },
	[IFLA_BOND_ALL_SLAVES_ACTIVE]	= { .type = NLA_U8 },
	[IFLA_BOND_MIN_LINKS]		= { .type = NLA_U32 },
	[IFLA_BOND_LP_INTERVAL]		= { .type = NLA_U32 },
	[IFLA_BOND_PACKETS_PER_SLAVE]	= { .type = NLA_U32 },
	[IFLA_BOND_AD_LACP_RATE]	= { .type = NLA_U8 },
	[IFLA_BOND_AD_SELECT]		= { .type = NLA_U8 },
	[IFLA_BOND_AD_INFO]		= { .type = NLA_NESTED },
};

static int bond_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	return 0;
}
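/*
 * bond_changelink() applies any IFLA_BOND_* attributes present in @data to
 * the bond through the bond_opt framework (__bond_opt_set()), translating
 * ifindex-valued attributes such as IFLA_BOND_ACTIVE_SLAVE and
 * IFLA_BOND_PRIMARY into device names first.
 *
 * For illustration only (exact option names depend on the iproute2 version
 * in use), userspace requests such as:
 *
 *	ip link add bond0 type bond mode 802.3ad miimon 100
 *	ip link set bond0 type bond downdelay 200
 *
 * arrive here as RTM_NEWLINK messages whose attributes are validated
 * against bond_policy above and then applied one by one below.
 */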
static int bond_changelink(struct net_device *bond_dev,
			   struct nlattr *tb[], struct nlattr *data[])
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct bond_opt_value newval;
	int miimon = 0;
	int err;

	if (!data)
		return 0;

	if (data[IFLA_BOND_MODE]) {
		int mode = nla_get_u8(data[IFLA_BOND_MODE]);

		bond_opt_initval(&newval, mode);
		err = __bond_opt_set(bond, BOND_OPT_MODE, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_ACTIVE_SLAVE]) {
		int ifindex = nla_get_u32(data[IFLA_BOND_ACTIVE_SLAVE]);
		struct net_device *slave_dev;
		char *active_slave = "";

		if (ifindex != 0) {
			slave_dev = __dev_get_by_index(dev_net(bond_dev),
						       ifindex);
			if (!slave_dev)
				return -ENODEV;
			active_slave = slave_dev->name;
		}
		bond_opt_initstr(&newval, active_slave);
		err = __bond_opt_set(bond, BOND_OPT_ACTIVE_SLAVE, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_MIIMON]) {
		miimon = nla_get_u32(data[IFLA_BOND_MIIMON]);

		bond_opt_initval(&newval, miimon);
		err = __bond_opt_set(bond, BOND_OPT_MIIMON, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_UPDELAY]) {
		int updelay = nla_get_u32(data[IFLA_BOND_UPDELAY]);

		bond_opt_initval(&newval, updelay);
		err = __bond_opt_set(bond, BOND_OPT_UPDELAY, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_DOWNDELAY]) {
		int downdelay = nla_get_u32(data[IFLA_BOND_DOWNDELAY]);

		bond_opt_initval(&newval, downdelay);
		err = __bond_opt_set(bond, BOND_OPT_DOWNDELAY, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_USE_CARRIER]) {
		int use_carrier = nla_get_u8(data[IFLA_BOND_USE_CARRIER]);

		bond_opt_initval(&newval, use_carrier);
		err = __bond_opt_set(bond, BOND_OPT_USE_CARRIER, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_ARP_INTERVAL]) {
		int arp_interval = nla_get_u32(data[IFLA_BOND_ARP_INTERVAL]);

		if (arp_interval && miimon) {
			pr_err("%s: ARP monitoring cannot be used with MII monitoring\n",
			       bond->dev->name);
			return -EINVAL;
		}

		bond_opt_initval(&newval, arp_interval);
		err = __bond_opt_set(bond, BOND_OPT_ARP_INTERVAL, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_ARP_IP_TARGET]) {
		struct nlattr *attr;
		int i = 0, rem;

		bond_option_arp_ip_targets_clear(bond);
		nla_for_each_nested(attr, data[IFLA_BOND_ARP_IP_TARGET], rem) {
			__be32 target = nla_get_be32(attr);

			bond_opt_initval(&newval, (__force u64)target);
			err = __bond_opt_set(bond, BOND_OPT_ARP_TARGETS,
					     &newval);
			if (err)
				break;
			i++;
		}
		if (i == 0 && bond->params.arp_interval)
			pr_warn("%s: Removing last arp target with arp_interval on\n",
				bond->dev->name);
		if (err)
			return err;
	}
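	/* As with the arp_interval check above, "miimon" here is a local
	 * variable that is only non-zero when IFLA_BOND_MIIMON was supplied
	 * in this same request; it does not reflect a previously configured
	 * miimon value.
	 */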
	if (data[IFLA_BOND_ARP_VALIDATE]) {
		int arp_validate = nla_get_u32(data[IFLA_BOND_ARP_VALIDATE]);

		if (arp_validate && miimon) {
			pr_err("%s: ARP validating cannot be used with MII monitoring\n",
			       bond->dev->name);
			return -EINVAL;
		}

		bond_opt_initval(&newval, arp_validate);
		err = __bond_opt_set(bond, BOND_OPT_ARP_VALIDATE, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_ARP_ALL_TARGETS]) {
		int arp_all_targets =
			nla_get_u32(data[IFLA_BOND_ARP_ALL_TARGETS]);

		bond_opt_initval(&newval, arp_all_targets);
		err = __bond_opt_set(bond, BOND_OPT_ARP_ALL_TARGETS, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_PRIMARY]) {
		int ifindex = nla_get_u32(data[IFLA_BOND_PRIMARY]);
		struct net_device *dev;
		char *primary = "";

		dev = __dev_get_by_index(dev_net(bond_dev), ifindex);
		if (dev)
			primary = dev->name;

		bond_opt_initstr(&newval, primary);
		err = __bond_opt_set(bond, BOND_OPT_PRIMARY, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_PRIMARY_RESELECT]) {
		int primary_reselect =
			nla_get_u8(data[IFLA_BOND_PRIMARY_RESELECT]);

		bond_opt_initval(&newval, primary_reselect);
		err = __bond_opt_set(bond, BOND_OPT_PRIMARY_RESELECT, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_FAIL_OVER_MAC]) {
		int fail_over_mac =
			nla_get_u8(data[IFLA_BOND_FAIL_OVER_MAC]);

		bond_opt_initval(&newval, fail_over_mac);
		err = __bond_opt_set(bond, BOND_OPT_FAIL_OVER_MAC, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_XMIT_HASH_POLICY]) {
		int xmit_hash_policy =
			nla_get_u8(data[IFLA_BOND_XMIT_HASH_POLICY]);

		bond_opt_initval(&newval, xmit_hash_policy);
		err = __bond_opt_set(bond, BOND_OPT_XMIT_HASH, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_RESEND_IGMP]) {
		int resend_igmp =
			nla_get_u32(data[IFLA_BOND_RESEND_IGMP]);

		bond_opt_initval(&newval, resend_igmp);
		err = __bond_opt_set(bond, BOND_OPT_RESEND_IGMP, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_NUM_PEER_NOTIF]) {
		int num_peer_notif =
			nla_get_u8(data[IFLA_BOND_NUM_PEER_NOTIF]);

		bond_opt_initval(&newval, num_peer_notif);
		err = __bond_opt_set(bond, BOND_OPT_NUM_PEER_NOTIF, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_ALL_SLAVES_ACTIVE]) {
		int all_slaves_active =
			nla_get_u8(data[IFLA_BOND_ALL_SLAVES_ACTIVE]);

		bond_opt_initval(&newval, all_slaves_active);
		err = __bond_opt_set(bond, BOND_OPT_ALL_SLAVES_ACTIVE, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_MIN_LINKS]) {
		int min_links =
			nla_get_u32(data[IFLA_BOND_MIN_LINKS]);

		bond_opt_initval(&newval, min_links);
		err = __bond_opt_set(bond, BOND_OPT_MINLINKS, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_LP_INTERVAL]) {
		int lp_interval =
			nla_get_u32(data[IFLA_BOND_LP_INTERVAL]);

		bond_opt_initval(&newval, lp_interval);
		err = __bond_opt_set(bond, BOND_OPT_LP_INTERVAL, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_PACKETS_PER_SLAVE]) {
		int packets_per_slave =
			nla_get_u32(data[IFLA_BOND_PACKETS_PER_SLAVE]);

		bond_opt_initval(&newval, packets_per_slave);
		err = __bond_opt_set(bond, BOND_OPT_PACKETS_PER_SLAVE, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_AD_LACP_RATE]) {
		int lacp_rate =
			nla_get_u8(data[IFLA_BOND_AD_LACP_RATE]);

		bond_opt_initval(&newval, lacp_rate);
		err = __bond_opt_set(bond, BOND_OPT_LACP_RATE, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_AD_SELECT]) {
		int ad_select =
			nla_get_u8(data[IFLA_BOND_AD_SELECT]);

		bond_opt_initval(&newval, ad_select);
		err = __bond_opt_set(bond, BOND_OPT_AD_SELECT, &newval);
		if (err)
			return err;
	}
	return 0;
}
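/*
 * bond_newlink() handles creation of a new bond device: the attributes are
 * applied via bond_changelink() before the device is registered, so a bond
 * created with "ip link add ... type bond ..." comes up already configured.
 */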
static int bond_newlink(struct net *src_net, struct net_device *bond_dev,
			struct nlattr *tb[], struct nlattr *data[])
{
	int err;

	err = bond_changelink(bond_dev, tb, data);
	if (err < 0)
		return err;

	return register_netdevice(bond_dev);
}

static size_t bond_get_size(const struct net_device *bond_dev)
{
	return nla_total_size(sizeof(u8)) +	/* IFLA_BOND_MODE */
		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_ACTIVE_SLAVE */
		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_MIIMON */
		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_UPDELAY */
		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_DOWNDELAY */
		nla_total_size(sizeof(u8)) +	/* IFLA_BOND_USE_CARRIER */
		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_ARP_INTERVAL */
						/* IFLA_BOND_ARP_IP_TARGET */
		nla_total_size(sizeof(struct nlattr)) +
		nla_total_size(sizeof(u32)) * BOND_MAX_ARP_TARGETS +
		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_ARP_VALIDATE */
		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_ARP_ALL_TARGETS */
		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_PRIMARY */
		nla_total_size(sizeof(u8)) +	/* IFLA_BOND_PRIMARY_RESELECT */
		nla_total_size(sizeof(u8)) +	/* IFLA_BOND_FAIL_OVER_MAC */
		nla_total_size(sizeof(u8)) +	/* IFLA_BOND_XMIT_HASH_POLICY */
		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_RESEND_IGMP */
		nla_total_size(sizeof(u8)) +	/* IFLA_BOND_NUM_PEER_NOTIF */
		nla_total_size(sizeof(u8)) +	/* IFLA_BOND_ALL_SLAVES_ACTIVE */
		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_MIN_LINKS */
		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_LP_INTERVAL */
		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_PACKETS_PER_SLAVE */
		nla_total_size(sizeof(u8)) +	/* IFLA_BOND_AD_LACP_RATE */
		nla_total_size(sizeof(u8)) +	/* IFLA_BOND_AD_SELECT */
		nla_total_size(sizeof(struct nlattr)) + /* IFLA_BOND_AD_INFO */
		nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_AGGREGATOR */
		nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_NUM_PORTS */
		nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_ACTOR_KEY */
		nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_PARTNER_KEY*/
		nla_total_size(ETH_ALEN) +    /* IFLA_BOND_AD_INFO_PARTNER_MAC*/
		0;
}
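/*
 * bond_fill_info() dumps the current bond configuration into an RTM_NEWLINK
 * message (e.g. for "ip -d link show bond0").  Every attribute that can be
 * emitted here must be accounted for in bond_get_size() above, otherwise the
 * reserved message space may be too small and the dump fails with -EMSGSIZE.
 */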
static int bond_fill_info(struct sk_buff *skb,
			  const struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct net_device *slave_dev = bond_option_active_slave_get(bond);
	struct nlattr *targets;
	unsigned int packets_per_slave;
	int i, targets_added;

	if (nla_put_u8(skb, IFLA_BOND_MODE, BOND_MODE(bond)))
		goto nla_put_failure;

	if (slave_dev &&
	    nla_put_u32(skb, IFLA_BOND_ACTIVE_SLAVE, slave_dev->ifindex))
		goto nla_put_failure;

	if (nla_put_u32(skb, IFLA_BOND_MIIMON, bond->params.miimon))
		goto nla_put_failure;

	if (nla_put_u32(skb, IFLA_BOND_UPDELAY,
			bond->params.updelay * bond->params.miimon))
		goto nla_put_failure;

	if (nla_put_u32(skb, IFLA_BOND_DOWNDELAY,
			bond->params.downdelay * bond->params.miimon))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_BOND_USE_CARRIER, bond->params.use_carrier))
		goto nla_put_failure;

	if (nla_put_u32(skb, IFLA_BOND_ARP_INTERVAL, bond->params.arp_interval))
		goto nla_put_failure;

	targets = nla_nest_start(skb, IFLA_BOND_ARP_IP_TARGET);
	if (!targets)
		goto nla_put_failure;

	targets_added = 0;
	for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) {
		if (bond->params.arp_targets[i]) {
			if (nla_put_be32(skb, i, bond->params.arp_targets[i]))
				goto nla_put_failure;
			targets_added = 1;
		}
	}

	if (targets_added)
		nla_nest_end(skb, targets);
	else
		nla_nest_cancel(skb, targets);

	if (nla_put_u32(skb, IFLA_BOND_ARP_VALIDATE, bond->params.arp_validate))
		goto nla_put_failure;

	if (nla_put_u32(skb, IFLA_BOND_ARP_ALL_TARGETS,
			bond->params.arp_all_targets))
		goto nla_put_failure;

	if (bond->primary_slave &&
	    nla_put_u32(skb, IFLA_BOND_PRIMARY,
			bond->primary_slave->dev->ifindex))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_BOND_PRIMARY_RESELECT,
		       bond->params.primary_reselect))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_BOND_FAIL_OVER_MAC,
		       bond->params.fail_over_mac))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_BOND_XMIT_HASH_POLICY,
		       bond->params.xmit_policy))
		goto nla_put_failure;

	if (nla_put_u32(skb, IFLA_BOND_RESEND_IGMP,
			bond->params.resend_igmp))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_BOND_NUM_PEER_NOTIF,
		       bond->params.num_peer_notif))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_BOND_ALL_SLAVES_ACTIVE,
		       bond->params.all_slaves_active))
		goto nla_put_failure;

	if (nla_put_u32(skb, IFLA_BOND_MIN_LINKS,
			bond->params.min_links))
		goto nla_put_failure;

	if (nla_put_u32(skb, IFLA_BOND_LP_INTERVAL,
			bond->params.lp_interval))
		goto nla_put_failure;

	packets_per_slave = bond->params.packets_per_slave;
	if (nla_put_u32(skb, IFLA_BOND_PACKETS_PER_SLAVE,
			packets_per_slave))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_BOND_AD_LACP_RATE,
		       bond->params.lacp_fast))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_BOND_AD_SELECT,
		       bond->params.ad_select))
		goto nla_put_failure;

	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
		struct ad_info info;

		if (!bond_3ad_get_active_agg_info(bond, &info)) {
			struct nlattr *nest;

			nest = nla_nest_start(skb, IFLA_BOND_AD_INFO);
			if (!nest)
				goto nla_put_failure;

			if (nla_put_u16(skb, IFLA_BOND_AD_INFO_AGGREGATOR,
					info.aggregator_id))
				goto nla_put_failure;
			if (nla_put_u16(skb, IFLA_BOND_AD_INFO_NUM_PORTS,
					info.ports))
				goto nla_put_failure;
			if (nla_put_u16(skb, IFLA_BOND_AD_INFO_ACTOR_KEY,
					info.actor_key))
				goto nla_put_failure;
			if (nla_put_u16(skb, IFLA_BOND_AD_INFO_PARTNER_KEY,
					info.partner_key))
				goto nla_put_failure;
			if (nla_put(skb, IFLA_BOND_AD_INFO_PARTNER_MAC,
				    sizeof(info.partner_system),
				    &info.partner_system))
				goto nla_put_failure;

			nla_nest_end(skb, nest);
		}
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
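/*
 * rtnl_link_ops for the "bond" link kind.  Registered in bond_netlink_init()
 * below and unregistered again in bond_netlink_fini().
 */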
struct rtnl_link_ops bond_link_ops __read_mostly = {
	.kind			= "bond",
	.priv_size		= sizeof(struct bonding),
	.setup			= bond_setup,
	.maxtype		= IFLA_BOND_MAX,
	.policy			= bond_policy,
	.validate		= bond_validate,
	.newlink		= bond_newlink,
	.changelink		= bond_changelink,
	.get_size		= bond_get_size,
	.fill_info		= bond_fill_info,
	.get_num_tx_queues	= bond_get_num_tx_queues,
	.get_num_rx_queues	= bond_get_num_tx_queues, /* Use the same number
							     as for TX queues */
	.get_slave_size		= bond_get_slave_size,
	.fill_slave_info	= bond_fill_slave_info,
};

int __init bond_netlink_init(void)
{
	return rtnl_link_register(&bond_link_ops);
}

void bond_netlink_fini(void)
{
	rtnl_link_unregister(&bond_link_ops);
}

MODULE_ALIAS_RTNL_LINK("bond");