/*
 * originally based on the dummy device.
 *
 * Copyright 1999, Thomas Davis, tadavis@lbl.gov.
 * Licensed under the GPL. Based on dummy.c, and eql.c devices.
 *
 * bonding.c: an Ethernet Bonding driver
 *
 * This is useful to talk to Cisco EtherChannel compatible equipment:
 *	Cisco 5500
 *	Sun Trunking (Solaris)
 *	Alteon AceDirector Trunks
 *	Linux Bonding
 *	and probably many L2 switches ...
 *
 * How it works:
 *    ifconfig bond0 ipaddress netmask up
 *	will set up a network device with an IP address. No MAC address
 *	will be assigned at this time. The hw MAC address will come from
 *	the first slave bonded to the channel. All slaves will then use
 *	this hw MAC address.
 *
 *    ifconfig bond0 down
 *	will release all slaves, marking them as down.
 *
 *    ifenslave bond0 eth0
 *	will attach eth0 to bond0 as a slave. eth0's hw MAC address will
 *	either
 *	a: be used as the bond's initial MAC address, or
 *	b: if bond0 already has a hw MAC address, eth0's hw MAC address
 *	   will then be set from bond0.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <net/ip.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/socket.h>
#include <linux/ctype.h>
#include <linux/inet.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <asm/dma.h>
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/smp.h>
#include <linux/if_ether.h>
#include <net/arp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_bonding.h>
#include <linux/jiffies.h>
#include <linux/preempt.h>
#include <net/route.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/pkt_sched.h>
#include <linux/rculist.h>
#include <net/flow_dissector.h>
#include <net/xfrm.h>
#include <net/bonding.h>
#include <net/bond_3ad.h>
#include <net/bond_alb.h>
#if IS_ENABLED(CONFIG_TLS_DEVICE)
#include <net/tls.h>
#endif

#include "bonding_priv.h"

/*---------------------------- Module parameters ----------------------------*/
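/* Illustrative only (not part of the driver): a typical way to load the
 * module with a few of the parameters declared below, assuming a
 * hypothetical two-port 802.3ad setup:
 *
 *	modprobe bonding mode=802.3ad miimon=100 lacp_rate=fast
 *
 * Parameter names match the module_param() declarations that follow.
 */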
/* monitor all links that often (in milliseconds). <=0 disables monitoring */

static int max_bonds	= BOND_DEFAULT_MAX_BONDS;
static int tx_queues	= BOND_DEFAULT_TX_QUEUES;
static int num_peer_notif = 1;
static int miimon;
static int updelay;
static int downdelay;
static int use_carrier	= 1;
static char *mode;
static char *primary;
static char *primary_reselect;
static char *lacp_rate;
static int min_links;
static char *ad_select;
static char *xmit_hash_policy;
static int arp_interval;
static char *arp_ip_target[BOND_MAX_ARP_TARGETS];
static char *arp_validate;
static char *arp_all_targets;
static char *fail_over_mac;
static int all_slaves_active;
static struct bond_params bonding_defaults;
static int resend_igmp = BOND_DEFAULT_RESEND_IGMP;
static int packets_per_slave = 1;
static int lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;

module_param(max_bonds, int, 0);
MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
module_param(tx_queues, int, 0);
MODULE_PARM_DESC(tx_queues, "Max number of transmit queues (default = 16)");
module_param_named(num_grat_arp, num_peer_notif, int, 0644);
MODULE_PARM_DESC(num_grat_arp, "Number of peer notifications to send on "
			       "failover event (alias of num_unsol_na)");
module_param_named(num_unsol_na, num_peer_notif, int, 0644);
MODULE_PARM_DESC(num_unsol_na, "Number of peer notifications to send on "
			       "failover event (alias of num_grat_arp)");
module_param(miimon, int, 0);
MODULE_PARM_DESC(miimon, "Link check interval in milliseconds");
module_param(updelay, int, 0);
MODULE_PARM_DESC(updelay, "Delay before considering link up, in milliseconds");
module_param(downdelay, int, 0);
MODULE_PARM_DESC(downdelay, "Delay before considering link down, "
			    "in milliseconds");
module_param(use_carrier, int, 0);
MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; "
			      "0 for off, 1 for on (default)");
module_param(mode, charp, 0);
MODULE_PARM_DESC(mode, "Mode of operation; 0 for balance-rr, "
		       "1 for active-backup, 2 for balance-xor, "
		       "3 for broadcast, 4 for 802.3ad, 5 for balance-tlb, "
		       "6 for balance-alb");
module_param(primary, charp, 0);
MODULE_PARM_DESC(primary, "Primary network device to use");
module_param(primary_reselect, charp, 0);
MODULE_PARM_DESC(primary_reselect, "Reselect primary slave "
				   "once it comes up; "
				   "0 for always (default), "
				   "1 for only if speed of primary is "
				   "better, "
				   "2 for only on active slave "
				   "failure");
module_param(lacp_rate, charp, 0);
MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; "
			    "0 for slow, 1 for fast");
module_param(ad_select, charp, 0);
MODULE_PARM_DESC(ad_select, "802.3ad aggregation selection logic; "
			    "0 for stable (default), 1 for bandwidth, "
			    "2 for count");
module_param(min_links, int, 0);
MODULE_PARM_DESC(min_links, "Minimum number of available links before turning on carrier");

module_param(xmit_hash_policy, charp, 0);
MODULE_PARM_DESC(xmit_hash_policy, "balance-alb, balance-tlb, balance-xor, 802.3ad hashing method; "
				   "0 for layer 2 (default), 1 for layer 3+4, "
				   "2 for layer 2+3, 3 for encap layer 2+3, "
				   "4 for encap layer 3+4, 5 for vlan+srcmac");
module_param(arp_interval, int, 0);
MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds");
module_param_array(arp_ip_target, charp, NULL, 0);
MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form");
module_param(arp_validate, charp, 0);
MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes; "
			       "0 for none (default), 1 for active, "
			       "2 for backup, 3 for all");
module_param(arp_all_targets, charp, 0);
MODULE_PARM_DESC(arp_all_targets, "fail on any/all arp targets timeout; 0 for any (default), 1 for all");
module_param(fail_over_mac, charp, 0);
MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to "
				"the same MAC; 0 for none (default), "
				"1 for active, 2 for follow");
module_param(all_slaves_active, int, 0);
MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface "
				    "by setting active flag for all slaves; "
				    "0 for never (default), 1 for always.");
module_param(resend_igmp, int, 0);
MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on "
			      "link failure");
module_param(packets_per_slave, int, 0);
MODULE_PARM_DESC(packets_per_slave, "Packets to send per slave in balance-rr "
				    "mode; 0 for a random slave, 1 packet per "
				    "slave (default), >1 packets per slave.");
module_param(lp_interval, uint, 0);
MODULE_PARM_DESC(lp_interval, "The number of seconds between instances where "
			      "the bonding driver sends learning packets to "
			      "each slave's peer switch. The default is 1.");

/*----------------------------- Global variables ----------------------------*/

#ifdef CONFIG_NET_POLL_CONTROLLER
atomic_t netpoll_block_tx = ATOMIC_INIT(0);
#endif

unsigned int bond_net_id __read_mostly;

static const struct flow_dissector_key flow_keys_bonding_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v4addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v6addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_TIPC,
		.offset = offsetof(struct flow_keys, addrs.tipckey),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_PORTS,
		.offset = offsetof(struct flow_keys, ports),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_ICMP,
		.offset = offsetof(struct flow_keys, icmp),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_VLAN,
		.offset = offsetof(struct flow_keys, vlan),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL,
		.offset = offsetof(struct flow_keys, tags),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
		.offset = offsetof(struct flow_keys, keyid),
	},
};

static struct flow_dissector flow_keys_bonding __read_mostly;

/*-------------------------- Forward declarations ---------------------------*/

static int bond_init(struct net_device *bond_dev);
static void bond_uninit(struct net_device *bond_dev);
static void bond_get_stats(struct net_device *bond_dev,
			   struct rtnl_link_stats64 *stats);
static void bond_slave_arr_handler(struct work_struct *work);
static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
				  int mod);
static void bond_netdev_notify_work(struct work_struct *work);

/*---------------------------- General routines -----------------------------*/
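/**
 * bond_mode_name - map a BOND_MODE_* value to a human-readable string
 * @mode: bonding mode
 *
 * (Descriptive comment added for illustration.) Out-of-range values
 * yield "unknown"; for example, bond_mode_name(BOND_MODE_8023AD)
 * returns "IEEE 802.3ad Dynamic link aggregation".
 */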
const char *bond_mode_name(int mode)
{
	static const char *names[] = {
		[BOND_MODE_ROUNDROBIN] = "load balancing (round-robin)",
		[BOND_MODE_ACTIVEBACKUP] = "fault-tolerance (active-backup)",
		[BOND_MODE_XOR] = "load balancing (xor)",
		[BOND_MODE_BROADCAST] = "fault-tolerance (broadcast)",
		[BOND_MODE_8023AD] = "IEEE 802.3ad Dynamic link aggregation",
		[BOND_MODE_TLB] = "transmit load balancing",
		[BOND_MODE_ALB] = "adaptive load balancing",
	};

	if (mode < BOND_MODE_ROUNDROBIN || mode > BOND_MODE_ALB)
		return "unknown";

	return names[mode];
}

/**
 * bond_dev_queue_xmit - Prepare skb for xmit.
 *
 * @bond: bond device that got this skb for tx.
 * @skb: hw accel VLAN tagged skb to transmit
 * @slave_dev: slave that is supposed to xmit this skbuff
 */
netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
				struct net_device *slave_dev)
{
	skb->dev = slave_dev;

	BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
		     sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
	skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);

	if (unlikely(netpoll_tx_running(bond->dev)))
		return bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);

	return dev_queue_xmit(skb);
}

bool bond_sk_check(struct bonding *bond)
{
	switch (BOND_MODE(bond)) {
	case BOND_MODE_8023AD:
	case BOND_MODE_XOR:
		if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34)
			return true;
		fallthrough;
	default:
		return false;
	}
}

/*---------------------------------- VLAN -----------------------------------*/

/* In the following 2 functions, bond_vlan_rx_add_vid and bond_vlan_rx_kill_vid,
 * we don't protect the slave list iteration with a lock because:
 * a. This operation is performed in IOCTL context,
 * b. The operation is protected by the RTNL semaphore in the 8021q code,
 * c. Holding a lock with BH disabled while directly calling a base driver
 *    entry point is generally a BAD idea.
 *
 * The design of synchronization/protection for this operation in the 8021q
 * module is good for one or more VLAN devices over a single physical device
 * and cannot be extended for a teaming solution like bonding, so there is a
 * potential race condition here where a net device from the vlan group might
 * be referenced (either by a base driver or the 8021q code) while it is being
 * removed from the system. However, it turns out we're not making matters
 * worse, and if it works for regular VLAN usage it will work here too.
 */
/**
 * bond_vlan_rx_add_vid - Propagates adding an id to slaves
 * @bond_dev: bonding net device that got called
 * @proto: network protocol ID
 * @vid: vlan id being added
 */
static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
				__be16 proto, u16 vid)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave, *rollback_slave;
	struct list_head *iter;
	int res;

	bond_for_each_slave(bond, slave, iter) {
		res = vlan_vid_add(slave->dev, proto, vid);
		if (res)
			goto unwind;
	}

	return 0;

unwind:
	/* unwind to the slave that failed */
	bond_for_each_slave(bond, rollback_slave, iter) {
		if (rollback_slave == slave)
			break;

		vlan_vid_del(rollback_slave->dev, proto, vid);
	}

	return res;
}

/**
 * bond_vlan_rx_kill_vid - Propagates deleting an id to slaves
 * @bond_dev: bonding net device that got called
 * @proto: network protocol ID
 * @vid: vlan id being removed
 */
static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
				 __be16 proto, u16 vid)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct list_head *iter;
	struct slave *slave;

	bond_for_each_slave(bond, slave, iter)
		vlan_vid_del(slave->dev, proto, vid);

	if (bond_is_lb(bond))
		bond_alb_clear_vlan(bond, vid);

	return 0;
}

/*---------------------------------- XFRM -----------------------------------*/

#ifdef CONFIG_XFRM_OFFLOAD
/**
 * bond_ipsec_add_sa - program device with a security association
 * @xs: pointer to transformer state struct
 **/
static int bond_ipsec_add_sa(struct xfrm_state *xs)
{
	struct net_device *bond_dev = xs->xso.dev;
	struct bond_ipsec *ipsec;
	struct bonding *bond;
	struct slave *slave;
	int err;

	if (!bond_dev)
		return -EINVAL;

	rcu_read_lock();
	bond = netdev_priv(bond_dev);
	slave = rcu_dereference(bond->curr_active_slave);
	if (!slave) {
		rcu_read_unlock();
		return -ENODEV;
	}

	if (!slave->dev->xfrmdev_ops ||
	    !slave->dev->xfrmdev_ops->xdo_dev_state_add ||
	    netif_is_bond_master(slave->dev)) {
		slave_warn(bond_dev, slave->dev, "Slave does not support ipsec offload\n");
		rcu_read_unlock();
		return -EINVAL;
	}

	ipsec = kmalloc(sizeof(*ipsec), GFP_ATOMIC);
	if (!ipsec) {
		rcu_read_unlock();
		return -ENOMEM;
	}
	xs->xso.real_dev = slave->dev;

	err = slave->dev->xfrmdev_ops->xdo_dev_state_add(xs);
	if (!err) {
		ipsec->xs = xs;
		INIT_LIST_HEAD(&ipsec->list);
		spin_lock_bh(&bond->ipsec_lock);
		list_add(&ipsec->list, &bond->ipsec_list);
		spin_unlock_bh(&bond->ipsec_lock);
	} else {
		kfree(ipsec);
	}
	rcu_read_unlock();
	return err;
}
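/* bond_ipsec_add_sa_all - program each cached SA onto the active slave.
 *
 * (Descriptive comment added for illustration.) Called after a failover:
 * every xfrm_state cached on bond->ipsec_list is re-added through the
 * new active slave's xdo_dev_state_add(); entries that fail to install
 * are flagged by clearing xso.real_dev, so bond_ipsec_offload_ok()
 * below will refuse to offload them.
 */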
static void bond_ipsec_add_sa_all(struct bonding *bond)
{
	struct net_device *bond_dev = bond->dev;
	struct bond_ipsec *ipsec;
	struct slave *slave;

	rcu_read_lock();
	slave = rcu_dereference(bond->curr_active_slave);
	if (!slave)
		goto out;

	if (!slave->dev->xfrmdev_ops ||
	    !slave->dev->xfrmdev_ops->xdo_dev_state_add ||
	    netif_is_bond_master(slave->dev)) {
		spin_lock_bh(&bond->ipsec_lock);
		if (!list_empty(&bond->ipsec_list))
			slave_warn(bond_dev, slave->dev,
				   "%s: no slave xdo_dev_state_add\n",
				   __func__);
		spin_unlock_bh(&bond->ipsec_lock);
		goto out;
	}

	spin_lock_bh(&bond->ipsec_lock);
	list_for_each_entry(ipsec, &bond->ipsec_list, list) {
		ipsec->xs->xso.real_dev = slave->dev;
		if (slave->dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs)) {
			slave_warn(bond_dev, slave->dev, "%s: failed to add SA\n", __func__);
			ipsec->xs->xso.real_dev = NULL;
		}
	}
	spin_unlock_bh(&bond->ipsec_lock);
out:
	rcu_read_unlock();
}

/**
 * bond_ipsec_del_sa - clear out this specific SA
 * @xs: pointer to transformer state struct
 **/
static void bond_ipsec_del_sa(struct xfrm_state *xs)
{
	struct net_device *bond_dev = xs->xso.dev;
	struct bond_ipsec *ipsec;
	struct bonding *bond;
	struct slave *slave;

	if (!bond_dev)
		return;

	rcu_read_lock();
	bond = netdev_priv(bond_dev);
	slave = rcu_dereference(bond->curr_active_slave);

	if (!slave)
		goto out;

	if (!xs->xso.real_dev)
		goto out;

	WARN_ON(xs->xso.real_dev != slave->dev);

	if (!slave->dev->xfrmdev_ops ||
	    !slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
	    netif_is_bond_master(slave->dev)) {
		slave_warn(bond_dev, slave->dev, "%s: no slave xdo_dev_state_delete\n", __func__);
		goto out;
	}

	slave->dev->xfrmdev_ops->xdo_dev_state_delete(xs);
out:
	spin_lock_bh(&bond->ipsec_lock);
	list_for_each_entry(ipsec, &bond->ipsec_list, list) {
		if (ipsec->xs == xs) {
			list_del(&ipsec->list);
			kfree(ipsec);
			break;
		}
	}
	spin_unlock_bh(&bond->ipsec_lock);
	rcu_read_unlock();
}

static void bond_ipsec_del_sa_all(struct bonding *bond)
{
	struct net_device *bond_dev = bond->dev;
	struct bond_ipsec *ipsec;
	struct slave *slave;

	rcu_read_lock();
	slave = rcu_dereference(bond->curr_active_slave);
	if (!slave) {
		rcu_read_unlock();
		return;
	}

	spin_lock_bh(&bond->ipsec_lock);
	list_for_each_entry(ipsec, &bond->ipsec_list, list) {
		if (!ipsec->xs->xso.real_dev)
			continue;

		if (!slave->dev->xfrmdev_ops ||
		    !slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
		    netif_is_bond_master(slave->dev)) {
			slave_warn(bond_dev, slave->dev,
				   "%s: no slave xdo_dev_state_delete\n",
				   __func__);
		} else {
			slave->dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs);
		}
		ipsec->xs->xso.real_dev = NULL;
	}
	spin_unlock_bh(&bond->ipsec_lock);
	rcu_read_unlock();
}

/**
 * bond_ipsec_offload_ok - can this packet use the xfrm hw offload
 * @skb: current data packet
 * @xs: pointer to transformer state struct
 **/
static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
	struct net_device *bond_dev = xs->xso.dev;
	struct net_device *real_dev;
	struct slave *curr_active;
	struct bonding *bond;
	int err;

	bond = netdev_priv(bond_dev);
	rcu_read_lock();
	curr_active = rcu_dereference(bond->curr_active_slave);
	real_dev = curr_active->dev;

	if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
		err = false;
		goto out;
	}

	if (!xs->xso.real_dev) {
		err = false;
		goto out;
	}

	if (!real_dev->xfrmdev_ops ||
	    !real_dev->xfrmdev_ops->xdo_dev_offload_ok ||
	    netif_is_bond_master(real_dev)) {
		err = false;
		goto out;
	}

	err = real_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs);
out:
	rcu_read_unlock();
	return err;
}
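/* Stacked-offload note (added for clarity): the bond exposes these ops
 * and forwards each request to the current active slave, so hardware
 * IPsec offload only really works in active-backup mode, as enforced
 * in bond_ipsec_offload_ok() above.
 */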
static const struct xfrmdev_ops bond_xfrmdev_ops = {
	.xdo_dev_state_add = bond_ipsec_add_sa,
	.xdo_dev_state_delete = bond_ipsec_del_sa,
	.xdo_dev_offload_ok = bond_ipsec_offload_ok,
};
#endif /* CONFIG_XFRM_OFFLOAD */

/*------------------------------- Link status -------------------------------*/

/* Set the carrier state for the master according to the state of its
 * slaves. If any slaves are up, the master is up. In 802.3ad mode,
 * do special 802.3ad magic.
 *
 * Returns zero if carrier state does not change, nonzero if it does.
 */
int bond_set_carrier(struct bonding *bond)
{
	struct list_head *iter;
	struct slave *slave;

	if (!bond_has_slaves(bond))
		goto down;

	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		return bond_3ad_set_carrier(bond);

	bond_for_each_slave(bond, slave, iter) {
		if (slave->link == BOND_LINK_UP) {
			if (!netif_carrier_ok(bond->dev)) {
				netif_carrier_on(bond->dev);
				return 1;
			}
			return 0;
		}
	}

down:
	if (netif_carrier_ok(bond->dev)) {
		netif_carrier_off(bond->dev);
		return 1;
	}
	return 0;
}

/* Get link speed and duplex from the slave's base driver using ethtool.
 * If for some reason the call fails or the values are invalid, set speed
 * and duplex to SPEED_UNKNOWN/DUPLEX_UNKNOWN and return. Return 1 if
 * speed or duplex settings are UNKNOWN; 0 otherwise.
 */
static int bond_update_speed_duplex(struct slave *slave)
{
	struct net_device *slave_dev = slave->dev;
	struct ethtool_link_ksettings ecmd;
	int res;

	slave->speed = SPEED_UNKNOWN;
	slave->duplex = DUPLEX_UNKNOWN;

	res = __ethtool_get_link_ksettings(slave_dev, &ecmd);
	if (res < 0)
		return 1;
	if (ecmd.base.speed == 0 || ecmd.base.speed == ((__u32)-1))
		return 1;
	switch (ecmd.base.duplex) {
	case DUPLEX_FULL:
	case DUPLEX_HALF:
		break;
	default:
		return 1;
	}

	slave->speed = ecmd.base.speed;
	slave->duplex = ecmd.base.duplex;

	return 0;
}
const char *bond_slave_link_status(s8 link)
{
	switch (link) {
	case BOND_LINK_UP:
		return "up";
	case BOND_LINK_FAIL:
		return "going down";
	case BOND_LINK_DOWN:
		return "down";
	case BOND_LINK_BACK:
		return "going back";
	default:
		return "unknown";
	}
}

/* if <dev> supports MII link status reporting, check its link status.
 *
 * We either do MII/ETHTOOL ioctls, or check netif_carrier_ok(),
 * depending upon the setting of the use_carrier parameter.
 *
 * Return either BMSR_LSTATUS, meaning that the link is up (or we
 * can't tell and just pretend it is), or 0, meaning that the link is
 * down.
 *
 * If reporting is non-zero, instead of faking link up, return -1 if
 * both ETHTOOL and MII ioctls fail (meaning the device does not
 * support them). If use_carrier is set, return whatever it says.
 * It'd be nice if there was a good way to tell if a driver supports
 * netif_carrier, but there really isn't.
 */
static int bond_check_dev_link(struct bonding *bond,
			       struct net_device *slave_dev, int reporting)
{
	const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
	int (*ioctl)(struct net_device *, struct ifreq *, int);
	struct ifreq ifr;
	struct mii_ioctl_data *mii;

	if (!reporting && !netif_running(slave_dev))
		return 0;

	if (bond->params.use_carrier)
		return netif_carrier_ok(slave_dev) ? BMSR_LSTATUS : 0;

	/* Try to get link status using Ethtool first. */
	if (slave_dev->ethtool_ops->get_link)
		return slave_dev->ethtool_ops->get_link(slave_dev) ?
			BMSR_LSTATUS : 0;

	/* Ethtool can't be used, fall back to MII ioctls. */
	ioctl = slave_ops->ndo_do_ioctl;
	if (ioctl) {
		/* TODO: set pointer to correct ioctl on a per team member
		 *       basis to make this more efficient. that is, once
		 *       we determine the correct ioctl, we will always
		 *       call it and not the others for that team
		 *       member.
		 */

		/* We cannot assume that SIOCGMIIPHY will also read a
		 * register; not all network drivers (e.g., e100)
		 * support that.
		 */

		/* Yes, the mii is overlaid on the ifreq.ifr_ifru */
		strscpy_pad(ifr.ifr_name, slave_dev->name, IFNAMSIZ);
		mii = if_mii(&ifr);
		if (ioctl(slave_dev, &ifr, SIOCGMIIPHY) == 0) {
			mii->reg_num = MII_BMSR;
			if (ioctl(slave_dev, &ifr, SIOCGMIIREG) == 0)
				return mii->val_out & BMSR_LSTATUS;
		}
	}

	/* If reporting, report that either there's no dev->do_ioctl,
	 * or both SIOCGMIIREG and get_link failed (meaning that we
	 * cannot report link status). If not reporting, pretend
	 * we're ok.
	 */
	return reporting ? -1 : BMSR_LSTATUS;
}

/*----------------------------- Multicast list ------------------------------*/

/* Push the promiscuity flag down to appropriate slaves */
static int bond_set_promiscuity(struct bonding *bond, int inc)
{
	struct list_head *iter;
	int err = 0;

	if (bond_uses_primary(bond)) {
		struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);

		if (curr_active)
			err = dev_set_promiscuity(curr_active->dev, inc);
	} else {
		struct slave *slave;

		bond_for_each_slave(bond, slave, iter) {
			err = dev_set_promiscuity(slave->dev, inc);
			if (err)
				return err;
		}
	}
	return err;
}

/* Push the allmulti flag down to all slaves */
static int bond_set_allmulti(struct bonding *bond, int inc)
{
	struct list_head *iter;
	int err = 0;

	if (bond_uses_primary(bond)) {
		struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);

		if (curr_active)
			err = dev_set_allmulti(curr_active->dev, inc);
	} else {
		struct slave *slave;

		bond_for_each_slave(bond, slave, iter) {
			err = dev_set_allmulti(slave->dev, inc);
			if (err)
				return err;
		}
	}
	return err;
}
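/* Note (added for clarity): in modes with a primary slave
 * (active-backup, balance-tlb, balance-alb) only the current active
 * slave mirrors the bond's promiscuity/allmulti counters; all other
 * modes push the counters to every slave, as bond_set_promiscuity()
 * and bond_set_allmulti() above implement.
 */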
/* Retrieve the list of registered multicast addresses for the bonding
 * device and retransmit an IGMP JOIN request to the current active
 * slave.
 */
static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
{
	struct bonding *bond = container_of(work, struct bonding,
					    mcast_work.work);

	if (!rtnl_trylock()) {
		queue_delayed_work(bond->wq, &bond->mcast_work, 1);
		return;
	}
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, bond->dev);

	if (bond->igmp_retrans > 1) {
		bond->igmp_retrans--;
		queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
	}
	rtnl_unlock();
}

/* Flush bond's hardware addresses from slave */
static void bond_hw_addr_flush(struct net_device *bond_dev,
			       struct net_device *slave_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);

	dev_uc_unsync(slave_dev, bond_dev);
	dev_mc_unsync(slave_dev, bond_dev);

	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
		/* del lacpdu mc addr from mc list */
		u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;

		dev_mc_del(slave_dev, lacpdu_multicast);
	}
}

/*--------------------------- Active slave change ---------------------------*/

/* Update the hardware address list and promisc/allmulti for the new and
 * old active slaves (if any). Modes that are not using primary keep all
 * slaves up to date at all times; only the modes that use primary need to
 * call this function to swap these settings during a failover.
 */
static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
			      struct slave *old_active)
{
	if (old_active) {
		if (bond->dev->flags & IFF_PROMISC)
			dev_set_promiscuity(old_active->dev, -1);

		if (bond->dev->flags & IFF_ALLMULTI)
			dev_set_allmulti(old_active->dev, -1);

		bond_hw_addr_flush(bond->dev, old_active->dev);
	}

	if (new_active) {
		/* FIXME: Signal errors upstream. */
		if (bond->dev->flags & IFF_PROMISC)
			dev_set_promiscuity(new_active->dev, 1);

		if (bond->dev->flags & IFF_ALLMULTI)
			dev_set_allmulti(new_active->dev, 1);

		netif_addr_lock_bh(bond->dev);
		dev_uc_sync(new_active->dev, bond->dev);
		dev_mc_sync(new_active->dev, bond->dev);
		netif_addr_unlock_bh(bond->dev);
	}
}

/**
 * bond_set_dev_addr - clone slave's address to bond
 * @bond_dev: bond net device
 * @slave_dev: slave net device
 *
 * Should be called with RTNL held.
 */
static int bond_set_dev_addr(struct net_device *bond_dev,
			     struct net_device *slave_dev)
{
	int err;

	slave_dbg(bond_dev, slave_dev, "bond_dev=%p slave_dev=%p slave_dev->addr_len=%d\n",
		  bond_dev, slave_dev, slave_dev->addr_len);
	err = dev_pre_changeaddr_notify(bond_dev, slave_dev->dev_addr, NULL);
	if (err)
		return err;

	memcpy(bond_dev->dev_addr, slave_dev->dev_addr, slave_dev->addr_len);
	bond_dev->addr_assign_type = NET_ADDR_STOLEN;
	call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
	return 0;
}

static struct slave *bond_get_old_active(struct bonding *bond,
					 struct slave *new_active)
{
	struct slave *slave;
	struct list_head *iter;

	bond_for_each_slave(bond, slave, iter) {
		if (slave == new_active)
			continue;

		if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr))
			return slave;
	}

	return NULL;
}

/* bond_do_fail_over_mac
 *
 * Perform special MAC address swapping for fail_over_mac settings
 *
 * Called with RTNL
 */
static void bond_do_fail_over_mac(struct bonding *bond,
				  struct slave *new_active,
				  struct slave *old_active)
{
	u8 tmp_mac[MAX_ADDR_LEN];
	struct sockaddr_storage ss;
	int rv;

	switch (bond->params.fail_over_mac) {
	case BOND_FOM_ACTIVE:
		if (new_active) {
			rv = bond_set_dev_addr(bond->dev, new_active->dev);
			if (rv)
				slave_err(bond->dev, new_active->dev, "Error %d setting bond MAC from slave\n",
					  -rv);
		}
		break;
	case BOND_FOM_FOLLOW:
		/* if new_active && old_active, swap them
		 * if just old_active, do nothing (going to no active slave)
		 * if just new_active, set new_active to bond's MAC
		 */
		if (!new_active)
			return;

		if (!old_active)
			old_active = bond_get_old_active(bond, new_active);

		if (old_active) {
			bond_hw_addr_copy(tmp_mac, new_active->dev->dev_addr,
					  new_active->dev->addr_len);
			bond_hw_addr_copy(ss.__data,
					  old_active->dev->dev_addr,
					  old_active->dev->addr_len);
			ss.ss_family = new_active->dev->type;
		} else {
			bond_hw_addr_copy(ss.__data, bond->dev->dev_addr,
					  bond->dev->addr_len);
			ss.ss_family = bond->dev->type;
		}

		rv = dev_set_mac_address(new_active->dev,
					 (struct sockaddr *)&ss, NULL);
		if (rv) {
			slave_err(bond->dev, new_active->dev, "Error %d setting MAC of new active slave\n",
				  -rv);
			goto out;
		}

		if (!old_active)
			goto out;

		bond_hw_addr_copy(ss.__data, tmp_mac,
				  new_active->dev->addr_len);
		ss.ss_family = old_active->dev->type;

		rv = dev_set_mac_address(old_active->dev,
					 (struct sockaddr *)&ss, NULL);
		if (rv)
			slave_err(bond->dev, old_active->dev, "Error %d setting MAC of old active slave\n",
				  -rv);
out:
		break;
	default:
		netdev_err(bond->dev, "bond_do_fail_over_mac impossible: bad policy %d\n",
			   bond->params.fail_over_mac);
		break;
	}
}
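/* Summary of primary_reselect (added for clarity), as implemented by
 * bond_choose_primary_or_current() below:
 *   BOND_PRI_RESELECT_ALWAYS  - a healthy primary always wins;
 *   BOND_PRI_RESELECT_BETTER  - the primary wins only when it is
 *                               strictly faster, or equally fast with
 *                               strictly better duplex;
 *   BOND_PRI_RESELECT_FAILURE - keep the current slave until it fails.
 */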
static struct slave *bond_choose_primary_or_current(struct bonding *bond)
{
	struct slave *prim = rtnl_dereference(bond->primary_slave);
	struct slave *curr = rtnl_dereference(bond->curr_active_slave);

	if (!prim || prim->link != BOND_LINK_UP) {
		if (!curr || curr->link != BOND_LINK_UP)
			return NULL;
		return curr;
	}

	if (bond->force_primary) {
		bond->force_primary = false;
		return prim;
	}

	if (!curr || curr->link != BOND_LINK_UP)
		return prim;

	/* At this point, prim and curr are both up */
	switch (bond->params.primary_reselect) {
	case BOND_PRI_RESELECT_ALWAYS:
		return prim;
	case BOND_PRI_RESELECT_BETTER:
		if (prim->speed < curr->speed)
			return curr;
		if (prim->speed == curr->speed && prim->duplex <= curr->duplex)
			return curr;
		return prim;
	case BOND_PRI_RESELECT_FAILURE:
		return curr;
	default:
		netdev_err(bond->dev, "impossible primary_reselect %d\n",
			   bond->params.primary_reselect);
		return curr;
	}
}

/**
 * bond_find_best_slave - select the best available slave to be the active one
 * @bond: our bonding struct
 */
static struct slave *bond_find_best_slave(struct bonding *bond)
{
	struct slave *slave, *bestslave = NULL;
	struct list_head *iter;
	int mintime = bond->params.updelay;

	slave = bond_choose_primary_or_current(bond);
	if (slave)
		return slave;

	bond_for_each_slave(bond, slave, iter) {
		if (slave->link == BOND_LINK_UP)
			return slave;
		if (slave->link == BOND_LINK_BACK && bond_slave_is_up(slave) &&
		    slave->delay < mintime) {
			mintime = slave->delay;
			bestslave = slave;
		}
	}

	return bestslave;
}

static bool bond_should_notify_peers(struct bonding *bond)
{
	struct slave *slave;

	rcu_read_lock();
	slave = rcu_dereference(bond->curr_active_slave);
	rcu_read_unlock();

	netdev_dbg(bond->dev, "bond_should_notify_peers: slave %s\n",
		   slave ? slave->dev->name : "NULL");

	if (!slave || !bond->send_peer_notif ||
	    bond->send_peer_notif %
	    max(1, bond->params.peer_notif_delay) != 0 ||
	    !netif_carrier_ok(bond->dev) ||
	    test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
		return false;

	return true;
}
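/* Peer-notification cadence (added for clarity): send_peer_notif is
 * primed to num_peer_notif * max(1, peer_notif_delay), and the modulo
 * test above lets a notification through only when the counter is a
 * multiple of the delay; with peer_notif_delay = 3, for example, a
 * gratuitous ARP/unsolicited NA goes out on every third monitor pass
 * until the budget is spent.
 */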
/**
 * bond_change_active_slave - change the active slave into the specified one
 * @bond: our bonding struct
 * @new_active: the new slave to make the active one
 *
 * Set the new slave to the bond's settings and unset them on the old
 * curr_active_slave.
 * Settings include flags, mc-list, promiscuity, allmulti, etc.
 *
 * If @new's link state is %BOND_LINK_BACK we'll set it to %BOND_LINK_UP,
 * because it is apparently the best available slave we have, even though its
 * updelay hasn't timed out yet.
 *
 * Caller must hold RTNL.
 */
void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
{
	struct slave *old_active;

	ASSERT_RTNL();

	old_active = rtnl_dereference(bond->curr_active_slave);

	if (old_active == new_active)
		return;

#ifdef CONFIG_XFRM_OFFLOAD
	bond_ipsec_del_sa_all(bond);
#endif /* CONFIG_XFRM_OFFLOAD */

	if (new_active) {
		new_active->last_link_up = jiffies;

		if (new_active->link == BOND_LINK_BACK) {
			if (bond_uses_primary(bond)) {
				slave_info(bond->dev, new_active->dev, "making interface the new active one %d ms earlier\n",
					   (bond->params.updelay - new_active->delay) * bond->params.miimon);
			}

			new_active->delay = 0;
			bond_set_slave_link_state(new_active, BOND_LINK_UP,
						  BOND_SLAVE_NOTIFY_NOW);

			if (BOND_MODE(bond) == BOND_MODE_8023AD)
				bond_3ad_handle_link_change(new_active, BOND_LINK_UP);

			if (bond_is_lb(bond))
				bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
		} else {
			if (bond_uses_primary(bond))
				slave_info(bond->dev, new_active->dev, "making interface the new active one\n");
		}
	}

	if (bond_uses_primary(bond))
		bond_hw_addr_swap(bond, new_active, old_active);

	if (bond_is_lb(bond)) {
		bond_alb_handle_active_change(bond, new_active);
		if (old_active)
			bond_set_slave_inactive_flags(old_active,
						      BOND_SLAVE_NOTIFY_NOW);
		if (new_active)
			bond_set_slave_active_flags(new_active,
						    BOND_SLAVE_NOTIFY_NOW);
	} else {
		rcu_assign_pointer(bond->curr_active_slave, new_active);
	}

	if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
		if (old_active)
			bond_set_slave_inactive_flags(old_active,
						      BOND_SLAVE_NOTIFY_NOW);

		if (new_active) {
			bool should_notify_peers = false;

			bond_set_slave_active_flags(new_active,
						    BOND_SLAVE_NOTIFY_NOW);

			if (bond->params.fail_over_mac)
				bond_do_fail_over_mac(bond, new_active,
						      old_active);

			if (netif_running(bond->dev)) {
				bond->send_peer_notif =
					bond->params.num_peer_notif *
					max(1, bond->params.peer_notif_delay);
				should_notify_peers =
					bond_should_notify_peers(bond);
			}

			call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev);
			if (should_notify_peers) {
				bond->send_peer_notif--;
				call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
							 bond->dev);
			}
		}
	}

#ifdef CONFIG_XFRM_OFFLOAD
	bond_ipsec_add_sa_all(bond);
#endif /* CONFIG_XFRM_OFFLOAD */

	/* resend IGMP joins since active slave has changed or
	 * all were sent on curr_active_slave.
	 * resend only if bond is brought up with the affected
	 * bonding modes and the retransmission is enabled
	 */
	if (netif_running(bond->dev) && (bond->params.resend_igmp > 0) &&
	    ((bond_uses_primary(bond) && new_active) ||
	     BOND_MODE(bond) == BOND_MODE_ROUNDROBIN)) {
		bond->igmp_retrans = bond->params.resend_igmp;
		queue_delayed_work(bond->wq, &bond->mcast_work, 1);
	}
}
/**
 * bond_select_active_slave - select a new active slave, if needed
 * @bond: our bonding struct
 *
 * This function should be called when one of the following occurs:
 * - The old curr_active_slave has been released or lost its link.
 * - The primary_slave has got its link back.
 * - A slave has got its link back and there's no old curr_active_slave.
 *
 * Caller must hold RTNL.
 */
void bond_select_active_slave(struct bonding *bond)
{
	struct slave *best_slave;
	int rv;

	ASSERT_RTNL();

	best_slave = bond_find_best_slave(bond);
	if (best_slave != rtnl_dereference(bond->curr_active_slave)) {
		bond_change_active_slave(bond, best_slave);
		rv = bond_set_carrier(bond);
		if (!rv)
			return;

		if (netif_carrier_ok(bond->dev))
			netdev_info(bond->dev, "active interface up!\n");
		else
			netdev_info(bond->dev, "now running without any active interface!\n");
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static inline int slave_enable_netpoll(struct slave *slave)
{
	struct netpoll *np;
	int err = 0;

	np = kzalloc(sizeof(*np), GFP_KERNEL);
	err = -ENOMEM;
	if (!np)
		goto out;

	err = __netpoll_setup(np, slave->dev);
	if (err) {
		kfree(np);
		goto out;
	}
	slave->np = np;
out:
	return err;
}
static inline void slave_disable_netpoll(struct slave *slave)
{
	struct netpoll *np = slave->np;

	if (!np)
		return;

	slave->np = NULL;

	__netpoll_free(np);
}

static void bond_poll_controller(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave = NULL;
	struct list_head *iter;
	struct ad_info ad_info;

	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		if (bond_3ad_get_active_agg_info(bond, &ad_info))
			return;

	bond_for_each_slave_rcu(bond, slave, iter) {
		if (!bond_slave_is_up(slave))
			continue;

		if (BOND_MODE(bond) == BOND_MODE_8023AD) {
			struct aggregator *agg =
				SLAVE_AD_INFO(slave)->port.aggregator;

			if (agg &&
			    agg->aggregator_identifier != ad_info.aggregator_id)
				continue;
		}

		netpoll_poll_dev(slave->dev);
	}
}

static void bond_netpoll_cleanup(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct list_head *iter;
	struct slave *slave;

	bond_for_each_slave(bond, slave, iter)
		if (bond_slave_is_up(slave))
			slave_disable_netpoll(slave);
}

static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
{
	struct bonding *bond = netdev_priv(dev);
	struct list_head *iter;
	struct slave *slave;
	int err = 0;

	bond_for_each_slave(bond, slave, iter) {
		err = slave_enable_netpoll(slave);
		if (err) {
			bond_netpoll_cleanup(dev);
			break;
		}
	}
	return err;
}
#else
static inline int slave_enable_netpoll(struct slave *slave)
{
	return 0;
}
static inline void slave_disable_netpoll(struct slave *slave)
{
}
static void bond_netpoll_cleanup(struct net_device *bond_dev)
{
}
#endif

/*---------------------------------- IOCTL ----------------------------------*/
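/* Feature propagation note (added for clarity): in
 * netdev_increment_features(), NETIF_F_ONE_FOR_ALL bits survive only
 * if every slave advertises them, while NETIF_F_ALL_FOR_ALL bits
 * survive if any slave does; bond_fix_features() below seeds the
 * feature word accordingly before walking the slave list.
 */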
static netdev_features_t bond_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	struct bonding *bond = netdev_priv(dev);
	struct list_head *iter;
	netdev_features_t mask;
	struct slave *slave;

#if IS_ENABLED(CONFIG_TLS_DEVICE)
	if (bond_sk_check(bond))
		features |= BOND_TLS_FEATURES;
	else
		features &= ~BOND_TLS_FEATURES;
#endif

	mask = features;

	features &= ~NETIF_F_ONE_FOR_ALL;
	features |= NETIF_F_ALL_FOR_ALL;

	bond_for_each_slave(bond, slave, iter) {
		features = netdev_increment_features(features,
						     slave->dev->features,
						     mask);
	}
	features = netdev_add_tso_features(features, mask);

	return features;
}

#define BOND_VLAN_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
				 NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \
				 NETIF_F_HIGHDMA | NETIF_F_LRO)

#define BOND_ENC_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
				 NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE)

#define BOND_MPLS_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
				 NETIF_F_GSO_SOFTWARE)


static void bond_compute_features(struct bonding *bond)
{
	unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
					IFF_XMIT_DST_RELEASE_PERM;
	netdev_features_t vlan_features = BOND_VLAN_FEATURES;
	netdev_features_t enc_features  = BOND_ENC_FEATURES;
#ifdef CONFIG_XFRM_OFFLOAD
	netdev_features_t xfrm_features  = BOND_XFRM_FEATURES;
#endif /* CONFIG_XFRM_OFFLOAD */
	netdev_features_t mpls_features  = BOND_MPLS_FEATURES;
	struct net_device *bond_dev = bond->dev;
	struct list_head *iter;
	struct slave *slave;
	unsigned short max_hard_header_len = ETH_HLEN;
	unsigned int gso_max_size = GSO_MAX_SIZE;
	u16 gso_max_segs = GSO_MAX_SEGS;

	if (!bond_has_slaves(bond))
		goto done;
	vlan_features &= NETIF_F_ALL_FOR_ALL;
	mpls_features &= NETIF_F_ALL_FOR_ALL;

	bond_for_each_slave(bond, slave, iter) {
		vlan_features = netdev_increment_features(vlan_features,
							  slave->dev->vlan_features, BOND_VLAN_FEATURES);

		enc_features = netdev_increment_features(enc_features,
							 slave->dev->hw_enc_features,
							 BOND_ENC_FEATURES);

#ifdef CONFIG_XFRM_OFFLOAD
		xfrm_features = netdev_increment_features(xfrm_features,
							  slave->dev->hw_enc_features,
							  BOND_XFRM_FEATURES);
#endif /* CONFIG_XFRM_OFFLOAD */

		mpls_features = netdev_increment_features(mpls_features,
							  slave->dev->mpls_features,
							  BOND_MPLS_FEATURES);

		dst_release_flag &= slave->dev->priv_flags;
		if (slave->dev->hard_header_len > max_hard_header_len)
			max_hard_header_len = slave->dev->hard_header_len;

		gso_max_size = min(gso_max_size, slave->dev->gso_max_size);
		gso_max_segs = min(gso_max_segs, slave->dev->gso_max_segs);
	}
	bond_dev->hard_header_len = max_hard_header_len;

done:
	bond_dev->vlan_features = vlan_features;
	bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
				    NETIF_F_HW_VLAN_CTAG_TX |
				    NETIF_F_HW_VLAN_STAG_TX;
#ifdef CONFIG_XFRM_OFFLOAD
	bond_dev->hw_enc_features |= xfrm_features;
#endif /* CONFIG_XFRM_OFFLOAD */
	bond_dev->mpls_features = mpls_features;
	bond_dev->gso_max_segs = gso_max_segs;
	netif_set_gso_max_size(bond_dev, gso_max_size);

	bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
	if ((bond_dev->priv_flags & IFF_XMIT_DST_RELEASE_PERM) &&
	    dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
		bond_dev->priv_flags |= IFF_XMIT_DST_RELEASE;

	netdev_change_features(bond_dev);
}
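/* bond_setup_by_slave() below handles non-Ethernet slaves (e.g.
 * ARPHRD_INFINIBAND): since the bond was created with ether_setup(),
 * the first such slave must donate its type, header_ops and address
 * geometry. (Descriptive comment added for illustration.)
 */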
static void bond_setup_by_slave(struct net_device *bond_dev,
				struct net_device *slave_dev)
{
	bond_dev->header_ops	    = slave_dev->header_ops;

	bond_dev->type		    = slave_dev->type;
	bond_dev->hard_header_len   = slave_dev->hard_header_len;
	bond_dev->needed_headroom   = slave_dev->needed_headroom;
	bond_dev->addr_len	    = slave_dev->addr_len;

	memcpy(bond_dev->broadcast, slave_dev->broadcast,
	       slave_dev->addr_len);
}

/* On bonding slaves other than the currently active slave, suppress
 * duplicates except for alb non-mcast/bcast.
 */
static bool bond_should_deliver_exact_match(struct sk_buff *skb,
					    struct slave *slave,
					    struct bonding *bond)
{
	if (bond_is_slave_inactive(slave)) {
		if (BOND_MODE(bond) == BOND_MODE_ALB &&
		    skb->pkt_type != PACKET_BROADCAST &&
		    skb->pkt_type != PACKET_MULTICAST)
			return false;
		return true;
	}
	return false;
}
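/* rx_handler return values used below (added for clarity):
 * RX_HANDLER_ANOTHER  - skb->dev was retargeted to the bond, run RX again;
 * RX_HANDLER_EXACT    - deliver only to exact-match protocol handlers;
 * RX_HANDLER_PASS     - continue normal RX processing on the slave;
 * RX_HANDLER_CONSUMED - the skb was consumed (freed) here.
 */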
static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct slave *slave;
	struct bonding *bond;
	int (*recv_probe)(const struct sk_buff *, struct bonding *,
			  struct slave *);
	int ret = RX_HANDLER_ANOTHER;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return RX_HANDLER_CONSUMED;

	*pskb = skb;

	slave = bond_slave_get_rcu(skb->dev);
	bond = slave->bond;

	recv_probe = READ_ONCE(bond->recv_probe);
	if (recv_probe) {
		ret = recv_probe(skb, bond, slave);
		if (ret == RX_HANDLER_CONSUMED) {
			consume_skb(skb);
			return ret;
		}
	}

	/*
	 * For packets determined by bond_should_deliver_exact_match() call to
	 * be suppressed we want to make an exception for link-local packets.
	 * This is necessary for e.g. LLDP daemons to be able to monitor
	 * inactive slave links without being forced to bind to them
	 * explicitly.
	 *
	 * At the same time, packets that are passed to the bonding master
	 * (including link-local ones) can have their originating interface
	 * determined via PACKET_ORIGDEV socket option.
	 */
	if (bond_should_deliver_exact_match(skb, slave, bond)) {
		if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
			return RX_HANDLER_PASS;
		return RX_HANDLER_EXACT;
	}

	skb->dev = bond->dev;

	if (BOND_MODE(bond) == BOND_MODE_ALB &&
	    netif_is_bridge_port(bond->dev) &&
	    skb->pkt_type == PACKET_HOST) {

		if (unlikely(skb_cow_head(skb,
					  skb->data - skb_mac_header(skb)))) {
			kfree_skb(skb);
			return RX_HANDLER_CONSUMED;
		}
		bond_hw_addr_copy(eth_hdr(skb)->h_dest, bond->dev->dev_addr,
				  bond->dev->addr_len);
	}

	return ret;
}

static enum netdev_lag_tx_type bond_lag_tx_type(struct bonding *bond)
{
	switch (BOND_MODE(bond)) {
	case BOND_MODE_ROUNDROBIN:
		return NETDEV_LAG_TX_TYPE_ROUNDROBIN;
	case BOND_MODE_ACTIVEBACKUP:
		return NETDEV_LAG_TX_TYPE_ACTIVEBACKUP;
	case BOND_MODE_BROADCAST:
		return NETDEV_LAG_TX_TYPE_BROADCAST;
	case BOND_MODE_XOR:
	case BOND_MODE_8023AD:
		return NETDEV_LAG_TX_TYPE_HASH;
	default:
		return NETDEV_LAG_TX_TYPE_UNKNOWN;
	}
}

static enum netdev_lag_hash bond_lag_hash_type(struct bonding *bond,
					       enum netdev_lag_tx_type type)
{
	if (type != NETDEV_LAG_TX_TYPE_HASH)
		return NETDEV_LAG_HASH_NONE;

	switch (bond->params.xmit_policy) {
	case BOND_XMIT_POLICY_LAYER2:
		return NETDEV_LAG_HASH_L2;
	case BOND_XMIT_POLICY_LAYER34:
		return NETDEV_LAG_HASH_L34;
	case BOND_XMIT_POLICY_LAYER23:
		return NETDEV_LAG_HASH_L23;
	case BOND_XMIT_POLICY_ENCAP23:
		return NETDEV_LAG_HASH_E23;
	case BOND_XMIT_POLICY_ENCAP34:
		return NETDEV_LAG_HASH_E34;
	case BOND_XMIT_POLICY_VLAN_SRCMAC:
		return NETDEV_LAG_HASH_VLAN_SRCMAC;
	default:
		return NETDEV_LAG_HASH_UNKNOWN;
	}
}

static int bond_master_upper_dev_link(struct bonding *bond, struct slave *slave,
				      struct netlink_ext_ack *extack)
{
	struct netdev_lag_upper_info lag_upper_info;
	enum netdev_lag_tx_type type;

	type = bond_lag_tx_type(bond);
	lag_upper_info.tx_type = type;
	lag_upper_info.hash_type = bond_lag_hash_type(bond, type);

	return netdev_master_upper_dev_link(slave->dev, bond->dev, slave,
					    &lag_upper_info, extack);
}

static void bond_upper_dev_unlink(struct bonding *bond, struct slave *slave)
{
	netdev_upper_dev_unlink(slave->dev, bond->dev);
	slave->dev->flags &= ~IFF_SLAVE;
}

static void slave_kobj_release(struct kobject *kobj)
{
	struct slave *slave = to_slave(kobj);
	struct bonding *bond = bond_get_bond_by_slave(slave);

	cancel_delayed_work_sync(&slave->notify_work);
	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		kfree(SLAVE_AD_INFO(slave));

	kfree(slave);
}

static struct kobj_type slave_ktype = {
	.release = slave_kobj_release,
#ifdef CONFIG_SYSFS
	.sysfs_ops = &slave_sysfs_ops,
#endif
};

static int bond_kobj_init(struct slave *slave)
{
	int err;

	err = kobject_init_and_add(&slave->kobj, &slave_ktype,
				   &(slave->dev->dev.kobj), "bonding_slave");
	if (err)
		kobject_put(&slave->kobj);

	return err;
}
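/* Slave lifetime note (added for clarity): after a successful
 * bond_kobj_init() the slave is freed through sysfs refcounting, i.e.
 * slave_kobj_release() above; callers must drop it with kobject_put()
 * rather than kfree(), as bond_alloc_slave() below relies on.
 */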
static struct slave *bond_alloc_slave(struct bonding *bond,
				      struct net_device *slave_dev)
{
	struct slave *slave = NULL;

	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	if (!slave)
		return NULL;

	slave->bond = bond;
	slave->dev = slave_dev;
	INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);

	if (bond_kobj_init(slave))
		return NULL;

	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
		SLAVE_AD_INFO(slave) = kzalloc(sizeof(struct ad_slave_info),
					       GFP_KERNEL);
		if (!SLAVE_AD_INFO(slave)) {
			kobject_put(&slave->kobj);
			return NULL;
		}
	}

	return slave;
}

static void bond_fill_ifbond(struct bonding *bond, struct ifbond *info)
{
	info->bond_mode = BOND_MODE(bond);
	info->miimon = bond->params.miimon;
	info->num_slaves = bond->slave_cnt;
}

static void bond_fill_ifslave(struct slave *slave, struct ifslave *info)
{
	strcpy(info->slave_name, slave->dev->name);
	info->link = slave->link;
	info->state = bond_slave_state(slave);
	info->link_failure_count = slave->link_failure_count;
}

static void bond_netdev_notify_work(struct work_struct *_work)
{
	struct slave *slave = container_of(_work, struct slave,
					   notify_work.work);

	if (rtnl_trylock()) {
		struct netdev_bonding_info binfo;

		bond_fill_ifslave(slave, &binfo.slave);
		bond_fill_ifbond(slave->bond, &binfo.master);
		netdev_bonding_info_change(slave->dev, &binfo);
		rtnl_unlock();
	} else {
		queue_delayed_work(slave->bond->wq, &slave->notify_work, 1);
	}
}

void bond_queue_slave_event(struct slave *slave)
{
	queue_delayed_work(slave->bond->wq, &slave->notify_work, 0);
}

void bond_lower_state_changed(struct slave *slave)
{
	struct netdev_lag_lower_state_info info;

	info.link_up = slave->link == BOND_LINK_UP ||
		       slave->link == BOND_LINK_FAIL;
	info.tx_enabled = bond_is_active_slave(slave);
	netdev_lower_state_changed(slave->dev, &info);
}
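/* Entry points into bond_enslave() (added for clarity): both the
 * netlink path ("ip link set DEV master bondX" -> ndo_add_slave) and
 * the legacy SIOCBONDENSLAVE ioctl used by ifenslave land here, with
 * RTNL held.
 */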
/* enslave device <slave> to bond device <master> */
int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
		 struct netlink_ext_ack *extack)
{
	struct bonding *bond = netdev_priv(bond_dev);
	const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
	struct slave *new_slave = NULL, *prev_slave;
	struct sockaddr_storage ss;
	int link_reporting;
	int res = 0, i;

	if (slave_dev->flags & IFF_MASTER &&
	    !netif_is_bond_master(slave_dev)) {
		NL_SET_ERR_MSG(extack, "Device with IFF_MASTER cannot be enslaved");
		netdev_err(bond_dev,
			   "Error: Device with IFF_MASTER cannot be enslaved\n");
		return -EPERM;
	}

	if (!bond->params.use_carrier &&
	    slave_dev->ethtool_ops->get_link == NULL &&
	    slave_ops->ndo_do_ioctl == NULL) {
		slave_warn(bond_dev, slave_dev, "no link monitoring support\n");
	}

	/* already in-use? */
	if (netdev_is_rx_handler_busy(slave_dev)) {
		NL_SET_ERR_MSG(extack, "Device is in use and cannot be enslaved");
		slave_err(bond_dev, slave_dev,
			  "Error: Device is in use and cannot be enslaved\n");
		return -EBUSY;
	}

	if (bond_dev == slave_dev) {
		NL_SET_ERR_MSG(extack, "Cannot enslave bond to itself.");
		netdev_err(bond_dev, "cannot enslave bond to itself.\n");
		return -EPERM;
	}

	/* vlan challenged mutual exclusion */
	/* no need to lock since we're protected by rtnl_lock */
	if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) {
		slave_dbg(bond_dev, slave_dev, "is NETIF_F_VLAN_CHALLENGED\n");
		if (vlan_uses_dev(bond_dev)) {
			NL_SET_ERR_MSG(extack, "Can not enslave VLAN challenged device to VLAN enabled bond");
			slave_err(bond_dev, slave_dev, "Error: cannot enslave VLAN challenged slave on VLAN enabled bond\n");
			return -EPERM;
		} else {
			slave_warn(bond_dev, slave_dev, "enslaved VLAN challenged slave. Adding VLANs will be blocked as long as it is part of bond.\n");
		}
	} else {
		slave_dbg(bond_dev, slave_dev, "is !NETIF_F_VLAN_CHALLENGED\n");
	}

	if (slave_dev->features & NETIF_F_HW_ESP)
		slave_dbg(bond_dev, slave_dev, "is esp-hw-offload capable\n");

	/* Old ifenslave binaries are no longer supported. These can
	 * be identified with moderate accuracy by the state of the slave:
	 * the current ifenslave will set the interface down prior to
	 * enslaving it; the old ifenslave will not.
	 */
	if (slave_dev->flags & IFF_UP) {
		NL_SET_ERR_MSG(extack, "Device can not be enslaved while up");
		slave_err(bond_dev, slave_dev, "slave is up - this may be due to an out of date ifenslave\n");
		return -EPERM;
	}
	/* set bonding device ether type by slave - bonding netdevices are
	 * created with ether_setup, so when the slave type is not ARPHRD_ETHER
	 * there is a need to override some of the type dependent attribs/funcs.
	 *
	 * bond ether type mutual exclusion - don't allow slaves of dissimilar
	 * ether type (eg ARPHRD_ETHER and ARPHRD_INFINIBAND) to share the
	 * same bond
	 */
	if (!bond_has_slaves(bond)) {
		if (bond_dev->type != slave_dev->type) {
			slave_dbg(bond_dev, slave_dev, "change device type from %d to %d\n",
				  bond_dev->type, slave_dev->type);

			res = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE,
						       bond_dev);
			res = notifier_to_errno(res);
			if (res) {
				slave_err(bond_dev, slave_dev, "refused to change device type\n");
				return -EBUSY;
			}

			/* Flush unicast and multicast addresses */
			dev_uc_flush(bond_dev);
			dev_mc_flush(bond_dev);

			if (slave_dev->type != ARPHRD_ETHER)
				bond_setup_by_slave(bond_dev, slave_dev);
			else {
				ether_setup(bond_dev);
				bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
			}

			call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
						 bond_dev);
		}
	} else if (bond_dev->type != slave_dev->type) {
		NL_SET_ERR_MSG(extack, "Device type is different from other slaves");
		slave_err(bond_dev, slave_dev, "ether type (%d) is different from other slaves (%d), can not enslave it\n",
			  slave_dev->type, bond_dev->type);
		return -EINVAL;
	}

	if (slave_dev->type == ARPHRD_INFINIBAND &&
	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
		NL_SET_ERR_MSG(extack, "Only active-backup mode is supported for infiniband slaves");
		slave_warn(bond_dev, slave_dev, "Type (%d) supports only active-backup mode\n",
			   slave_dev->type);
		res = -EOPNOTSUPP;
		goto err_undo_flags;
	}

	if (!slave_ops->ndo_set_mac_address ||
	    slave_dev->type == ARPHRD_INFINIBAND) {
		slave_warn(bond_dev, slave_dev, "The slave device specified does not support setting the MAC address\n");
		if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
		    bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
			if (!bond_has_slaves(bond)) {
				bond->params.fail_over_mac = BOND_FOM_ACTIVE;
				slave_warn(bond_dev, slave_dev, "Setting fail_over_mac to active for active-backup mode\n");
			} else {
				NL_SET_ERR_MSG(extack, "Slave device does not support setting the MAC address, but fail_over_mac is not set to active");
				slave_err(bond_dev, slave_dev, "The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active\n");
				res = -EOPNOTSUPP;
				goto err_undo_flags;
			}
		}
	}

	call_netdevice_notifiers(NETDEV_JOIN, slave_dev);

	/* If this is the first slave, then we need to set the master's hardware
	 * address to be the same as the slave's.
	 */
	if (!bond_has_slaves(bond) &&
	    bond->dev->addr_assign_type == NET_ADDR_RANDOM) {
		res = bond_set_dev_addr(bond->dev, slave_dev);
		if (res)
			goto err_undo_flags;
	}

	new_slave = bond_alloc_slave(bond, slave_dev);
	if (!new_slave) {
		res = -ENOMEM;
		goto err_undo_flags;
	}

	/* Set the new_slave's queue_id to be zero. Queue ID mapping
	 * is set via sysfs or module option if desired.
	 */
1870 */ 1871 new_slave->queue_id = 0; 1872 1873 /* Save slave's original mtu and then set it to match the bond */ 1874 new_slave->original_mtu = slave_dev->mtu; 1875 res = dev_set_mtu(slave_dev, bond->dev->mtu); 1876 if (res) { 1877 slave_err(bond_dev, slave_dev, "Error %d calling dev_set_mtu\n", res); 1878 goto err_free; 1879 } 1880 1881 /* Save slave's original ("permanent") mac address for modes 1882 * that need it, and for restoring it upon release, and then 1883 * set it to the master's address 1884 */ 1885 bond_hw_addr_copy(new_slave->perm_hwaddr, slave_dev->dev_addr, 1886 slave_dev->addr_len); 1887 1888 if (!bond->params.fail_over_mac || 1889 BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { 1890 /* Set slave to master's mac address. The application already 1891 * set the master's mac address to that of the first slave 1892 */ 1893 memcpy(ss.__data, bond_dev->dev_addr, bond_dev->addr_len); 1894 ss.ss_family = slave_dev->type; 1895 res = dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, 1896 extack); 1897 if (res) { 1898 slave_err(bond_dev, slave_dev, "Error %d calling set_mac_address\n", res); 1899 goto err_restore_mtu; 1900 } 1901 } 1902 1903 /* set slave flag before open to prevent IPv6 addrconf */ 1904 slave_dev->flags |= IFF_SLAVE; 1905 1906 /* open the slave since the application closed it */ 1907 res = dev_open(slave_dev, extack); 1908 if (res) { 1909 slave_err(bond_dev, slave_dev, "Opening slave failed\n"); 1910 goto err_restore_mac; 1911 } 1912 1913 slave_dev->priv_flags |= IFF_BONDING; 1914 /* initialize slave stats */ 1915 dev_get_stats(new_slave->dev, &new_slave->slave_stats); 1916 1917 if (bond_is_lb(bond)) { 1918 /* bond_alb_init_slave() must be called before all other stages since 1919 * it might fail and we do not want to have to undo everything 1920 */ 1921 res = bond_alb_init_slave(bond, new_slave); 1922 if (res) 1923 goto err_close; 1924 } 1925 1926 res = vlan_vids_add_by_dev(slave_dev, bond_dev); 1927 if (res) { 1928 slave_err(bond_dev, slave_dev, "Couldn't add bond vlan ids\n"); 1929 goto err_close; 1930 } 1931 1932 prev_slave = bond_last_slave(bond); 1933 1934 new_slave->delay = 0; 1935 new_slave->link_failure_count = 0; 1936 1937 if (bond_update_speed_duplex(new_slave) && 1938 bond_needs_speed_duplex(bond)) 1939 new_slave->link = BOND_LINK_DOWN; 1940 1941 new_slave->last_rx = jiffies - 1942 (msecs_to_jiffies(bond->params.arp_interval) + 1); 1943 for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) 1944 new_slave->target_last_arp_rx[i] = new_slave->last_rx; 1945 1946 if (bond->params.miimon && !bond->params.use_carrier) { 1947 link_reporting = bond_check_dev_link(bond, slave_dev, 1); 1948 1949 if ((link_reporting == -1) && !bond->params.arp_interval) { 1950 /* miimon is set but a bonded network driver 1951 * does not support ETHTOOL/MII and 1952 * arp_interval is not set. Note: if 1953 * use_carrier is enabled, we will never go 1954 * here (because netif_carrier is always 1955 * supported); thus, we don't need to change 1956 * the messages for netif_carrier. 1957 */ 1958 slave_warn(bond_dev, slave_dev, "MII and ETHTOOL support not available for slave, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! 
see bonding.txt for details\n"); 1959 } else if (link_reporting == -1) { 1960 /* unable get link status using mii/ethtool */ 1961 slave_warn(bond_dev, slave_dev, "can't get link status from slave; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface\n"); 1962 } 1963 } 1964 1965 /* check for initial state */ 1966 new_slave->link = BOND_LINK_NOCHANGE; 1967 if (bond->params.miimon) { 1968 if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) { 1969 if (bond->params.updelay) { 1970 bond_set_slave_link_state(new_slave, 1971 BOND_LINK_BACK, 1972 BOND_SLAVE_NOTIFY_NOW); 1973 new_slave->delay = bond->params.updelay; 1974 } else { 1975 bond_set_slave_link_state(new_slave, 1976 BOND_LINK_UP, 1977 BOND_SLAVE_NOTIFY_NOW); 1978 } 1979 } else { 1980 bond_set_slave_link_state(new_slave, BOND_LINK_DOWN, 1981 BOND_SLAVE_NOTIFY_NOW); 1982 } 1983 } else if (bond->params.arp_interval) { 1984 bond_set_slave_link_state(new_slave, 1985 (netif_carrier_ok(slave_dev) ? 1986 BOND_LINK_UP : BOND_LINK_DOWN), 1987 BOND_SLAVE_NOTIFY_NOW); 1988 } else { 1989 bond_set_slave_link_state(new_slave, BOND_LINK_UP, 1990 BOND_SLAVE_NOTIFY_NOW); 1991 } 1992 1993 if (new_slave->link != BOND_LINK_DOWN) 1994 new_slave->last_link_up = jiffies; 1995 slave_dbg(bond_dev, slave_dev, "Initial state of slave is BOND_LINK_%s\n", 1996 new_slave->link == BOND_LINK_DOWN ? "DOWN" : 1997 (new_slave->link == BOND_LINK_UP ? "UP" : "BACK")); 1998 1999 if (bond_uses_primary(bond) && bond->params.primary[0]) { 2000 /* if there is a primary slave, remember it */ 2001 if (strcmp(bond->params.primary, new_slave->dev->name) == 0) { 2002 rcu_assign_pointer(bond->primary_slave, new_slave); 2003 bond->force_primary = true; 2004 } 2005 } 2006 2007 switch (BOND_MODE(bond)) { 2008 case BOND_MODE_ACTIVEBACKUP: 2009 bond_set_slave_inactive_flags(new_slave, 2010 BOND_SLAVE_NOTIFY_NOW); 2011 break; 2012 case BOND_MODE_8023AD: 2013 /* in 802.3ad mode, the internal mechanism 2014 * will activate the slaves in the selected 2015 * aggregator 2016 */ 2017 bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW); 2018 /* if this is the first slave */ 2019 if (!prev_slave) { 2020 SLAVE_AD_INFO(new_slave)->id = 1; 2021 /* Initialize AD with the number of times that the AD timer is called in 1 second 2022 * can be called only after the mac address of the bond is set 2023 */ 2024 bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL); 2025 } else { 2026 SLAVE_AD_INFO(new_slave)->id = 2027 SLAVE_AD_INFO(prev_slave)->id + 1; 2028 } 2029 2030 bond_3ad_bind_slave(new_slave); 2031 break; 2032 case BOND_MODE_TLB: 2033 case BOND_MODE_ALB: 2034 bond_set_active_slave(new_slave); 2035 bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW); 2036 break; 2037 default: 2038 slave_dbg(bond_dev, slave_dev, "This slave is always active in trunk mode\n"); 2039 2040 /* always active in trunk mode */ 2041 bond_set_active_slave(new_slave); 2042 2043 /* In trunking mode there is little meaning to curr_active_slave 2044 * anyway (it holds no special properties of the bond device), 2045 * so we can change it without calling change_active_interface() 2046 */ 2047 if (!rcu_access_pointer(bond->curr_active_slave) && 2048 new_slave->link == BOND_LINK_UP) 2049 rcu_assign_pointer(bond->curr_active_slave, new_slave); 2050 2051 break; 2052 } /* switch(bond_mode) */ 2053 2054 #ifdef CONFIG_NET_POLL_CONTROLLER 2055 if (bond->dev->npinfo) { 2056 if (slave_enable_netpoll(new_slave)) { 2057 
slave_info(bond_dev, slave_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n"); 2058 res = -EBUSY; 2059 goto err_detach; 2060 } 2061 } 2062 #endif 2063 2064 if (!(bond_dev->features & NETIF_F_LRO)) 2065 dev_disable_lro(slave_dev); 2066 2067 res = netdev_rx_handler_register(slave_dev, bond_handle_frame, 2068 new_slave); 2069 if (res) { 2070 slave_dbg(bond_dev, slave_dev, "Error %d calling netdev_rx_handler_register\n", res); 2071 goto err_detach; 2072 } 2073 2074 res = bond_master_upper_dev_link(bond, new_slave, extack); 2075 if (res) { 2076 slave_dbg(bond_dev, slave_dev, "Error %d calling bond_master_upper_dev_link\n", res); 2077 goto err_unregister; 2078 } 2079 2080 bond_lower_state_changed(new_slave); 2081 2082 res = bond_sysfs_slave_add(new_slave); 2083 if (res) { 2084 slave_dbg(bond_dev, slave_dev, "Error %d calling bond_sysfs_slave_add\n", res); 2085 goto err_upper_unlink; 2086 } 2087 2088 /* If the mode uses primary, then the following is handled by 2089 * bond_change_active_slave(). 2090 */ 2091 if (!bond_uses_primary(bond)) { 2092 /* set promiscuity level to new slave */ 2093 if (bond_dev->flags & IFF_PROMISC) { 2094 res = dev_set_promiscuity(slave_dev, 1); 2095 if (res) 2096 goto err_sysfs_del; 2097 } 2098 2099 /* set allmulti level to new slave */ 2100 if (bond_dev->flags & IFF_ALLMULTI) { 2101 res = dev_set_allmulti(slave_dev, 1); 2102 if (res) { 2103 if (bond_dev->flags & IFF_PROMISC) 2104 dev_set_promiscuity(slave_dev, -1); 2105 goto err_sysfs_del; 2106 } 2107 } 2108 2109 netif_addr_lock_bh(bond_dev); 2110 dev_mc_sync_multiple(slave_dev, bond_dev); 2111 dev_uc_sync_multiple(slave_dev, bond_dev); 2112 netif_addr_unlock_bh(bond_dev); 2113 2114 if (BOND_MODE(bond) == BOND_MODE_8023AD) { 2115 /* add lacpdu mc addr to mc list */ 2116 u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR; 2117 2118 dev_mc_add(slave_dev, lacpdu_multicast); 2119 } 2120 } 2121 2122 bond->slave_cnt++; 2123 bond_compute_features(bond); 2124 bond_set_carrier(bond); 2125 2126 if (bond_uses_primary(bond)) { 2127 block_netpoll_tx(); 2128 bond_select_active_slave(bond); 2129 unblock_netpoll_tx(); 2130 } 2131 2132 if (bond_mode_can_use_xmit_hash(bond)) 2133 bond_update_slave_arr(bond, NULL); 2134 2135 2136 slave_info(bond_dev, slave_dev, "Enslaving as %s interface with %s link\n", 2137 bond_is_active_slave(new_slave) ? "an active" : "a backup", 2138 new_slave->link != BOND_LINK_DOWN ? 
"an up" : "a down"); 2139 2140 /* enslave is successful */ 2141 bond_queue_slave_event(new_slave); 2142 return 0; 2143 2144 /* Undo stages on error */ 2145 err_sysfs_del: 2146 bond_sysfs_slave_del(new_slave); 2147 2148 err_upper_unlink: 2149 bond_upper_dev_unlink(bond, new_slave); 2150 2151 err_unregister: 2152 netdev_rx_handler_unregister(slave_dev); 2153 2154 err_detach: 2155 vlan_vids_del_by_dev(slave_dev, bond_dev); 2156 if (rcu_access_pointer(bond->primary_slave) == new_slave) 2157 RCU_INIT_POINTER(bond->primary_slave, NULL); 2158 if (rcu_access_pointer(bond->curr_active_slave) == new_slave) { 2159 block_netpoll_tx(); 2160 bond_change_active_slave(bond, NULL); 2161 bond_select_active_slave(bond); 2162 unblock_netpoll_tx(); 2163 } 2164 /* either primary_slave or curr_active_slave might've changed */ 2165 synchronize_rcu(); 2166 slave_disable_netpoll(new_slave); 2167 2168 err_close: 2169 if (!netif_is_bond_master(slave_dev)) 2170 slave_dev->priv_flags &= ~IFF_BONDING; 2171 dev_close(slave_dev); 2172 2173 err_restore_mac: 2174 slave_dev->flags &= ~IFF_SLAVE; 2175 if (!bond->params.fail_over_mac || 2176 BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { 2177 /* XXX TODO - fom follow mode needs to change master's 2178 * MAC if this slave's MAC is in use by the bond, or at 2179 * least print a warning. 2180 */ 2181 bond_hw_addr_copy(ss.__data, new_slave->perm_hwaddr, 2182 new_slave->dev->addr_len); 2183 ss.ss_family = slave_dev->type; 2184 dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, NULL); 2185 } 2186 2187 err_restore_mtu: 2188 dev_set_mtu(slave_dev, new_slave->original_mtu); 2189 2190 err_free: 2191 kobject_put(&new_slave->kobj); 2192 2193 err_undo_flags: 2194 /* Enslave of first slave has failed and we need to fix master's mac */ 2195 if (!bond_has_slaves(bond)) { 2196 if (ether_addr_equal_64bits(bond_dev->dev_addr, 2197 slave_dev->dev_addr)) 2198 eth_hw_addr_random(bond_dev); 2199 if (bond_dev->type != ARPHRD_ETHER) { 2200 dev_close(bond_dev); 2201 ether_setup(bond_dev); 2202 bond_dev->flags |= IFF_MASTER; 2203 bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING; 2204 } 2205 } 2206 2207 return res; 2208 } 2209 2210 /* Try to release the slave device <slave> from the bond device <master> 2211 * It is legal to access curr_active_slave without a lock because all the function 2212 * is RTNL-locked. If "all" is true it means that the function is being called 2213 * while destroying a bond interface and all slaves are being released. 2214 * 2215 * The rules for slave state should be: 2216 * for Active/Backup: 2217 * Active stays on all backups go down 2218 * for Bonded connections: 2219 * The first up interface should be left on and all others downed. 
2220 */ 2221 static int __bond_release_one(struct net_device *bond_dev, 2222 struct net_device *slave_dev, 2223 bool all, bool unregister) 2224 { 2225 struct bonding *bond = netdev_priv(bond_dev); 2226 struct slave *slave, *oldcurrent; 2227 struct sockaddr_storage ss; 2228 int old_flags = bond_dev->flags; 2229 netdev_features_t old_features = bond_dev->features; 2230 2231 /* slave is not a slave or master is not master of this slave */ 2232 if (!(slave_dev->flags & IFF_SLAVE) || 2233 !netdev_has_upper_dev(slave_dev, bond_dev)) { 2234 slave_dbg(bond_dev, slave_dev, "cannot release slave\n"); 2235 return -EINVAL; 2236 } 2237 2238 block_netpoll_tx(); 2239 2240 slave = bond_get_slave_by_dev(bond, slave_dev); 2241 if (!slave) { 2242 /* not a slave of this bond */ 2243 slave_info(bond_dev, slave_dev, "interface not enslaved\n"); 2244 unblock_netpoll_tx(); 2245 return -EINVAL; 2246 } 2247 2248 bond_set_slave_inactive_flags(slave, BOND_SLAVE_NOTIFY_NOW); 2249 2250 bond_sysfs_slave_del(slave); 2251 2252 /* recompute stats just before removing the slave */ 2253 bond_get_stats(bond->dev, &bond->bond_stats); 2254 2255 bond_upper_dev_unlink(bond, slave); 2256 /* unregister rx_handler early so bond_handle_frame wouldn't be called 2257 * for this slave anymore. 2258 */ 2259 netdev_rx_handler_unregister(slave_dev); 2260 2261 if (BOND_MODE(bond) == BOND_MODE_8023AD) 2262 bond_3ad_unbind_slave(slave); 2263 2264 if (bond_mode_can_use_xmit_hash(bond)) 2265 bond_update_slave_arr(bond, slave); 2266 2267 slave_info(bond_dev, slave_dev, "Releasing %s interface\n", 2268 bond_is_active_slave(slave) ? "active" : "backup"); 2269 2270 oldcurrent = rcu_access_pointer(bond->curr_active_slave); 2271 2272 RCU_INIT_POINTER(bond->current_arp_slave, NULL); 2273 2274 if (!all && (!bond->params.fail_over_mac || 2275 BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)) { 2276 if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) && 2277 bond_has_slaves(bond)) 2278 slave_warn(bond_dev, slave_dev, "the permanent HWaddr of slave - %pM - is still in use by bond - set the HWaddr of slave to a different address to avoid conflicts\n", 2279 slave->perm_hwaddr); 2280 } 2281 2282 if (rtnl_dereference(bond->primary_slave) == slave) 2283 RCU_INIT_POINTER(bond->primary_slave, NULL); 2284 2285 if (oldcurrent == slave) 2286 bond_change_active_slave(bond, NULL); 2287 2288 if (bond_is_lb(bond)) { 2289 /* Must be called only after the slave has been 2290 * detached from the list and the curr_active_slave 2291 * has been cleared (if our_slave == old_current), 2292 * but before a new active slave is selected. 2293 */ 2294 bond_alb_deinit_slave(bond, slave); 2295 } 2296 2297 if (all) { 2298 RCU_INIT_POINTER(bond->curr_active_slave, NULL); 2299 } else if (oldcurrent == slave) { 2300 /* Note that we hold RTNL over this sequence, so there 2301 * is no concern that another slave add/remove event 2302 * will interfere. 
2303 */ 2304 bond_select_active_slave(bond); 2305 } 2306 2307 if (!bond_has_slaves(bond)) { 2308 bond_set_carrier(bond); 2309 eth_hw_addr_random(bond_dev); 2310 } 2311 2312 unblock_netpoll_tx(); 2313 synchronize_rcu(); 2314 bond->slave_cnt--; 2315 2316 if (!bond_has_slaves(bond)) { 2317 call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev); 2318 call_netdevice_notifiers(NETDEV_RELEASE, bond->dev); 2319 } 2320 2321 bond_compute_features(bond); 2322 if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) && 2323 (old_features & NETIF_F_VLAN_CHALLENGED)) 2324 slave_info(bond_dev, slave_dev, "last VLAN challenged slave left bond - VLAN blocking is removed\n"); 2325 2326 vlan_vids_del_by_dev(slave_dev, bond_dev); 2327 2328 /* If the mode uses primary, then this case was handled above by 2329 * bond_change_active_slave(..., NULL) 2330 */ 2331 if (!bond_uses_primary(bond)) { 2332 /* unset promiscuity level from slave 2333 * NOTE: The NETDEV_CHANGEADDR call above may change the value 2334 * of the IFF_PROMISC flag in the bond_dev, but we need the 2335 * value of that flag before that change, as that was the value 2336 * when this slave was attached, so we cache at the start of the 2337 * function and use it here. Same goes for ALLMULTI below 2338 */ 2339 if (old_flags & IFF_PROMISC) 2340 dev_set_promiscuity(slave_dev, -1); 2341 2342 /* unset allmulti level from slave */ 2343 if (old_flags & IFF_ALLMULTI) 2344 dev_set_allmulti(slave_dev, -1); 2345 2346 bond_hw_addr_flush(bond_dev, slave_dev); 2347 } 2348 2349 slave_disable_netpoll(slave); 2350 2351 /* close slave before restoring its mac address */ 2352 dev_close(slave_dev); 2353 2354 if (bond->params.fail_over_mac != BOND_FOM_ACTIVE || 2355 BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { 2356 /* restore original ("permanent") mac address */ 2357 bond_hw_addr_copy(ss.__data, slave->perm_hwaddr, 2358 slave->dev->addr_len); 2359 ss.ss_family = slave_dev->type; 2360 dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, NULL); 2361 } 2362 2363 if (unregister) 2364 __dev_set_mtu(slave_dev, slave->original_mtu); 2365 else 2366 dev_set_mtu(slave_dev, slave->original_mtu); 2367 2368 if (!netif_is_bond_master(slave_dev)) 2369 slave_dev->priv_flags &= ~IFF_BONDING; 2370 2371 kobject_put(&slave->kobj); 2372 2373 return 0; 2374 } 2375 2376 /* A wrapper used because of ndo_del_link */ 2377 int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) 2378 { 2379 return __bond_release_one(bond_dev, slave_dev, false, false); 2380 } 2381 2382 /* First release a slave and then destroy the bond if no more slaves are left. 2383 * Must be under rtnl_lock when this function is called. 
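 * (This path is taken from the slave NETDEV_UNREGISTER handler for
 * non-Ethernet bonds; see bond_slave_netdev_event() below.)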
2384 */ 2385 static int bond_release_and_destroy(struct net_device *bond_dev, 2386 struct net_device *slave_dev) 2387 { 2388 struct bonding *bond = netdev_priv(bond_dev); 2389 int ret; 2390 2391 ret = __bond_release_one(bond_dev, slave_dev, false, true); 2392 if (ret == 0 && !bond_has_slaves(bond) && 2393 bond_dev->reg_state != NETREG_UNREGISTERING) { 2394 bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; 2395 netdev_info(bond_dev, "Destroying bond\n"); 2396 bond_remove_proc_entry(bond); 2397 unregister_netdevice(bond_dev); 2398 } 2399 return ret; 2400 } 2401 2402 static void bond_info_query(struct net_device *bond_dev, struct ifbond *info) 2403 { 2404 struct bonding *bond = netdev_priv(bond_dev); 2405 2406 bond_fill_ifbond(bond, info); 2407 } 2408 2409 static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *info) 2410 { 2411 struct bonding *bond = netdev_priv(bond_dev); 2412 struct list_head *iter; 2413 int i = 0, res = -ENODEV; 2414 struct slave *slave; 2415 2416 bond_for_each_slave(bond, slave, iter) { 2417 if (i++ == (int)info->slave_id) { 2418 res = 0; 2419 bond_fill_ifslave(slave, info); 2420 break; 2421 } 2422 } 2423 2424 return res; 2425 } 2426 2427 /*-------------------------------- Monitoring -------------------------------*/ 2428 2429 /* called with rcu_read_lock() */ 2430 static int bond_miimon_inspect(struct bonding *bond) 2431 { 2432 int link_state, commit = 0; 2433 struct list_head *iter; 2434 struct slave *slave; 2435 bool ignore_updelay; 2436 2437 ignore_updelay = !rcu_dereference(bond->curr_active_slave); 2438 2439 bond_for_each_slave_rcu(bond, slave, iter) { 2440 bond_propose_link_state(slave, BOND_LINK_NOCHANGE); 2441 2442 link_state = bond_check_dev_link(bond, slave->dev, 0); 2443 2444 switch (slave->link) { 2445 case BOND_LINK_UP: 2446 if (link_state) 2447 continue; 2448 2449 bond_propose_link_state(slave, BOND_LINK_FAIL); 2450 commit++; 2451 slave->delay = bond->params.downdelay; 2452 if (slave->delay) { 2453 slave_info(bond->dev, slave->dev, "link status down for %sinterface, disabling it in %d ms\n", 2454 (BOND_MODE(bond) == 2455 BOND_MODE_ACTIVEBACKUP) ? 2456 (bond_is_active_slave(slave) ? 2457 "active " : "backup ") : "", 2458 bond->params.downdelay * bond->params.miimon); 2459 } 2460 fallthrough; 2461 case BOND_LINK_FAIL: 2462 if (link_state) { 2463 /* recovered before downdelay expired */ 2464 bond_propose_link_state(slave, BOND_LINK_UP); 2465 slave->last_link_up = jiffies; 2466 slave_info(bond->dev, slave->dev, "link status up again after %d ms\n", 2467 (bond->params.downdelay - slave->delay) * 2468 bond->params.miimon); 2469 commit++; 2470 continue; 2471 } 2472 2473 if (slave->delay <= 0) { 2474 bond_propose_link_state(slave, BOND_LINK_DOWN); 2475 commit++; 2476 continue; 2477 } 2478 2479 slave->delay--; 2480 break; 2481 2482 case BOND_LINK_DOWN: 2483 if (!link_state) 2484 continue; 2485 2486 bond_propose_link_state(slave, BOND_LINK_BACK); 2487 commit++; 2488 slave->delay = bond->params.updelay; 2489 2490 if (slave->delay) { 2491 slave_info(bond->dev, slave->dev, "link status up, enabling it in %d ms\n", 2492 ignore_updelay ? 
0 : 2493 bond->params.updelay * 2494 bond->params.miimon); 2495 } 2496 fallthrough; 2497 case BOND_LINK_BACK: 2498 if (!link_state) { 2499 bond_propose_link_state(slave, BOND_LINK_DOWN); 2500 slave_info(bond->dev, slave->dev, "link status down again after %d ms\n", 2501 (bond->params.updelay - slave->delay) * 2502 bond->params.miimon); 2503 commit++; 2504 continue; 2505 } 2506 2507 if (ignore_updelay) 2508 slave->delay = 0; 2509 2510 if (slave->delay <= 0) { 2511 bond_propose_link_state(slave, BOND_LINK_UP); 2512 commit++; 2513 ignore_updelay = false; 2514 continue; 2515 } 2516 2517 slave->delay--; 2518 break; 2519 } 2520 } 2521 2522 return commit; 2523 } 2524 2525 static void bond_miimon_link_change(struct bonding *bond, 2526 struct slave *slave, 2527 char link) 2528 { 2529 switch (BOND_MODE(bond)) { 2530 case BOND_MODE_8023AD: 2531 bond_3ad_handle_link_change(slave, link); 2532 break; 2533 case BOND_MODE_TLB: 2534 case BOND_MODE_ALB: 2535 bond_alb_handle_link_change(bond, slave, link); 2536 break; 2537 case BOND_MODE_XOR: 2538 bond_update_slave_arr(bond, NULL); 2539 break; 2540 } 2541 } 2542 2543 static void bond_miimon_commit(struct bonding *bond) 2544 { 2545 struct list_head *iter; 2546 struct slave *slave, *primary; 2547 2548 bond_for_each_slave(bond, slave, iter) { 2549 switch (slave->link_new_state) { 2550 case BOND_LINK_NOCHANGE: 2551 /* For 802.3ad mode, check current slave speed and 2552 * duplex again in case its port was disabled after 2553 * invalid speed/duplex reporting but recovered before 2554 * link monitoring could make a decision on the actual 2555 * link status 2556 */ 2557 if (BOND_MODE(bond) == BOND_MODE_8023AD && 2558 slave->link == BOND_LINK_UP) 2559 bond_3ad_adapter_speed_duplex_changed(slave); 2560 continue; 2561 2562 case BOND_LINK_UP: 2563 if (bond_update_speed_duplex(slave) && 2564 bond_needs_speed_duplex(bond)) { 2565 slave->link = BOND_LINK_DOWN; 2566 if (net_ratelimit()) 2567 slave_warn(bond->dev, slave->dev, 2568 "failed to get link speed/duplex\n"); 2569 continue; 2570 } 2571 bond_set_slave_link_state(slave, BOND_LINK_UP, 2572 BOND_SLAVE_NOTIFY_NOW); 2573 slave->last_link_up = jiffies; 2574 2575 primary = rtnl_dereference(bond->primary_slave); 2576 if (BOND_MODE(bond) == BOND_MODE_8023AD) { 2577 /* prevent it from being the active one */ 2578 bond_set_backup_slave(slave); 2579 } else if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { 2580 /* make it immediately active */ 2581 bond_set_active_slave(slave); 2582 } 2583 2584 slave_info(bond->dev, slave->dev, "link status definitely up, %u Mbps %s duplex\n", 2585 slave->speed == SPEED_UNKNOWN ? 0 : slave->speed, 2586 slave->duplex ? 
"full" : "half"); 2587 2588 bond_miimon_link_change(bond, slave, BOND_LINK_UP); 2589 2590 if (!bond->curr_active_slave || slave == primary) 2591 goto do_failover; 2592 2593 continue; 2594 2595 case BOND_LINK_DOWN: 2596 if (slave->link_failure_count < UINT_MAX) 2597 slave->link_failure_count++; 2598 2599 bond_set_slave_link_state(slave, BOND_LINK_DOWN, 2600 BOND_SLAVE_NOTIFY_NOW); 2601 2602 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP || 2603 BOND_MODE(bond) == BOND_MODE_8023AD) 2604 bond_set_slave_inactive_flags(slave, 2605 BOND_SLAVE_NOTIFY_NOW); 2606 2607 slave_info(bond->dev, slave->dev, "link status definitely down, disabling slave\n"); 2608 2609 bond_miimon_link_change(bond, slave, BOND_LINK_DOWN); 2610 2611 if (slave == rcu_access_pointer(bond->curr_active_slave)) 2612 goto do_failover; 2613 2614 continue; 2615 2616 default: 2617 slave_err(bond->dev, slave->dev, "invalid new link %d on slave\n", 2618 slave->link_new_state); 2619 bond_propose_link_state(slave, BOND_LINK_NOCHANGE); 2620 2621 continue; 2622 } 2623 2624 do_failover: 2625 block_netpoll_tx(); 2626 bond_select_active_slave(bond); 2627 unblock_netpoll_tx(); 2628 } 2629 2630 bond_set_carrier(bond); 2631 } 2632 2633 /* bond_mii_monitor 2634 * 2635 * Really a wrapper that splits the mii monitor into two phases: an 2636 * inspection, then (if inspection indicates something needs to be done) 2637 * an acquisition of appropriate locks followed by a commit phase to 2638 * implement whatever link state changes are indicated. 2639 */ 2640 static void bond_mii_monitor(struct work_struct *work) 2641 { 2642 struct bonding *bond = container_of(work, struct bonding, 2643 mii_work.work); 2644 bool should_notify_peers = false; 2645 bool commit; 2646 unsigned long delay; 2647 struct slave *slave; 2648 struct list_head *iter; 2649 2650 delay = msecs_to_jiffies(bond->params.miimon); 2651 2652 if (!bond_has_slaves(bond)) 2653 goto re_arm; 2654 2655 rcu_read_lock(); 2656 should_notify_peers = bond_should_notify_peers(bond); 2657 commit = !!bond_miimon_inspect(bond); 2658 if (bond->send_peer_notif) { 2659 rcu_read_unlock(); 2660 if (rtnl_trylock()) { 2661 bond->send_peer_notif--; 2662 rtnl_unlock(); 2663 } 2664 } else { 2665 rcu_read_unlock(); 2666 } 2667 2668 if (commit) { 2669 /* Race avoidance with bond_close cancel of workqueue */ 2670 if (!rtnl_trylock()) { 2671 delay = 1; 2672 should_notify_peers = false; 2673 goto re_arm; 2674 } 2675 2676 bond_for_each_slave(bond, slave, iter) { 2677 bond_commit_link_state(slave, BOND_SLAVE_NOTIFY_LATER); 2678 } 2679 bond_miimon_commit(bond); 2680 2681 rtnl_unlock(); /* might sleep, hold no other locks */ 2682 } 2683 2684 re_arm: 2685 if (bond->params.miimon) 2686 queue_delayed_work(bond->wq, &bond->mii_work, delay); 2687 2688 if (should_notify_peers) { 2689 if (!rtnl_trylock()) 2690 return; 2691 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev); 2692 rtnl_unlock(); 2693 } 2694 } 2695 2696 static int bond_upper_dev_walk(struct net_device *upper, 2697 struct netdev_nested_priv *priv) 2698 { 2699 __be32 ip = *(__be32 *)priv->data; 2700 2701 return ip == bond_confirm_addr(upper, 0, ip); 2702 } 2703 2704 static bool bond_has_this_ip(struct bonding *bond, __be32 ip) 2705 { 2706 struct netdev_nested_priv priv = { 2707 .data = (void *)&ip, 2708 }; 2709 bool ret = false; 2710 2711 if (ip == bond_confirm_addr(bond->dev, 0, ip)) 2712 return true; 2713 2714 rcu_read_lock(); 2715 if (netdev_walk_all_upper_dev_rcu(bond->dev, bond_upper_dev_walk, &priv)) 2716 ret = true; 2717 rcu_read_unlock(); 2718 2719 return 
ret; 2720 } 2721 2722 /* We go to the (large) trouble of VLAN tagging ARP frames because 2723 * switches in VLAN mode (especially if ports are configured as 2724 * "native" to a VLAN) might not pass non-tagged frames. 2725 */ 2726 static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip, 2727 __be32 src_ip, struct bond_vlan_tag *tags) 2728 { 2729 struct sk_buff *skb; 2730 struct bond_vlan_tag *outer_tag = tags; 2731 struct net_device *slave_dev = slave->dev; 2732 struct net_device *bond_dev = slave->bond->dev; 2733 2734 slave_dbg(bond_dev, slave_dev, "arp %d on slave: dst %pI4 src %pI4\n", 2735 arp_op, &dest_ip, &src_ip); 2736 2737 skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip, 2738 NULL, slave_dev->dev_addr, NULL); 2739 2740 if (!skb) { 2741 net_err_ratelimited("ARP packet allocation failed\n"); 2742 return; 2743 } 2744 2745 if (!tags || tags->vlan_proto == VLAN_N_VID) 2746 goto xmit; 2747 2748 tags++; 2749 2750 /* Go through all the tags backwards and add them to the packet */ 2751 while (tags->vlan_proto != VLAN_N_VID) { 2752 if (!tags->vlan_id) { 2753 tags++; 2754 continue; 2755 } 2756 2757 slave_dbg(bond_dev, slave_dev, "inner tag: proto %X vid %X\n", 2758 ntohs(outer_tag->vlan_proto), tags->vlan_id); 2759 skb = vlan_insert_tag_set_proto(skb, tags->vlan_proto, 2760 tags->vlan_id); 2761 if (!skb) { 2762 net_err_ratelimited("failed to insert inner VLAN tag\n"); 2763 return; 2764 } 2765 2766 tags++; 2767 } 2768 /* Set the outer tag */ 2769 if (outer_tag->vlan_id) { 2770 slave_dbg(bond_dev, slave_dev, "outer tag: proto %X vid %X\n", 2771 ntohs(outer_tag->vlan_proto), outer_tag->vlan_id); 2772 __vlan_hwaccel_put_tag(skb, outer_tag->vlan_proto, 2773 outer_tag->vlan_id); 2774 } 2775 2776 xmit: 2777 arp_xmit(skb); 2778 } 2779 2780 /* Validate the device path between the @start_dev and the @end_dev. 2781 * The path is valid if the @end_dev is reachable through device 2782 * stacking. 2783 * When the path is validated, collect any vlan information in the 2784 * path. 
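 * The returned array is ordered outer-tag-first (the device closest
 * to @start_dev) and is terminated by an entry whose vlan_proto is
 * VLAN_N_VID; the caller must kfree() it (see bond_arp_send_all()).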
2785 */ 2786 struct bond_vlan_tag *bond_verify_device_path(struct net_device *start_dev, 2787 struct net_device *end_dev, 2788 int level) 2789 { 2790 struct bond_vlan_tag *tags; 2791 struct net_device *upper; 2792 struct list_head *iter; 2793 2794 if (start_dev == end_dev) { 2795 tags = kcalloc(level + 1, sizeof(*tags), GFP_ATOMIC); 2796 if (!tags) 2797 return ERR_PTR(-ENOMEM); 2798 tags[level].vlan_proto = VLAN_N_VID; 2799 return tags; 2800 } 2801 2802 netdev_for_each_upper_dev_rcu(start_dev, upper, iter) { 2803 tags = bond_verify_device_path(upper, end_dev, level + 1); 2804 if (IS_ERR_OR_NULL(tags)) { 2805 if (IS_ERR(tags)) 2806 return tags; 2807 continue; 2808 } 2809 if (is_vlan_dev(upper)) { 2810 tags[level].vlan_proto = vlan_dev_vlan_proto(upper); 2811 tags[level].vlan_id = vlan_dev_vlan_id(upper); 2812 } 2813 2814 return tags; 2815 } 2816 2817 return NULL; 2818 } 2819 2820 static void bond_arp_send_all(struct bonding *bond, struct slave *slave) 2821 { 2822 struct rtable *rt; 2823 struct bond_vlan_tag *tags; 2824 __be32 *targets = bond->params.arp_targets, addr; 2825 int i; 2826 2827 for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) { 2828 slave_dbg(bond->dev, slave->dev, "%s: target %pI4\n", 2829 __func__, &targets[i]); 2830 tags = NULL; 2831 2832 /* Find out through which dev should the packet go */ 2833 rt = ip_route_output(dev_net(bond->dev), targets[i], 0, 2834 RTO_ONLINK, 0); 2835 if (IS_ERR(rt)) { 2836 /* there's no route to target - try to send arp 2837 * probe to generate any traffic (arp_validate=0) 2838 */ 2839 if (bond->params.arp_validate) 2840 net_warn_ratelimited("%s: no route to arp_ip_target %pI4 and arp_validate is set\n", 2841 bond->dev->name, 2842 &targets[i]); 2843 bond_arp_send(slave, ARPOP_REQUEST, targets[i], 2844 0, tags); 2845 continue; 2846 } 2847 2848 /* bond device itself */ 2849 if (rt->dst.dev == bond->dev) 2850 goto found; 2851 2852 rcu_read_lock(); 2853 tags = bond_verify_device_path(bond->dev, rt->dst.dev, 0); 2854 rcu_read_unlock(); 2855 2856 if (!IS_ERR_OR_NULL(tags)) 2857 goto found; 2858 2859 /* Not our device - skip */ 2860 slave_dbg(bond->dev, slave->dev, "no path to arp_ip_target %pI4 via rt.dev %s\n", 2861 &targets[i], rt->dst.dev ? 
rt->dst.dev->name : "NULL"); 2862 2863 ip_rt_put(rt); 2864 continue; 2865 2866 found: 2867 addr = bond_confirm_addr(rt->dst.dev, targets[i], 0); 2868 ip_rt_put(rt); 2869 bond_arp_send(slave, ARPOP_REQUEST, targets[i], addr, tags); 2870 kfree(tags); 2871 } 2872 } 2873 2874 static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 sip, __be32 tip) 2875 { 2876 int i; 2877 2878 if (!sip || !bond_has_this_ip(bond, tip)) { 2879 slave_dbg(bond->dev, slave->dev, "%s: sip %pI4 tip %pI4 not found\n", 2880 __func__, &sip, &tip); 2881 return; 2882 } 2883 2884 i = bond_get_targets_ip(bond->params.arp_targets, sip); 2885 if (i == -1) { 2886 slave_dbg(bond->dev, slave->dev, "%s: sip %pI4 not found in targets\n", 2887 __func__, &sip); 2888 return; 2889 } 2890 slave->last_rx = jiffies; 2891 slave->target_last_arp_rx[i] = jiffies; 2892 } 2893 2894 int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, 2895 struct slave *slave) 2896 { 2897 struct arphdr *arp = (struct arphdr *)skb->data; 2898 struct slave *curr_active_slave, *curr_arp_slave; 2899 unsigned char *arp_ptr; 2900 __be32 sip, tip; 2901 int is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP); 2902 unsigned int alen; 2903 2904 if (!slave_do_arp_validate(bond, slave)) { 2905 if ((slave_do_arp_validate_only(bond) && is_arp) || 2906 !slave_do_arp_validate_only(bond)) 2907 slave->last_rx = jiffies; 2908 return RX_HANDLER_ANOTHER; 2909 } else if (!is_arp) { 2910 return RX_HANDLER_ANOTHER; 2911 } 2912 2913 alen = arp_hdr_len(bond->dev); 2914 2915 slave_dbg(bond->dev, slave->dev, "%s: skb->dev %s\n", 2916 __func__, skb->dev->name); 2917 2918 if (alen > skb_headlen(skb)) { 2919 arp = kmalloc(alen, GFP_ATOMIC); 2920 if (!arp) 2921 goto out_unlock; 2922 if (skb_copy_bits(skb, 0, arp, alen) < 0) 2923 goto out_unlock; 2924 } 2925 2926 if (arp->ar_hln != bond->dev->addr_len || 2927 skb->pkt_type == PACKET_OTHERHOST || 2928 skb->pkt_type == PACKET_LOOPBACK || 2929 arp->ar_hrd != htons(ARPHRD_ETHER) || 2930 arp->ar_pro != htons(ETH_P_IP) || 2931 arp->ar_pln != 4) 2932 goto out_unlock; 2933 2934 arp_ptr = (unsigned char *)(arp + 1); 2935 arp_ptr += bond->dev->addr_len; 2936 memcpy(&sip, arp_ptr, 4); 2937 arp_ptr += 4 + bond->dev->addr_len; 2938 memcpy(&tip, arp_ptr, 4); 2939 2940 slave_dbg(bond->dev, slave->dev, "%s: %s/%d av %d sv %d sip %pI4 tip %pI4\n", 2941 __func__, slave->dev->name, bond_slave_state(slave), 2942 bond->params.arp_validate, slave_do_arp_validate(bond, slave), 2943 &sip, &tip); 2944 2945 curr_active_slave = rcu_dereference(bond->curr_active_slave); 2946 curr_arp_slave = rcu_dereference(bond->current_arp_slave); 2947 2948 /* We 'trust' the received ARP enough to validate it if: 2949 * 2950 * (a) the slave receiving the ARP is active (which includes the 2951 * current ARP slave, if any), or 2952 * 2953 * (b) the receiving slave isn't active, but there is a currently 2954 * active slave and it received valid arp reply(s) after it became 2955 * the currently active slave, or 2956 * 2957 * (c) there is an ARP slave that sent an ARP during the prior ARP 2958 * interval, and we receive an ARP reply on any slave. We accept 2959 * these because switch FDB update delays may deliver the ARP 2960 * reply to a slave other than the sender of the ARP request. 2961 * 2962 * Note: for (b), backup slaves are receiving the broadcast ARP 2963 * request, not a reply. This request passes from the sending 2964 * slave through the L2 switch(es) to the receiving slave. 
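 * (In other words, what a backup slave sees for (b) is our own
 * broadcast request, whose sender IP is the bond's address and whose
 * target IP is the arp_ip_target.)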
Since 2965 * this is checking the request, sip/tip are swapped for 2966 * validation. 2967 * 2968 * This is done to avoid endless looping when we can't reach the 2969 * arp_ip_target and fool ourselves with our own arp requests. 2970 */ 2971 if (bond_is_active_slave(slave)) 2972 bond_validate_arp(bond, slave, sip, tip); 2973 else if (curr_active_slave && 2974 time_after(slave_last_rx(bond, curr_active_slave), 2975 curr_active_slave->last_link_up)) 2976 bond_validate_arp(bond, slave, tip, sip); 2977 else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) && 2978 bond_time_in_interval(bond, 2979 dev_trans_start(curr_arp_slave->dev), 1)) 2980 bond_validate_arp(bond, slave, sip, tip); 2981 2982 out_unlock: 2983 if (arp != (struct arphdr *)skb->data) 2984 kfree(arp); 2985 return RX_HANDLER_ANOTHER; 2986 } 2987 2988 /* function to verify if we're in the arp_interval timeslice, returns true if 2989 * (last_act - arp_interval) <= jiffies <= (last_act + mod * arp_interval + 2990 * arp_interval/2) . the arp_interval/2 is needed for really fast networks. 2991 */ 2992 static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act, 2993 int mod) 2994 { 2995 int delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval); 2996 2997 return time_in_range(jiffies, 2998 last_act - delta_in_ticks, 2999 last_act + mod * delta_in_ticks + delta_in_ticks/2); 3000 } 3001 3002 /* This function is called regularly to monitor each slave's link 3003 * ensuring that traffic is being sent and received when arp monitoring 3004 * is used in load-balancing mode. if the adapter has been dormant, then an 3005 * arp is transmitted to generate traffic. see activebackup_arp_monitor for 3006 * arp monitoring in active backup mode. 3007 */ 3008 static void bond_loadbalance_arp_mon(struct bonding *bond) 3009 { 3010 struct slave *slave, *oldcurrent; 3011 struct list_head *iter; 3012 int do_failover = 0, slave_state_changed = 0; 3013 3014 if (!bond_has_slaves(bond)) 3015 goto re_arm; 3016 3017 rcu_read_lock(); 3018 3019 oldcurrent = rcu_dereference(bond->curr_active_slave); 3020 /* see if any of the previous devices are up now (i.e. they have 3021 * xmt and rcv traffic). the curr_active_slave does not come into 3022 * the picture unless it is null. also, slave->last_link_up is not 3023 * needed here because we send an arp on each slave and give a slave 3024 * as long as it needs to get the tx/rx within the delta. 3025 * TODO: what about up/down delay in arp mode? it wasn't here before 3026 * so it can wait 3027 */ 3028 bond_for_each_slave_rcu(bond, slave, iter) { 3029 unsigned long trans_start = dev_trans_start(slave->dev); 3030 3031 bond_propose_link_state(slave, BOND_LINK_NOCHANGE); 3032 3033 if (slave->link != BOND_LINK_UP) { 3034 if (bond_time_in_interval(bond, trans_start, 1) && 3035 bond_time_in_interval(bond, slave->last_rx, 1)) { 3036 3037 bond_propose_link_state(slave, BOND_LINK_UP); 3038 slave_state_changed = 1; 3039 3040 /* primary_slave has no meaning in round-robin 3041 * mode. the window of a slave being up and 3042 * curr_active_slave being null after enslaving 3043 * is closed. 
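 * (A failover is forced here only when there was no active slave at
 * all, e.g. when the very first enslaved port comes up.)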
3044 */ 3045 if (!oldcurrent) { 3046 slave_info(bond->dev, slave->dev, "link status definitely up\n"); 3047 do_failover = 1; 3048 } else { 3049 slave_info(bond->dev, slave->dev, "interface is now up\n"); 3050 } 3051 } 3052 } else { 3053 /* slave->link == BOND_LINK_UP */ 3054 3055 /* not all switches will respond to an arp request 3056 * when the source ip is 0, so don't take the link down 3057 * if we don't know our ip yet 3058 */ 3059 if (!bond_time_in_interval(bond, trans_start, 2) || 3060 !bond_time_in_interval(bond, slave->last_rx, 2)) { 3061 3062 bond_propose_link_state(slave, BOND_LINK_DOWN); 3063 slave_state_changed = 1; 3064 3065 if (slave->link_failure_count < UINT_MAX) 3066 slave->link_failure_count++; 3067 3068 slave_info(bond->dev, slave->dev, "interface is now down\n"); 3069 3070 if (slave == oldcurrent) 3071 do_failover = 1; 3072 } 3073 } 3074 3075 /* note: if switch is in round-robin mode, all links 3076 * must tx arp to ensure all links rx an arp - otherwise 3077 * links may oscillate or not come up at all; if switch is 3078 * in something like xor mode, there is nothing we can 3079 * do - all replies will be rx'ed on same link causing slaves 3080 * to be unstable during low/no traffic periods 3081 */ 3082 if (bond_slave_is_up(slave)) 3083 bond_arp_send_all(bond, slave); 3084 } 3085 3086 rcu_read_unlock(); 3087 3088 if (do_failover || slave_state_changed) { 3089 if (!rtnl_trylock()) 3090 goto re_arm; 3091 3092 bond_for_each_slave(bond, slave, iter) { 3093 if (slave->link_new_state != BOND_LINK_NOCHANGE) 3094 slave->link = slave->link_new_state; 3095 } 3096 3097 if (slave_state_changed) { 3098 bond_slave_state_change(bond); 3099 if (BOND_MODE(bond) == BOND_MODE_XOR) 3100 bond_update_slave_arr(bond, NULL); 3101 } 3102 if (do_failover) { 3103 block_netpoll_tx(); 3104 bond_select_active_slave(bond); 3105 unblock_netpoll_tx(); 3106 } 3107 rtnl_unlock(); 3108 } 3109 3110 re_arm: 3111 if (bond->params.arp_interval) 3112 queue_delayed_work(bond->wq, &bond->arp_work, 3113 msecs_to_jiffies(bond->params.arp_interval)); 3114 } 3115 3116 /* Called to inspect slaves for active-backup mode ARP monitor link state 3117 * changes. Sets proposed link state in slaves to specify what action 3118 * should take place for the slave. Returns 0 if no changes are found, >0 3119 * if changes to link states must be committed. 3120 * 3121 * Called with rcu_read_lock held. 3122 */ 3123 static int bond_ab_arp_inspect(struct bonding *bond) 3124 { 3125 unsigned long trans_start, last_rx; 3126 struct list_head *iter; 3127 struct slave *slave; 3128 int commit = 0; 3129 3130 bond_for_each_slave_rcu(bond, slave, iter) { 3131 bond_propose_link_state(slave, BOND_LINK_NOCHANGE); 3132 last_rx = slave_last_rx(bond, slave); 3133 3134 if (slave->link != BOND_LINK_UP) { 3135 if (bond_time_in_interval(bond, last_rx, 1)) { 3136 bond_propose_link_state(slave, BOND_LINK_UP); 3137 commit++; 3138 } else if (slave->link == BOND_LINK_BACK) { 3139 bond_propose_link_state(slave, BOND_LINK_FAIL); 3140 commit++; 3141 } 3142 continue; 3143 } 3144 3145 /* Give slaves 2*delta after being enslaved or made 3146 * active. This avoids bouncing, as the last receive 3147 * times need a full ARP monitor cycle to be updated. 
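 * (For instance, with arp_interval=1000 this gives a newly enslaved
 * or newly activated slave roughly two seconds of grace before it
 * can be declared down.)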
3148 */ 3149 if (bond_time_in_interval(bond, slave->last_link_up, 2)) 3150 continue; 3151 3152 /* Backup slave is down if: 3153 * - No current_arp_slave AND 3154 * - more than 3*delta since last receive AND 3155 * - the bond has an IP address 3156 * 3157 * Note: a non-null current_arp_slave indicates 3158 * the curr_active_slave went down and we are 3159 * searching for a new one; under this condition 3160 * we only take the curr_active_slave down - this 3161 * gives each slave a chance to tx/rx traffic 3162 * before being taken out 3163 */ 3164 if (!bond_is_active_slave(slave) && 3165 !rcu_access_pointer(bond->current_arp_slave) && 3166 !bond_time_in_interval(bond, last_rx, 3)) { 3167 bond_propose_link_state(slave, BOND_LINK_DOWN); 3168 commit++; 3169 } 3170 3171 /* Active slave is down if: 3172 * - more than 2*delta since transmitting OR 3173 * - (more than 2*delta since receive AND 3174 * the bond has an IP address) 3175 */ 3176 trans_start = dev_trans_start(slave->dev); 3177 if (bond_is_active_slave(slave) && 3178 (!bond_time_in_interval(bond, trans_start, 2) || 3179 !bond_time_in_interval(bond, last_rx, 2))) { 3180 bond_propose_link_state(slave, BOND_LINK_DOWN); 3181 commit++; 3182 } 3183 } 3184 3185 return commit; 3186 } 3187 3188 /* Called to commit link state changes noted by inspection step of 3189 * active-backup mode ARP monitor. 3190 * 3191 * Called with RTNL hold. 3192 */ 3193 static void bond_ab_arp_commit(struct bonding *bond) 3194 { 3195 unsigned long trans_start; 3196 struct list_head *iter; 3197 struct slave *slave; 3198 3199 bond_for_each_slave(bond, slave, iter) { 3200 switch (slave->link_new_state) { 3201 case BOND_LINK_NOCHANGE: 3202 continue; 3203 3204 case BOND_LINK_UP: 3205 trans_start = dev_trans_start(slave->dev); 3206 if (rtnl_dereference(bond->curr_active_slave) != slave || 3207 (!rtnl_dereference(bond->curr_active_slave) && 3208 bond_time_in_interval(bond, trans_start, 1))) { 3209 struct slave *current_arp_slave; 3210 3211 current_arp_slave = rtnl_dereference(bond->current_arp_slave); 3212 bond_set_slave_link_state(slave, BOND_LINK_UP, 3213 BOND_SLAVE_NOTIFY_NOW); 3214 if (current_arp_slave) { 3215 bond_set_slave_inactive_flags( 3216 current_arp_slave, 3217 BOND_SLAVE_NOTIFY_NOW); 3218 RCU_INIT_POINTER(bond->current_arp_slave, NULL); 3219 } 3220 3221 slave_info(bond->dev, slave->dev, "link status definitely up\n"); 3222 3223 if (!rtnl_dereference(bond->curr_active_slave) || 3224 slave == rtnl_dereference(bond->primary_slave)) 3225 goto do_failover; 3226 3227 } 3228 3229 continue; 3230 3231 case BOND_LINK_DOWN: 3232 if (slave->link_failure_count < UINT_MAX) 3233 slave->link_failure_count++; 3234 3235 bond_set_slave_link_state(slave, BOND_LINK_DOWN, 3236 BOND_SLAVE_NOTIFY_NOW); 3237 bond_set_slave_inactive_flags(slave, 3238 BOND_SLAVE_NOTIFY_NOW); 3239 3240 slave_info(bond->dev, slave->dev, "link status definitely down, disabling slave\n"); 3241 3242 if (slave == rtnl_dereference(bond->curr_active_slave)) { 3243 RCU_INIT_POINTER(bond->current_arp_slave, NULL); 3244 goto do_failover; 3245 } 3246 3247 continue; 3248 3249 case BOND_LINK_FAIL: 3250 bond_set_slave_link_state(slave, BOND_LINK_FAIL, 3251 BOND_SLAVE_NOTIFY_NOW); 3252 bond_set_slave_inactive_flags(slave, 3253 BOND_SLAVE_NOTIFY_NOW); 3254 3255 /* A slave has just been enslaved and has become 3256 * the current active slave. 
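 * There is no need to keep probing for a new active slave in that
 * case, so current_arp_slave is cleared below.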
3257 */ 3258 if (rtnl_dereference(bond->curr_active_slave)) 3259 RCU_INIT_POINTER(bond->current_arp_slave, NULL); 3260 continue; 3261 3262 default: 3263 slave_err(bond->dev, slave->dev, 3264 "impossible: link_new_state %d on slave\n", 3265 slave->link_new_state); 3266 continue; 3267 } 3268 3269 do_failover: 3270 block_netpoll_tx(); 3271 bond_select_active_slave(bond); 3272 unblock_netpoll_tx(); 3273 } 3274 3275 bond_set_carrier(bond); 3276 } 3277 3278 /* Send ARP probes for active-backup mode ARP monitor. 3279 * 3280 * Called with rcu_read_lock held. 3281 */ 3282 static bool bond_ab_arp_probe(struct bonding *bond) 3283 { 3284 struct slave *slave, *before = NULL, *new_slave = NULL, 3285 *curr_arp_slave = rcu_dereference(bond->current_arp_slave), 3286 *curr_active_slave = rcu_dereference(bond->curr_active_slave); 3287 struct list_head *iter; 3288 bool found = false; 3289 bool should_notify_rtnl = BOND_SLAVE_NOTIFY_LATER; 3290 3291 if (curr_arp_slave && curr_active_slave) 3292 netdev_info(bond->dev, "PROBE: c_arp %s && cas %s BAD\n", 3293 curr_arp_slave->dev->name, 3294 curr_active_slave->dev->name); 3295 3296 if (curr_active_slave) { 3297 bond_arp_send_all(bond, curr_active_slave); 3298 return should_notify_rtnl; 3299 } 3300 3301 /* if we don't have a curr_active_slave, search for the next available 3302 * backup slave from the current_arp_slave and make it the candidate 3303 * for becoming the curr_active_slave 3304 */ 3305 3306 if (!curr_arp_slave) { 3307 curr_arp_slave = bond_first_slave_rcu(bond); 3308 if (!curr_arp_slave) 3309 return should_notify_rtnl; 3310 } 3311 3312 bond_for_each_slave_rcu(bond, slave, iter) { 3313 if (!found && !before && bond_slave_is_up(slave)) 3314 before = slave; 3315 3316 if (found && !new_slave && bond_slave_is_up(slave)) 3317 new_slave = slave; 3318 /* if the link state is up at this point, we 3319 * mark it down - this can happen if we have 3320 * simultaneous link failures and 3321 * reselect_active_interface doesn't make this 3322 * one the current slave so it is still marked 3323 * up when it is actually down 3324 */ 3325 if (!bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) { 3326 bond_set_slave_link_state(slave, BOND_LINK_DOWN, 3327 BOND_SLAVE_NOTIFY_LATER); 3328 if (slave->link_failure_count < UINT_MAX) 3329 slave->link_failure_count++; 3330 3331 bond_set_slave_inactive_flags(slave, 3332 BOND_SLAVE_NOTIFY_LATER); 3333 3334 slave_info(bond->dev, slave->dev, "backup interface is now down\n"); 3335 } 3336 if (slave == curr_arp_slave) 3337 found = true; 3338 } 3339 3340 if (!new_slave && before) 3341 new_slave = before; 3342 3343 if (!new_slave) 3344 goto check_state; 3345 3346 bond_set_slave_link_state(new_slave, BOND_LINK_BACK, 3347 BOND_SLAVE_NOTIFY_LATER); 3348 bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER); 3349 bond_arp_send_all(bond, new_slave); 3350 new_slave->last_link_up = jiffies; 3351 rcu_assign_pointer(bond->current_arp_slave, new_slave); 3352 3353 check_state: 3354 bond_for_each_slave_rcu(bond, slave, iter) { 3355 if (slave->should_notify || slave->should_notify_link) { 3356 should_notify_rtnl = BOND_SLAVE_NOTIFY_NOW; 3357 break; 3358 } 3359 } 3360 return should_notify_rtnl; 3361 } 3362 3363 static void bond_activebackup_arp_mon(struct bonding *bond) 3364 { 3365 bool should_notify_peers = false; 3366 bool should_notify_rtnl = false; 3367 int delta_in_ticks; 3368 3369 delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval); 3370 3371 if (!bond_has_slaves(bond)) 3372 goto re_arm; 3373 3374 rcu_read_lock(); 3375 
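/* Two-phase monitor, like bond_mii_monitor() above: inspect under
 * rcu_read_lock(), and take RTNL to commit only if the inspection
 * phase actually found link state changes to apply.
 */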
3376 should_notify_peers = bond_should_notify_peers(bond); 3377 3378 if (bond_ab_arp_inspect(bond)) { 3379 rcu_read_unlock(); 3380 3381 /* Race avoidance with bond_close flush of workqueue */ 3382 if (!rtnl_trylock()) { 3383 delta_in_ticks = 1; 3384 should_notify_peers = false; 3385 goto re_arm; 3386 } 3387 3388 bond_ab_arp_commit(bond); 3389 3390 rtnl_unlock(); 3391 rcu_read_lock(); 3392 } 3393 3394 should_notify_rtnl = bond_ab_arp_probe(bond); 3395 rcu_read_unlock(); 3396 3397 re_arm: 3398 if (bond->params.arp_interval) 3399 queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); 3400 3401 if (should_notify_peers || should_notify_rtnl) { 3402 if (!rtnl_trylock()) 3403 return; 3404 3405 if (should_notify_peers) 3406 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, 3407 bond->dev); 3408 if (should_notify_rtnl) { 3409 bond_slave_state_notify(bond); 3410 bond_slave_link_notify(bond); 3411 } 3412 3413 rtnl_unlock(); 3414 } 3415 } 3416 3417 static void bond_arp_monitor(struct work_struct *work) 3418 { 3419 struct bonding *bond = container_of(work, struct bonding, 3420 arp_work.work); 3421 3422 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) 3423 bond_activebackup_arp_mon(bond); 3424 else 3425 bond_loadbalance_arp_mon(bond); 3426 } 3427 3428 /*-------------------------- netdev event handling --------------------------*/ 3429 3430 /* Change device name */ 3431 static int bond_event_changename(struct bonding *bond) 3432 { 3433 bond_remove_proc_entry(bond); 3434 bond_create_proc_entry(bond); 3435 3436 bond_debug_reregister(bond); 3437 3438 return NOTIFY_DONE; 3439 } 3440 3441 static int bond_master_netdev_event(unsigned long event, 3442 struct net_device *bond_dev) 3443 { 3444 struct bonding *event_bond = netdev_priv(bond_dev); 3445 3446 netdev_dbg(bond_dev, "%s called\n", __func__); 3447 3448 switch (event) { 3449 case NETDEV_CHANGENAME: 3450 return bond_event_changename(event_bond); 3451 case NETDEV_UNREGISTER: 3452 bond_remove_proc_entry(event_bond); 3453 #ifdef CONFIG_XFRM_OFFLOAD 3454 xfrm_dev_state_flush(dev_net(bond_dev), bond_dev, true); 3455 #endif /* CONFIG_XFRM_OFFLOAD */ 3456 break; 3457 case NETDEV_REGISTER: 3458 bond_create_proc_entry(event_bond); 3459 break; 3460 default: 3461 break; 3462 } 3463 3464 return NOTIFY_DONE; 3465 } 3466 3467 static int bond_slave_netdev_event(unsigned long event, 3468 struct net_device *slave_dev) 3469 { 3470 struct slave *slave = bond_slave_get_rtnl(slave_dev), *primary; 3471 struct bonding *bond; 3472 struct net_device *bond_dev; 3473 3474 /* A netdev event can be generated while enslaving a device 3475 * before netdev_rx_handler_register is called in which case 3476 * slave will be NULL 3477 */ 3478 if (!slave) { 3479 netdev_dbg(slave_dev, "%s called on NULL slave\n", __func__); 3480 return NOTIFY_DONE; 3481 } 3482 3483 bond_dev = slave->bond->dev; 3484 bond = slave->bond; 3485 primary = rtnl_dereference(bond->primary_slave); 3486 3487 slave_dbg(bond_dev, slave_dev, "%s called\n", __func__); 3488 3489 switch (event) { 3490 case NETDEV_UNREGISTER: 3491 if (bond_dev->type != ARPHRD_ETHER) 3492 bond_release_and_destroy(bond_dev, slave_dev); 3493 else 3494 __bond_release_one(bond_dev, slave_dev, false, true); 3495 break; 3496 case NETDEV_UP: 3497 case NETDEV_CHANGE: 3498 /* For 802.3ad mode only: 3499 * Getting invalid Speed/Duplex values here will put slave 3500 * in weird state. 
Mark it as link-fail if the link was 3501 * previously up or link-down if it hasn't yet come up, and 3502 * let link-monitoring (miimon) set it right when correct 3503 * speeds/duplex are available. 3504 */ 3505 if (bond_update_speed_duplex(slave) && 3506 BOND_MODE(bond) == BOND_MODE_8023AD) { 3507 if (slave->last_link_up) 3508 slave->link = BOND_LINK_FAIL; 3509 else 3510 slave->link = BOND_LINK_DOWN; 3511 } 3512 3513 if (BOND_MODE(bond) == BOND_MODE_8023AD) 3514 bond_3ad_adapter_speed_duplex_changed(slave); 3515 fallthrough; 3516 case NETDEV_DOWN: 3517 /* Refresh slave-array if applicable! 3518 * If the setup does not use miimon or arpmon (mode-specific!), 3519 * then these events will not cause the slave-array to be 3520 * refreshed. This will cause xmit to use a slave that is not 3521 * usable. Avoid such situations by refreshing the array at these 3522 * events. If these (miimon/arpmon) parameters are configured 3523 * then the array gets refreshed twice and that should be fine! 3524 */ 3525 if (bond_mode_can_use_xmit_hash(bond)) 3526 bond_update_slave_arr(bond, NULL); 3527 break; 3528 case NETDEV_CHANGEMTU: 3529 /* TODO: Should slaves be allowed to 3530 * independently alter their MTU? For 3531 * an active-backup bond, slaves need 3532 * not be the same type of device, so 3533 * MTUs may vary. For other modes, 3534 * slaves arguably should have the 3535 * same MTUs. To do this, we'd need to 3536 * take over the slave's change_mtu 3537 * function for the duration of their 3538 * servitude. 3539 */ 3540 break; 3541 case NETDEV_CHANGENAME: 3542 /* we don't care if we don't have primary set */ 3543 if (!bond_uses_primary(bond) || 3544 !bond->params.primary[0]) 3545 break; 3546 3547 if (slave == primary) { 3548 /* slave's name changed - it's no longer the primary */ 3549 RCU_INIT_POINTER(bond->primary_slave, NULL); 3550 } else if (!strcmp(slave_dev->name, bond->params.primary)) { 3551 /* we have a new primary slave */ 3552 rcu_assign_pointer(bond->primary_slave, slave); 3553 } else { /* we didn't change primary - exit */ 3554 break; 3555 } 3556 3557 netdev_info(bond->dev, "Primary slave changed to %s, reselecting active slave\n", 3558 primary ? slave_dev->name : "none"); 3559 3560 block_netpoll_tx(); 3561 bond_select_active_slave(bond); 3562 unblock_netpoll_tx(); 3563 break; 3564 case NETDEV_FEAT_CHANGE: 3565 bond_compute_features(bond); 3566 break; 3567 case NETDEV_RESEND_IGMP: 3568 /* Propagate to master device */ 3569 call_netdevice_notifiers(event, slave->bond->dev); 3570 break; 3571 default: 3572 break; 3573 } 3574 3575 return NOTIFY_DONE; 3576 } 3577 3578 /* bond_netdev_event: handle netdev notifier chain events. 3579 * 3580 * This function receives events for the netdev chain. The caller (an 3581 * ioctl handler calling blocking_notifier_call_chain) holds the necessary 3582 * locks for us to safely manipulate the slave devices (RTNL lock, 3583 * dev_probe_lock).
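 * (dev_probe_lock appears to be a historical reference; RTNL is the
 * lock that actually matters on this path today.)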
3584 */ 3585 static int bond_netdev_event(struct notifier_block *this, 3586 unsigned long event, void *ptr) 3587 { 3588 struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); 3589 3590 netdev_dbg(event_dev, "%s received %s\n", 3591 __func__, netdev_cmd_to_name(event)); 3592 3593 if (!(event_dev->priv_flags & IFF_BONDING)) 3594 return NOTIFY_DONE; 3595 3596 if (event_dev->flags & IFF_MASTER) { 3597 int ret; 3598 3599 ret = bond_master_netdev_event(event, event_dev); 3600 if (ret != NOTIFY_DONE) 3601 return ret; 3602 } 3603 3604 if (event_dev->flags & IFF_SLAVE) 3605 return bond_slave_netdev_event(event, event_dev); 3606 3607 return NOTIFY_DONE; 3608 } 3609 3610 static struct notifier_block bond_netdev_notifier = { 3611 .notifier_call = bond_netdev_event, 3612 }; 3613 3614 /*---------------------------- Hashing Policies -----------------------------*/ 3615 3616 /* L2 hash helper */ 3617 static inline u32 bond_eth_hash(struct sk_buff *skb) 3618 { 3619 struct ethhdr *ep, hdr_tmp; 3620 3621 ep = skb_header_pointer(skb, 0, sizeof(hdr_tmp), &hdr_tmp); 3622 if (ep) 3623 return ep->h_dest[5] ^ ep->h_source[5] ^ ep->h_proto; 3624 return 0; 3625 } 3626 3627 static bool bond_flow_ip(struct sk_buff *skb, struct flow_keys *fk, 3628 int *noff, int *proto, bool l34) 3629 { 3630 const struct ipv6hdr *iph6; 3631 const struct iphdr *iph; 3632 3633 if (skb->protocol == htons(ETH_P_IP)) { 3634 if (unlikely(!pskb_may_pull(skb, *noff + sizeof(*iph)))) 3635 return false; 3636 iph = (const struct iphdr *)(skb->data + *noff); 3637 iph_to_flow_copy_v4addrs(fk, iph); 3638 *noff += iph->ihl << 2; 3639 if (!ip_is_fragment(iph)) 3640 *proto = iph->protocol; 3641 } else if (skb->protocol == htons(ETH_P_IPV6)) { 3642 if (unlikely(!pskb_may_pull(skb, *noff + sizeof(*iph6)))) 3643 return false; 3644 iph6 = (const struct ipv6hdr *)(skb->data + *noff); 3645 iph_to_flow_copy_v6addrs(fk, iph6); 3646 *noff += sizeof(*iph6); 3647 *proto = iph6->nexthdr; 3648 } else { 3649 return false; 3650 } 3651 3652 if (l34 && *proto >= 0) 3653 fk->ports.ports = skb_flow_get_ports(skb, *noff, *proto); 3654 3655 return true; 3656 } 3657 3658 static u32 bond_vlan_srcmac_hash(struct sk_buff *skb) 3659 { 3660 struct ethhdr *mac_hdr = (struct ethhdr *)skb_mac_header(skb); 3661 u32 srcmac_vendor = 0, srcmac_dev = 0; 3662 u16 vlan; 3663 int i; 3664 3665 for (i = 0; i < 3; i++) 3666 srcmac_vendor = (srcmac_vendor << 8) | mac_hdr->h_source[i]; 3667 3668 for (i = 3; i < ETH_ALEN; i++) 3669 srcmac_dev = (srcmac_dev << 8) | mac_hdr->h_source[i]; 3670 3671 if (!skb_vlan_tag_present(skb)) 3672 return srcmac_vendor ^ srcmac_dev; 3673 3674 vlan = skb_vlan_tag_get(skb); 3675 3676 return vlan ^ srcmac_vendor ^ srcmac_dev; 3677 } 3678 3679 /* Extract the appropriate headers based on bond's xmit policy */ 3680 static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb, 3681 struct flow_keys *fk) 3682 { 3683 bool l34 = bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34; 3684 int noff, proto = -1; 3685 3686 switch (bond->params.xmit_policy) { 3687 case BOND_XMIT_POLICY_ENCAP23: 3688 case BOND_XMIT_POLICY_ENCAP34: 3689 memset(fk, 0, sizeof(*fk)); 3690 return __skb_flow_dissect(NULL, skb, &flow_keys_bonding, 3691 fk, NULL, 0, 0, 0, 0); 3692 default: 3693 break; 3694 } 3695 3696 fk->ports.ports = 0; 3697 memset(&fk->icmp, 0, sizeof(fk->icmp)); 3698 noff = skb_network_offset(skb); 3699 if (!bond_flow_ip(skb, fk, &noff, &proto, l34)) 3700 return false; 3701 3702 /* ICMP error packets contains at least 8 bytes of the header 3703 * of the packet 
which generated the error. Use this information 3704 * to correlate ICMP error packets within the same flow which 3705 * generated the error. 3706 */ 3707 if (proto == IPPROTO_ICMP || proto == IPPROTO_ICMPV6) { 3708 skb_flow_get_icmp_tci(skb, &fk->icmp, skb->data, 3709 skb_transport_offset(skb), 3710 skb_headlen(skb)); 3711 if (proto == IPPROTO_ICMP) { 3712 if (!icmp_is_err(fk->icmp.type)) 3713 return true; 3714 3715 noff += sizeof(struct icmphdr); 3716 } else if (proto == IPPROTO_ICMPV6) { 3717 if (!icmpv6_is_err(fk->icmp.type)) 3718 return true; 3719 3720 noff += sizeof(struct icmp6hdr); 3721 } 3722 return bond_flow_ip(skb, fk, &noff, &proto, l34); 3723 } 3724 3725 return true; 3726 } 3727 3728 static u32 bond_ip_hash(u32 hash, struct flow_keys *flow) 3729 { 3730 hash ^= (__force u32)flow_get_u32_dst(flow) ^ 3731 (__force u32)flow_get_u32_src(flow); 3732 hash ^= (hash >> 16); 3733 hash ^= (hash >> 8); 3734 /* discard lowest hash bit to deal with the common even ports pattern */ 3735 return hash >> 1; 3736 } 3737 3738 /** 3739 * bond_xmit_hash - generate a hash value based on the xmit policy 3740 * @bond: bonding device 3741 * @skb: buffer to use for headers 3742 * 3743 * This function will extract the necessary headers from the skb buffer and use 3744 * them to generate a hash based on the xmit_policy set in the bonding device 3745 */ 3746 u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb) 3747 { 3748 struct flow_keys flow; 3749 u32 hash; 3750 3751 if (bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP34 && 3752 skb->l4_hash) 3753 return skb->hash; 3754 3755 if (bond->params.xmit_policy == BOND_XMIT_POLICY_VLAN_SRCMAC) 3756 return bond_vlan_srcmac_hash(skb); 3757 3758 if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 || 3759 !bond_flow_dissect(bond, skb, &flow)) 3760 return bond_eth_hash(skb); 3761 3762 if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 || 3763 bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23) { 3764 hash = bond_eth_hash(skb); 3765 } else { 3766 if (flow.icmp.id) 3767 memcpy(&hash, &flow.icmp, sizeof(hash)); 3768 else 3769 memcpy(&hash, &flow.ports.ports, sizeof(hash)); 3770 } 3771 3772 return bond_ip_hash(hash, &flow); 3773 } 3774 3775 /*-------------------------- Device entry points ----------------------------*/ 3776 3777 void bond_work_init_all(struct bonding *bond) 3778 { 3779 INIT_DELAYED_WORK(&bond->mcast_work, 3780 bond_resend_igmp_join_requests_delayed); 3781 INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor); 3782 INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor); 3783 INIT_DELAYED_WORK(&bond->arp_work, bond_arp_monitor); 3784 INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler); 3785 INIT_DELAYED_WORK(&bond->slave_arr_work, bond_slave_arr_handler); 3786 } 3787 3788 static void bond_work_cancel_all(struct bonding *bond) 3789 { 3790 cancel_delayed_work_sync(&bond->mii_work); 3791 cancel_delayed_work_sync(&bond->arp_work); 3792 cancel_delayed_work_sync(&bond->alb_work); 3793 cancel_delayed_work_sync(&bond->ad_work); 3794 cancel_delayed_work_sync(&bond->mcast_work); 3795 cancel_delayed_work_sync(&bond->slave_arr_work); 3796 } 3797 3798 static int bond_open(struct net_device *bond_dev) 3799 { 3800 struct bonding *bond = netdev_priv(bond_dev); 3801 struct list_head *iter; 3802 struct slave *slave; 3803 3804 /* reset slave->backup and slave->inactive */ 3805 if (bond_has_slaves(bond)) { 3806 bond_for_each_slave(bond, slave, iter) { 3807 if (bond_uses_primary(bond) && 3808 slave != 
rcu_access_pointer(bond->curr_active_slave)) {
3809 bond_set_slave_inactive_flags(slave,
3810 BOND_SLAVE_NOTIFY_NOW);
3811 } else if (BOND_MODE(bond) != BOND_MODE_8023AD) {
3812 bond_set_slave_active_flags(slave,
3813 BOND_SLAVE_NOTIFY_NOW);
3814 }
3815 }
3816 }
3817
3818 if (bond_is_lb(bond)) {
3819 /* bond_alb_initialize must be called before the timer
3820 * is started.
3821 */
3822 if (bond_alb_initialize(bond, (BOND_MODE(bond) == BOND_MODE_ALB)))
3823 return -ENOMEM;
3824 if (bond->params.tlb_dynamic_lb || BOND_MODE(bond) == BOND_MODE_ALB)
3825 queue_delayed_work(bond->wq, &bond->alb_work, 0);
3826 }
3827
3828 if (bond->params.miimon) /* link check interval, in milliseconds. */
3829 queue_delayed_work(bond->wq, &bond->mii_work, 0);
3830
3831 if (bond->params.arp_interval) { /* arp interval, in milliseconds. */
3832 queue_delayed_work(bond->wq, &bond->arp_work, 0);
3833 bond->recv_probe = bond_arp_rcv;
3834 }
3835
3836 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
3837 queue_delayed_work(bond->wq, &bond->ad_work, 0);
3838 /* register to receive LACPDUs */
3839 bond->recv_probe = bond_3ad_lacpdu_recv;
3840 bond_3ad_initiate_agg_selection(bond, 1);
3841 }
3842
3843 if (bond_mode_can_use_xmit_hash(bond))
3844 bond_update_slave_arr(bond, NULL);
3845
3846 return 0;
3847 }
3848
3849 static int bond_close(struct net_device *bond_dev)
3850 {
3851 struct bonding *bond = netdev_priv(bond_dev);
3852
3853 bond_work_cancel_all(bond);
3854 bond->send_peer_notif = 0;
3855 if (bond_is_lb(bond))
3856 bond_alb_deinitialize(bond);
3857 bond->recv_probe = NULL;
3858
3859 return 0;
3860 }
3861
3862 /* fold stats, assuming all rtnl_link_stats64 fields are u64, but
3863 * allowing for the fact that some drivers can provide 32bit values only.
3864 */
3865 static void bond_fold_stats(struct rtnl_link_stats64 *_res,
3866 const struct rtnl_link_stats64 *_new,
3867 const struct rtnl_link_stats64 *_old)
3868 {
3869 const u64 *new = (const u64 *)_new;
3870 const u64 *old = (const u64 *)_old;
3871 u64 *res = (u64 *)_res;
3872 int i;
3873
3874 for (i = 0; i < sizeof(*_res) / sizeof(u64); i++) {
3875 u64 nv = new[i];
3876 u64 ov = old[i];
3877 s64 delta = nv - ov;
3878
3879 /* detects if this particular field is 32bit only */
3880 if (((nv | ov) >> 32) == 0)
3881 delta = (s64)(s32)((u32)nv - (u32)ov);
3882
3883 /* filter anomalies, some drivers reset their stats
3884 * at down/up events.
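* For illustration: a 32-bit counter that wraps from 0xffffffff to 0x00000003 has ((nv | ov) >> 32) == 0, so delta is computed as (s64)(s32)(0x00000003 - 0xffffffff) == 4 and the increment survives the wrap, instead of being discarded as a negative 64-bit delta.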
3885 */ 3886 if (delta > 0) 3887 res[i] += delta; 3888 } 3889 } 3890 3891 #ifdef CONFIG_LOCKDEP 3892 static int bond_get_lowest_level_rcu(struct net_device *dev) 3893 { 3894 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 3895 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 3896 int cur = 0, max = 0; 3897 3898 now = dev; 3899 iter = &dev->adj_list.lower; 3900 3901 while (1) { 3902 next = NULL; 3903 while (1) { 3904 ldev = netdev_next_lower_dev_rcu(now, &iter); 3905 if (!ldev) 3906 break; 3907 3908 next = ldev; 3909 niter = &ldev->adj_list.lower; 3910 dev_stack[cur] = now; 3911 iter_stack[cur++] = iter; 3912 if (max <= cur) 3913 max = cur; 3914 break; 3915 } 3916 3917 if (!next) { 3918 if (!cur) 3919 return max; 3920 next = dev_stack[--cur]; 3921 niter = iter_stack[cur]; 3922 } 3923 3924 now = next; 3925 iter = niter; 3926 } 3927 3928 return max; 3929 } 3930 #endif 3931 3932 static void bond_get_stats(struct net_device *bond_dev, 3933 struct rtnl_link_stats64 *stats) 3934 { 3935 struct bonding *bond = netdev_priv(bond_dev); 3936 struct rtnl_link_stats64 temp; 3937 struct list_head *iter; 3938 struct slave *slave; 3939 int nest_level = 0; 3940 3941 3942 rcu_read_lock(); 3943 #ifdef CONFIG_LOCKDEP 3944 nest_level = bond_get_lowest_level_rcu(bond_dev); 3945 #endif 3946 3947 spin_lock_nested(&bond->stats_lock, nest_level); 3948 memcpy(stats, &bond->bond_stats, sizeof(*stats)); 3949 3950 bond_for_each_slave_rcu(bond, slave, iter) { 3951 const struct rtnl_link_stats64 *new = 3952 dev_get_stats(slave->dev, &temp); 3953 3954 bond_fold_stats(stats, new, &slave->slave_stats); 3955 3956 /* save off the slave stats for the next run */ 3957 memcpy(&slave->slave_stats, new, sizeof(*new)); 3958 } 3959 3960 memcpy(&bond->bond_stats, stats, sizeof(*stats)); 3961 spin_unlock(&bond->stats_lock); 3962 rcu_read_unlock(); 3963 } 3964 3965 static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd) 3966 { 3967 struct bonding *bond = netdev_priv(bond_dev); 3968 struct net_device *slave_dev = NULL; 3969 struct ifbond k_binfo; 3970 struct ifbond __user *u_binfo = NULL; 3971 struct ifslave k_sinfo; 3972 struct ifslave __user *u_sinfo = NULL; 3973 struct mii_ioctl_data *mii = NULL; 3974 struct bond_opt_value newval; 3975 struct net *net; 3976 int res = 0; 3977 3978 netdev_dbg(bond_dev, "bond_ioctl: cmd=%d\n", cmd); 3979 3980 switch (cmd) { 3981 case SIOCGMIIPHY: 3982 mii = if_mii(ifr); 3983 if (!mii) 3984 return -EINVAL; 3985 3986 mii->phy_id = 0; 3987 fallthrough; 3988 case SIOCGMIIREG: 3989 /* We do this again just in case we were called by SIOCGMIIREG 3990 * instead of SIOCGMIIPHY. 
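* MII register 1 is the Basic Mode Status Register (BMSR), so reporting BMSR_LSTATUS whenever the bond's carrier is up lets generic MII tools read the aggregate link state.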
3991 */ 3992 mii = if_mii(ifr); 3993 if (!mii) 3994 return -EINVAL; 3995 3996 if (mii->reg_num == 1) { 3997 mii->val_out = 0; 3998 if (netif_carrier_ok(bond->dev)) 3999 mii->val_out = BMSR_LSTATUS; 4000 } 4001 4002 return 0; 4003 case BOND_INFO_QUERY_OLD: 4004 case SIOCBONDINFOQUERY: 4005 u_binfo = (struct ifbond __user *)ifr->ifr_data; 4006 4007 if (copy_from_user(&k_binfo, u_binfo, sizeof(ifbond))) 4008 return -EFAULT; 4009 4010 bond_info_query(bond_dev, &k_binfo); 4011 if (copy_to_user(u_binfo, &k_binfo, sizeof(ifbond))) 4012 return -EFAULT; 4013 4014 return 0; 4015 case BOND_SLAVE_INFO_QUERY_OLD: 4016 case SIOCBONDSLAVEINFOQUERY: 4017 u_sinfo = (struct ifslave __user *)ifr->ifr_data; 4018 4019 if (copy_from_user(&k_sinfo, u_sinfo, sizeof(ifslave))) 4020 return -EFAULT; 4021 4022 res = bond_slave_info_query(bond_dev, &k_sinfo); 4023 if (res == 0 && 4024 copy_to_user(u_sinfo, &k_sinfo, sizeof(ifslave))) 4025 return -EFAULT; 4026 4027 return res; 4028 default: 4029 break; 4030 } 4031 4032 net = dev_net(bond_dev); 4033 4034 if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 4035 return -EPERM; 4036 4037 slave_dev = __dev_get_by_name(net, ifr->ifr_slave); 4038 4039 slave_dbg(bond_dev, slave_dev, "slave_dev=%p:\n", slave_dev); 4040 4041 if (!slave_dev) 4042 return -ENODEV; 4043 4044 switch (cmd) { 4045 case BOND_ENSLAVE_OLD: 4046 case SIOCBONDENSLAVE: 4047 res = bond_enslave(bond_dev, slave_dev, NULL); 4048 break; 4049 case BOND_RELEASE_OLD: 4050 case SIOCBONDRELEASE: 4051 res = bond_release(bond_dev, slave_dev); 4052 break; 4053 case BOND_SETHWADDR_OLD: 4054 case SIOCBONDSETHWADDR: 4055 res = bond_set_dev_addr(bond_dev, slave_dev); 4056 break; 4057 case BOND_CHANGE_ACTIVE_OLD: 4058 case SIOCBONDCHANGEACTIVE: 4059 bond_opt_initstr(&newval, slave_dev->name); 4060 res = __bond_opt_set_notify(bond, BOND_OPT_ACTIVE_SLAVE, 4061 &newval); 4062 break; 4063 default: 4064 res = -EOPNOTSUPP; 4065 } 4066 4067 return res; 4068 } 4069 4070 static void bond_change_rx_flags(struct net_device *bond_dev, int change) 4071 { 4072 struct bonding *bond = netdev_priv(bond_dev); 4073 4074 if (change & IFF_PROMISC) 4075 bond_set_promiscuity(bond, 4076 bond_dev->flags & IFF_PROMISC ? 1 : -1); 4077 4078 if (change & IFF_ALLMULTI) 4079 bond_set_allmulti(bond, 4080 bond_dev->flags & IFF_ALLMULTI ? 1 : -1); 4081 } 4082 4083 static void bond_set_rx_mode(struct net_device *bond_dev) 4084 { 4085 struct bonding *bond = netdev_priv(bond_dev); 4086 struct list_head *iter; 4087 struct slave *slave; 4088 4089 rcu_read_lock(); 4090 if (bond_uses_primary(bond)) { 4091 slave = rcu_dereference(bond->curr_active_slave); 4092 if (slave) { 4093 dev_uc_sync(slave->dev, bond_dev); 4094 dev_mc_sync(slave->dev, bond_dev); 4095 } 4096 } else { 4097 bond_for_each_slave_rcu(bond, slave, iter) { 4098 dev_uc_sync_multiple(slave->dev, bond_dev); 4099 dev_mc_sync_multiple(slave->dev, bond_dev); 4100 } 4101 } 4102 rcu_read_unlock(); 4103 } 4104 4105 static int bond_neigh_init(struct neighbour *n) 4106 { 4107 struct bonding *bond = netdev_priv(n->dev); 4108 const struct net_device_ops *slave_ops; 4109 struct neigh_parms parms; 4110 struct slave *slave; 4111 int ret = 0; 4112 4113 rcu_read_lock(); 4114 slave = bond_first_slave_rcu(bond); 4115 if (!slave) 4116 goto out; 4117 slave_ops = slave->dev->netdev_ops; 4118 if (!slave_ops->ndo_neigh_setup) 4119 goto out; 4120 4121 /* TODO: find another way [1] to implement this. 4122 * Passing a zeroed structure is fragile, 4123 * but at least we do not pass garbage. 
4124 *
4125 * [1] One way would be that ndo_neigh_setup() never touches
4126 * struct neigh_parms, but propagates the new neigh_setup()
4127 * back to ___neigh_create() / neigh_parms_alloc()
4128 */
4129 memset(&parms, 0, sizeof(parms));
4130 ret = slave_ops->ndo_neigh_setup(slave->dev, &parms);
4131
4132 if (ret)
4133 goto out;
4134
4135 if (parms.neigh_setup)
4136 ret = parms.neigh_setup(n);
4137 out:
4138 rcu_read_unlock();
4139 return ret;
4140 }
4141
4142 /* The bonding ndo_neigh_setup is called at init time before any
4143 * slave exists. So we must declare a proxy setup function which will
4144 * be used at run time to resolve the actual slave neigh param setup.
4145 *
4146 * It's also called by master devices (such as vlans) to setup their
4147 * underlying devices. In that case - do nothing, we're already set up from
4148 * our init.
4149 */
4150 static int bond_neigh_setup(struct net_device *dev,
4151 struct neigh_parms *parms)
4152 {
4153 /* modify only our neigh_parms */
4154 if (parms->dev == dev)
4155 parms->neigh_setup = bond_neigh_init;
4156
4157 return 0;
4158 }
4159
4160 /* Change the MTU of all of a master's slaves to match the master */
4161 static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
4162 {
4163 struct bonding *bond = netdev_priv(bond_dev);
4164 struct slave *slave, *rollback_slave;
4165 struct list_head *iter;
4166 int res = 0;
4167
4168 netdev_dbg(bond_dev, "bond=%p, new_mtu=%d\n", bond, new_mtu);
4169
4170 bond_for_each_slave(bond, slave, iter) {
4171 slave_dbg(bond_dev, slave->dev, "s %p c_m %p\n",
4172 slave, slave->dev->netdev_ops->ndo_change_mtu);
4173
4174 res = dev_set_mtu(slave->dev, new_mtu);
4175
4176 if (res) {
4177 /* If we failed to set the slave's mtu to the new value
4178 * we must abort the operation even in ACTIVE_BACKUP
4179 * mode, because if we allow the backup slaves to have
4180 * different mtu values than the active slave we'll
4181 * need to change their mtu when doing a failover. That
4182 * means changing their mtu from timer context, which
4183 * is probably not a good idea.
4184 */
4185 slave_dbg(bond_dev, slave->dev, "err %d setting mtu to %d\n",
4186 res, new_mtu);
4187 goto unwind;
4188 }
4189 }
4190
4191 bond_dev->mtu = new_mtu;
4192
4193 return 0;
4194
4195 unwind:
4196 /* unwind from head to the slave that failed */
4197 bond_for_each_slave(bond, rollback_slave, iter) {
4198 int tmp_res;
4199
4200 if (rollback_slave == slave)
4201 break;
4202
4203 tmp_res = dev_set_mtu(rollback_slave->dev, bond_dev->mtu);
4204 if (tmp_res)
4205 slave_dbg(bond_dev, rollback_slave->dev, "unwind err %d\n",
4206 tmp_res);
4207 }
4208
4209 return res;
4210 }
4211
4212 /* Change HW address
4213 *
4214 * Note that many devices must be down to change the HW address, and
4215 * downing the master releases all slaves. We can make bonds full of
4216 * bonding devices to test this, however.
4217 */
4218 static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
4219 {
4220 struct bonding *bond = netdev_priv(bond_dev);
4221 struct slave *slave, *rollback_slave;
4222 struct sockaddr_storage *ss = addr, tmp_ss;
4223 struct list_head *iter;
4224 int res = 0;
4225
4226 if (BOND_MODE(bond) == BOND_MODE_ALB)
4227 return bond_alb_set_mac_address(bond_dev, addr);
4228
4229
4230 netdev_dbg(bond_dev, "%s: bond=%p\n", __func__, bond);
4231
4232 /* If fail_over_mac is enabled, do nothing and return success.
4233 * Returning an error causes ifenslave to fail.
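* (With fail_over_mac, slaves keep their own MAC addresses and the bond's address is managed around failover instead, so there is nothing to program into the slaves here.)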
4234 */ 4235 if (bond->params.fail_over_mac && 4236 BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) 4237 return 0; 4238 4239 if (!is_valid_ether_addr(ss->__data)) 4240 return -EADDRNOTAVAIL; 4241 4242 bond_for_each_slave(bond, slave, iter) { 4243 slave_dbg(bond_dev, slave->dev, "%s: slave=%p\n", 4244 __func__, slave); 4245 res = dev_set_mac_address(slave->dev, addr, NULL); 4246 if (res) { 4247 /* TODO: consider downing the slave 4248 * and retry ? 4249 * User should expect communications 4250 * breakage anyway until ARP finish 4251 * updating, so... 4252 */ 4253 slave_dbg(bond_dev, slave->dev, "%s: err %d\n", 4254 __func__, res); 4255 goto unwind; 4256 } 4257 } 4258 4259 /* success */ 4260 memcpy(bond_dev->dev_addr, ss->__data, bond_dev->addr_len); 4261 return 0; 4262 4263 unwind: 4264 memcpy(tmp_ss.__data, bond_dev->dev_addr, bond_dev->addr_len); 4265 tmp_ss.ss_family = bond_dev->type; 4266 4267 /* unwind from head to the slave that failed */ 4268 bond_for_each_slave(bond, rollback_slave, iter) { 4269 int tmp_res; 4270 4271 if (rollback_slave == slave) 4272 break; 4273 4274 tmp_res = dev_set_mac_address(rollback_slave->dev, 4275 (struct sockaddr *)&tmp_ss, NULL); 4276 if (tmp_res) { 4277 slave_dbg(bond_dev, rollback_slave->dev, "%s: unwind err %d\n", 4278 __func__, tmp_res); 4279 } 4280 } 4281 4282 return res; 4283 } 4284 4285 /** 4286 * bond_get_slave_by_id - get xmit slave with slave_id 4287 * @bond: bonding device that is transmitting 4288 * @slave_id: slave id up to slave_cnt-1 through which to transmit 4289 * 4290 * This function tries to get slave with slave_id but in case 4291 * it fails, it tries to find the first available slave for transmission. 4292 */ 4293 static struct slave *bond_get_slave_by_id(struct bonding *bond, 4294 int slave_id) 4295 { 4296 struct list_head *iter; 4297 struct slave *slave; 4298 int i = slave_id; 4299 4300 /* Here we start from the slave with slave_id */ 4301 bond_for_each_slave_rcu(bond, slave, iter) { 4302 if (--i < 0) { 4303 if (bond_slave_can_tx(slave)) 4304 return slave; 4305 } 4306 } 4307 4308 /* Here we start from the first slave up to slave_id */ 4309 i = slave_id; 4310 bond_for_each_slave_rcu(bond, slave, iter) { 4311 if (--i < 0) 4312 break; 4313 if (bond_slave_can_tx(slave)) 4314 return slave; 4315 } 4316 /* no slave that can tx has been found */ 4317 return NULL; 4318 } 4319 4320 /** 4321 * bond_rr_gen_slave_id - generate slave id based on packets_per_slave 4322 * @bond: bonding device to use 4323 * 4324 * Based on the value of the bonding device's packets_per_slave parameter 4325 * this function generates a slave id, which is usually used as the next 4326 * slave to transmit through. 
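* For example, with packets_per_slave == N > 1 the per-CPU counter below is divided by N via reciprocal_divide(), so the returned id advances only once every N packets and each slave carries N consecutive packets.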
4327 */ 4328 static u32 bond_rr_gen_slave_id(struct bonding *bond) 4329 { 4330 u32 slave_id; 4331 struct reciprocal_value reciprocal_packets_per_slave; 4332 int packets_per_slave = bond->params.packets_per_slave; 4333 4334 switch (packets_per_slave) { 4335 case 0: 4336 slave_id = prandom_u32(); 4337 break; 4338 case 1: 4339 slave_id = this_cpu_inc_return(*bond->rr_tx_counter); 4340 break; 4341 default: 4342 reciprocal_packets_per_slave = 4343 bond->params.reciprocal_packets_per_slave; 4344 slave_id = this_cpu_inc_return(*bond->rr_tx_counter); 4345 slave_id = reciprocal_divide(slave_id, 4346 reciprocal_packets_per_slave); 4347 break; 4348 } 4349 4350 return slave_id; 4351 } 4352 4353 static struct slave *bond_xmit_roundrobin_slave_get(struct bonding *bond, 4354 struct sk_buff *skb) 4355 { 4356 struct slave *slave; 4357 int slave_cnt; 4358 u32 slave_id; 4359 4360 /* Start with the curr_active_slave that joined the bond as the 4361 * default for sending IGMP traffic. For failover purposes one 4362 * needs to maintain some consistency for the interface that will 4363 * send the join/membership reports. The curr_active_slave found 4364 * will send all of this type of traffic. 4365 */ 4366 if (skb->protocol == htons(ETH_P_IP)) { 4367 int noff = skb_network_offset(skb); 4368 struct iphdr *iph; 4369 4370 if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph)))) 4371 goto non_igmp; 4372 4373 iph = ip_hdr(skb); 4374 if (iph->protocol == IPPROTO_IGMP) { 4375 slave = rcu_dereference(bond->curr_active_slave); 4376 if (slave) 4377 return slave; 4378 return bond_get_slave_by_id(bond, 0); 4379 } 4380 } 4381 4382 non_igmp: 4383 slave_cnt = READ_ONCE(bond->slave_cnt); 4384 if (likely(slave_cnt)) { 4385 slave_id = bond_rr_gen_slave_id(bond) % slave_cnt; 4386 return bond_get_slave_by_id(bond, slave_id); 4387 } 4388 return NULL; 4389 } 4390 4391 static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb, 4392 struct net_device *bond_dev) 4393 { 4394 struct bonding *bond = netdev_priv(bond_dev); 4395 struct slave *slave; 4396 4397 slave = bond_xmit_roundrobin_slave_get(bond, skb); 4398 if (likely(slave)) 4399 return bond_dev_queue_xmit(bond, skb, slave->dev); 4400 4401 return bond_tx_drop(bond_dev, skb); 4402 } 4403 4404 static struct slave *bond_xmit_activebackup_slave_get(struct bonding *bond, 4405 struct sk_buff *skb) 4406 { 4407 return rcu_dereference(bond->curr_active_slave); 4408 } 4409 4410 /* In active-backup mode, we know that bond->curr_active_slave is always valid if 4411 * the bond has a usable interface. 4412 */ 4413 static netdev_tx_t bond_xmit_activebackup(struct sk_buff *skb, 4414 struct net_device *bond_dev) 4415 { 4416 struct bonding *bond = netdev_priv(bond_dev); 4417 struct slave *slave; 4418 4419 slave = bond_xmit_activebackup_slave_get(bond, skb); 4420 if (slave) 4421 return bond_dev_queue_xmit(bond, skb, slave->dev); 4422 4423 return bond_tx_drop(bond_dev, skb); 4424 } 4425 4426 /* Use this to update slave_array when (a) it's not appropriate to update 4427 * slave_array right away (note that update_slave_array() may sleep) 4428 * and / or (b) RTNL is not held. 4429 */ 4430 void bond_slave_arr_work_rearm(struct bonding *bond, unsigned long delay) 4431 { 4432 queue_delayed_work(bond->wq, &bond->slave_arr_work, delay); 4433 } 4434 4435 /* Slave array work handler. 
Holds only RTNL */
4436 static void bond_slave_arr_handler(struct work_struct *work)
4437 {
4438 struct bonding *bond = container_of(work, struct bonding,
4439 slave_arr_work.work);
4440 int ret;
4441
4442 if (!rtnl_trylock())
4443 goto err;
4444
4445 ret = bond_update_slave_arr(bond, NULL);
4446 rtnl_unlock();
4447 if (ret) {
4448 pr_warn_ratelimited("Failed to update slave array from WT\n");
4449 goto err;
4450 }
4451 return;
4452
4453 err:
4454 bond_slave_arr_work_rearm(bond, 1);
4455 }
4456
4457 static void bond_skip_slave(struct bond_up_slave *slaves,
4458 struct slave *skipslave)
4459 {
4460 int idx;
4461
4462 /* Rare situation where caller has asked to skip a specific
4463 * slave but allocation failed (most likely!). BTW this is
4464 * only possible when the call is initiated from
4465 * __bond_release_one(). In this situation, overwrite the
4466 * skipslave entry in the array with the last entry from the
4467 * array to avoid a situation where the xmit path may choose
4468 * this to-be-skipped slave to send a packet out.
4469 */
4470 for (idx = 0; slaves && idx < slaves->count; idx++) {
4471 if (skipslave == slaves->arr[idx]) {
4472 slaves->arr[idx] =
4473 slaves->arr[slaves->count - 1];
4474 slaves->count--;
4475 break;
4476 }
4477 }
4478 }
4479
4480 static void bond_set_slave_arr(struct bonding *bond,
4481 struct bond_up_slave *usable_slaves,
4482 struct bond_up_slave *all_slaves)
4483 {
4484 struct bond_up_slave *usable, *all;
4485
4486 usable = rtnl_dereference(bond->usable_slaves);
4487 rcu_assign_pointer(bond->usable_slaves, usable_slaves);
4488 kfree_rcu(usable, rcu);
4489
4490 all = rtnl_dereference(bond->all_slaves);
4491 rcu_assign_pointer(bond->all_slaves, all_slaves);
4492 kfree_rcu(all, rcu);
4493 }
4494
4495 static void bond_reset_slave_arr(struct bonding *bond)
4496 {
4497 struct bond_up_slave *usable, *all;
4498
4499 usable = rtnl_dereference(bond->usable_slaves);
4500 if (usable) {
4501 RCU_INIT_POINTER(bond->usable_slaves, NULL);
4502 kfree_rcu(usable, rcu);
4503 }
4504
4505 all = rtnl_dereference(bond->all_slaves);
4506 if (all) {
4507 RCU_INIT_POINTER(bond->all_slaves, NULL);
4508 kfree_rcu(all, rcu);
4509 }
4510 }
4511
4512 /* Build the usable slaves array in control path for modes that use xmit-hash
4513 * to determine the slave interface -
4514 * (a) BOND_MODE_8023AD
4515 * (b) BOND_MODE_XOR
4516 * (c) (BOND_MODE_TLB || BOND_MODE_ALB) && tlb_dynamic_lb == 0
4517 *
4518 * The caller is expected to hold RTNL only and NO other lock!
4519 */
4520 int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
4521 {
4522 struct bond_up_slave *usable_slaves = NULL, *all_slaves = NULL;
4523 struct slave *slave;
4524 struct list_head *iter;
4525 int agg_id = 0;
4526 int ret = 0;
4527
4528 might_sleep();
4529
4530 usable_slaves = kzalloc(struct_size(usable_slaves, arr,
4531 bond->slave_cnt), GFP_KERNEL);
4532 all_slaves = kzalloc(struct_size(all_slaves, arr,
4533 bond->slave_cnt), GFP_KERNEL);
4534 if (!usable_slaves || !all_slaves) {
4535 ret = -ENOMEM;
4536 goto out;
4537 }
4538 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
4539 struct ad_info ad_info;
4540
4541 spin_lock_bh(&bond->mode_lock);
4542 if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
4543 spin_unlock_bh(&bond->mode_lock);
4544 pr_debug("bond_3ad_get_active_agg_info failed\n");
4545 /* No active aggregator means it's not safe to use
4546 * the previous array.
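* Dropping the arrays here makes the 3ad/xor xmit path return NULL from the slave lookup and fall back to bond_tx_drop() until a new aggregator is selected.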
4547 */ 4548 bond_reset_slave_arr(bond); 4549 goto out; 4550 } 4551 spin_unlock_bh(&bond->mode_lock); 4552 agg_id = ad_info.aggregator_id; 4553 } 4554 bond_for_each_slave(bond, slave, iter) { 4555 if (skipslave == slave) 4556 continue; 4557 4558 all_slaves->arr[all_slaves->count++] = slave; 4559 if (BOND_MODE(bond) == BOND_MODE_8023AD) { 4560 struct aggregator *agg; 4561 4562 agg = SLAVE_AD_INFO(slave)->port.aggregator; 4563 if (!agg || agg->aggregator_identifier != agg_id) 4564 continue; 4565 } 4566 if (!bond_slave_can_tx(slave)) 4567 continue; 4568 4569 slave_dbg(bond->dev, slave->dev, "Adding slave to tx hash array[%d]\n", 4570 usable_slaves->count); 4571 4572 usable_slaves->arr[usable_slaves->count++] = slave; 4573 } 4574 4575 bond_set_slave_arr(bond, usable_slaves, all_slaves); 4576 return ret; 4577 out: 4578 if (ret != 0 && skipslave) { 4579 bond_skip_slave(rtnl_dereference(bond->all_slaves), 4580 skipslave); 4581 bond_skip_slave(rtnl_dereference(bond->usable_slaves), 4582 skipslave); 4583 } 4584 kfree_rcu(all_slaves, rcu); 4585 kfree_rcu(usable_slaves, rcu); 4586 4587 return ret; 4588 } 4589 4590 static struct slave *bond_xmit_3ad_xor_slave_get(struct bonding *bond, 4591 struct sk_buff *skb, 4592 struct bond_up_slave *slaves) 4593 { 4594 struct slave *slave; 4595 unsigned int count; 4596 u32 hash; 4597 4598 hash = bond_xmit_hash(bond, skb); 4599 count = slaves ? READ_ONCE(slaves->count) : 0; 4600 if (unlikely(!count)) 4601 return NULL; 4602 4603 slave = slaves->arr[hash % count]; 4604 return slave; 4605 } 4606 4607 /* Use this Xmit function for 3AD as well as XOR modes. The current 4608 * usable slave array is formed in the control path. The xmit function 4609 * just calculates hash and sends the packet out. 4610 */ 4611 static netdev_tx_t bond_3ad_xor_xmit(struct sk_buff *skb, 4612 struct net_device *dev) 4613 { 4614 struct bonding *bond = netdev_priv(dev); 4615 struct bond_up_slave *slaves; 4616 struct slave *slave; 4617 4618 slaves = rcu_dereference(bond->usable_slaves); 4619 slave = bond_xmit_3ad_xor_slave_get(bond, skb, slaves); 4620 if (likely(slave)) 4621 return bond_dev_queue_xmit(bond, skb, slave->dev); 4622 4623 return bond_tx_drop(dev, skb); 4624 } 4625 4626 /* in broadcast mode, we send everything to all usable interfaces. 
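* Each up slave except the last gets a clone of the skb; the last slave, if up, transmits the original (otherwise the original is dropped), so exactly one skb is consumed per transmitting slave.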
*/ 4627 static netdev_tx_t bond_xmit_broadcast(struct sk_buff *skb, 4628 struct net_device *bond_dev) 4629 { 4630 struct bonding *bond = netdev_priv(bond_dev); 4631 struct slave *slave = NULL; 4632 struct list_head *iter; 4633 4634 bond_for_each_slave_rcu(bond, slave, iter) { 4635 if (bond_is_last_slave(bond, slave)) 4636 break; 4637 if (bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) { 4638 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); 4639 4640 if (!skb2) { 4641 net_err_ratelimited("%s: Error: %s: skb_clone() failed\n", 4642 bond_dev->name, __func__); 4643 continue; 4644 } 4645 bond_dev_queue_xmit(bond, skb2, slave->dev); 4646 } 4647 } 4648 if (slave && bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) 4649 return bond_dev_queue_xmit(bond, skb, slave->dev); 4650 4651 return bond_tx_drop(bond_dev, skb); 4652 } 4653 4654 /*------------------------- Device initialization ---------------------------*/ 4655 4656 /* Lookup the slave that corresponds to a qid */ 4657 static inline int bond_slave_override(struct bonding *bond, 4658 struct sk_buff *skb) 4659 { 4660 struct slave *slave = NULL; 4661 struct list_head *iter; 4662 4663 if (!skb_rx_queue_recorded(skb)) 4664 return 1; 4665 4666 /* Find out if any slaves have the same mapping as this skb. */ 4667 bond_for_each_slave_rcu(bond, slave, iter) { 4668 if (slave->queue_id == skb_get_queue_mapping(skb)) { 4669 if (bond_slave_is_up(slave) && 4670 slave->link == BOND_LINK_UP) { 4671 bond_dev_queue_xmit(bond, skb, slave->dev); 4672 return 0; 4673 } 4674 /* If the slave isn't UP, use default transmit policy. */ 4675 break; 4676 } 4677 } 4678 4679 return 1; 4680 } 4681 4682 4683 static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb, 4684 struct net_device *sb_dev) 4685 { 4686 /* This helper function exists to help dev_pick_tx get the correct 4687 * destination queue. Using a helper function skips a call to 4688 * skb_tx_hash and will put the skbs in the queue we expect on their 4689 * way down to the bonding driver. 4690 */ 4691 u16 txq = skb_rx_queue_recorded(skb) ? 
skb_get_rx_queue(skb) : 0;
4692
4693 /* Save the original txq to restore before passing to the driver */
4694 qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb_get_queue_mapping(skb);
4695
4696 if (unlikely(txq >= dev->real_num_tx_queues)) {
4697 do {
4698 txq -= dev->real_num_tx_queues;
4699 } while (txq >= dev->real_num_tx_queues);
4700 }
4701 return txq;
4702 }
4703
4704 static struct net_device *bond_xmit_get_slave(struct net_device *master_dev,
4705 struct sk_buff *skb,
4706 bool all_slaves)
4707 {
4708 struct bonding *bond = netdev_priv(master_dev);
4709 struct bond_up_slave *slaves;
4710 struct slave *slave = NULL;
4711
4712 switch (BOND_MODE(bond)) {
4713 case BOND_MODE_ROUNDROBIN:
4714 slave = bond_xmit_roundrobin_slave_get(bond, skb);
4715 break;
4716 case BOND_MODE_ACTIVEBACKUP:
4717 slave = bond_xmit_activebackup_slave_get(bond, skb);
4718 break;
4719 case BOND_MODE_8023AD:
4720 case BOND_MODE_XOR:
4721 if (all_slaves)
4722 slaves = rcu_dereference(bond->all_slaves);
4723 else
4724 slaves = rcu_dereference(bond->usable_slaves);
4725 slave = bond_xmit_3ad_xor_slave_get(bond, skb, slaves);
4726 break;
4727 case BOND_MODE_BROADCAST:
4728 break;
4729 case BOND_MODE_ALB:
4730 slave = bond_xmit_alb_slave_get(bond, skb);
4731 break;
4732 case BOND_MODE_TLB:
4733 slave = bond_xmit_tlb_slave_get(bond, skb);
4734 break;
4735 default:
4736 /* Should never happen, mode already checked */
4737 WARN_ONCE(true, "Unknown bonding mode");
4738 break;
4739 }
4740
4741 if (slave)
4742 return slave->dev;
4743 return NULL;
4744 }
4745
4746 static void bond_sk_to_flow(struct sock *sk, struct flow_keys *flow)
4747 {
4748 switch (sk->sk_family) {
4749 #if IS_ENABLED(CONFIG_IPV6)
4750 case AF_INET6:
4751 if (sk->sk_ipv6only ||
4752 ipv6_addr_type(&sk->sk_v6_daddr) != IPV6_ADDR_MAPPED) {
4753 flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
4754 flow->addrs.v6addrs.src = inet6_sk(sk)->saddr;
4755 flow->addrs.v6addrs.dst = sk->sk_v6_daddr;
4756 break;
4757 }
4758 fallthrough;
4759 #endif
4760 default: /* AF_INET */
4761 flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
4762 flow->addrs.v4addrs.src = inet_sk(sk)->inet_rcv_saddr;
4763 flow->addrs.v4addrs.dst = inet_sk(sk)->inet_daddr;
4764 break;
4765 }
4766
4767 flow->ports.src = inet_sk(sk)->inet_sport;
4768 flow->ports.dst = inet_sk(sk)->inet_dport;
4769 }
4770
4771 /**
4772 * bond_sk_hash_l34 - generate a hash value based on the socket's L3 and L4 fields
4773 * @sk: socket to use for headers
4774 *
4775 * This function will extract the necessary fields from the socket and use
4776 * them to generate a hash based on the LAYER34 xmit_policy.
4777 * Assumes that sk is a TCP or UDP socket.
4778 */
4779 static u32 bond_sk_hash_l34(struct sock *sk)
4780 {
4781 struct flow_keys flow;
4782 u32 hash;
4783
4784 bond_sk_to_flow(sk, &flow);
4785
4786 /* L4 */
4787 memcpy(&hash, &flow.ports.ports, sizeof(hash));
4788 /* L3 */
4789 return bond_ip_hash(hash, &flow);
4790 }
4791
4792 static struct net_device *__bond_sk_get_lower_dev(struct bonding *bond,
4793 struct sock *sk)
4794 {
4795 struct bond_up_slave *slaves;
4796 struct slave *slave;
4797 unsigned int count;
4798 u32 hash;
4799
4800 slaves = rcu_dereference(bond->usable_slaves);
4801 count = slaves ?
READ_ONCE(slaves->count) : 0; 4802 if (unlikely(!count)) 4803 return NULL; 4804 4805 hash = bond_sk_hash_l34(sk); 4806 slave = slaves->arr[hash % count]; 4807 4808 return slave->dev; 4809 } 4810 4811 static struct net_device *bond_sk_get_lower_dev(struct net_device *dev, 4812 struct sock *sk) 4813 { 4814 struct bonding *bond = netdev_priv(dev); 4815 struct net_device *lower = NULL; 4816 4817 rcu_read_lock(); 4818 if (bond_sk_check(bond)) 4819 lower = __bond_sk_get_lower_dev(bond, sk); 4820 rcu_read_unlock(); 4821 4822 return lower; 4823 } 4824 4825 #if IS_ENABLED(CONFIG_TLS_DEVICE) 4826 static netdev_tx_t bond_tls_device_xmit(struct bonding *bond, struct sk_buff *skb, 4827 struct net_device *dev) 4828 { 4829 if (likely(bond_get_slave_by_dev(bond, tls_get_ctx(skb->sk)->netdev))) 4830 return bond_dev_queue_xmit(bond, skb, tls_get_ctx(skb->sk)->netdev); 4831 return bond_tx_drop(dev, skb); 4832 } 4833 #endif 4834 4835 static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev) 4836 { 4837 struct bonding *bond = netdev_priv(dev); 4838 4839 if (bond_should_override_tx_queue(bond) && 4840 !bond_slave_override(bond, skb)) 4841 return NETDEV_TX_OK; 4842 4843 #if IS_ENABLED(CONFIG_TLS_DEVICE) 4844 if (skb->sk && tls_is_sk_tx_device_offloaded(skb->sk)) 4845 return bond_tls_device_xmit(bond, skb, dev); 4846 #endif 4847 4848 switch (BOND_MODE(bond)) { 4849 case BOND_MODE_ROUNDROBIN: 4850 return bond_xmit_roundrobin(skb, dev); 4851 case BOND_MODE_ACTIVEBACKUP: 4852 return bond_xmit_activebackup(skb, dev); 4853 case BOND_MODE_8023AD: 4854 case BOND_MODE_XOR: 4855 return bond_3ad_xor_xmit(skb, dev); 4856 case BOND_MODE_BROADCAST: 4857 return bond_xmit_broadcast(skb, dev); 4858 case BOND_MODE_ALB: 4859 return bond_alb_xmit(skb, dev); 4860 case BOND_MODE_TLB: 4861 return bond_tlb_xmit(skb, dev); 4862 default: 4863 /* Should never happen, mode already checked */ 4864 netdev_err(dev, "Unknown bonding mode %d\n", BOND_MODE(bond)); 4865 WARN_ON_ONCE(1); 4866 return bond_tx_drop(dev, skb); 4867 } 4868 } 4869 4870 static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev) 4871 { 4872 struct bonding *bond = netdev_priv(dev); 4873 netdev_tx_t ret = NETDEV_TX_OK; 4874 4875 /* If we risk deadlock from transmitting this in the 4876 * netpoll path, tell netpoll to queue the frame for later tx 4877 */ 4878 if (unlikely(is_netpoll_tx_blocked(dev))) 4879 return NETDEV_TX_BUSY; 4880 4881 rcu_read_lock(); 4882 if (bond_has_slaves(bond)) 4883 ret = __bond_start_xmit(skb, dev); 4884 else 4885 ret = bond_tx_drop(dev, skb); 4886 rcu_read_unlock(); 4887 4888 return ret; 4889 } 4890 4891 static u32 bond_mode_bcast_speed(struct slave *slave, u32 speed) 4892 { 4893 if (speed == 0 || speed == SPEED_UNKNOWN) 4894 speed = slave->speed; 4895 else 4896 speed = min(speed, slave->speed); 4897 4898 return speed; 4899 } 4900 4901 static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev, 4902 struct ethtool_link_ksettings *cmd) 4903 { 4904 struct bonding *bond = netdev_priv(bond_dev); 4905 struct list_head *iter; 4906 struct slave *slave; 4907 u32 speed = 0; 4908 4909 cmd->base.duplex = DUPLEX_UNKNOWN; 4910 cmd->base.port = PORT_OTHER; 4911 4912 /* Since bond_slave_can_tx returns false for all inactive or down slaves, we 4913 * do not need to check mode. Though link speed might not represent 4914 * the true receive or transmit bandwidth (not all modes are symmetric) 4915 * this is an accurate maximum. 
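* (Broadcast mode is the exception handled below: every frame goes out on all slaves, so the reported speed is the minimum slave speed via bond_mode_bcast_speed() rather than the sum.)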
4916 */ 4917 bond_for_each_slave(bond, slave, iter) { 4918 if (bond_slave_can_tx(slave)) { 4919 if (slave->speed != SPEED_UNKNOWN) { 4920 if (BOND_MODE(bond) == BOND_MODE_BROADCAST) 4921 speed = bond_mode_bcast_speed(slave, 4922 speed); 4923 else 4924 speed += slave->speed; 4925 } 4926 if (cmd->base.duplex == DUPLEX_UNKNOWN && 4927 slave->duplex != DUPLEX_UNKNOWN) 4928 cmd->base.duplex = slave->duplex; 4929 } 4930 } 4931 cmd->base.speed = speed ? : SPEED_UNKNOWN; 4932 4933 return 0; 4934 } 4935 4936 static void bond_ethtool_get_drvinfo(struct net_device *bond_dev, 4937 struct ethtool_drvinfo *drvinfo) 4938 { 4939 strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); 4940 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d", 4941 BOND_ABI_VERSION); 4942 } 4943 4944 static const struct ethtool_ops bond_ethtool_ops = { 4945 .get_drvinfo = bond_ethtool_get_drvinfo, 4946 .get_link = ethtool_op_get_link, 4947 .get_link_ksettings = bond_ethtool_get_link_ksettings, 4948 }; 4949 4950 static const struct net_device_ops bond_netdev_ops = { 4951 .ndo_init = bond_init, 4952 .ndo_uninit = bond_uninit, 4953 .ndo_open = bond_open, 4954 .ndo_stop = bond_close, 4955 .ndo_start_xmit = bond_start_xmit, 4956 .ndo_select_queue = bond_select_queue, 4957 .ndo_get_stats64 = bond_get_stats, 4958 .ndo_do_ioctl = bond_do_ioctl, 4959 .ndo_change_rx_flags = bond_change_rx_flags, 4960 .ndo_set_rx_mode = bond_set_rx_mode, 4961 .ndo_change_mtu = bond_change_mtu, 4962 .ndo_set_mac_address = bond_set_mac_address, 4963 .ndo_neigh_setup = bond_neigh_setup, 4964 .ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid, 4965 .ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid, 4966 #ifdef CONFIG_NET_POLL_CONTROLLER 4967 .ndo_netpoll_setup = bond_netpoll_setup, 4968 .ndo_netpoll_cleanup = bond_netpoll_cleanup, 4969 .ndo_poll_controller = bond_poll_controller, 4970 #endif 4971 .ndo_add_slave = bond_enslave, 4972 .ndo_del_slave = bond_release, 4973 .ndo_fix_features = bond_fix_features, 4974 .ndo_features_check = passthru_features_check, 4975 .ndo_get_xmit_slave = bond_xmit_get_slave, 4976 .ndo_sk_get_lower_dev = bond_sk_get_lower_dev, 4977 }; 4978 4979 static const struct device_type bond_type = { 4980 .name = "bond", 4981 }; 4982 4983 static void bond_destructor(struct net_device *bond_dev) 4984 { 4985 struct bonding *bond = netdev_priv(bond_dev); 4986 4987 if (bond->wq) 4988 destroy_workqueue(bond->wq); 4989 4990 if (bond->rr_tx_counter) 4991 free_percpu(bond->rr_tx_counter); 4992 } 4993 4994 void bond_setup(struct net_device *bond_dev) 4995 { 4996 struct bonding *bond = netdev_priv(bond_dev); 4997 4998 spin_lock_init(&bond->mode_lock); 4999 bond->params = bonding_defaults; 5000 5001 /* Initialize pointers */ 5002 bond->dev = bond_dev; 5003 5004 /* Initialize the device entry points */ 5005 ether_setup(bond_dev); 5006 bond_dev->max_mtu = ETH_MAX_MTU; 5007 bond_dev->netdev_ops = &bond_netdev_ops; 5008 bond_dev->ethtool_ops = &bond_ethtool_ops; 5009 5010 bond_dev->needs_free_netdev = true; 5011 bond_dev->priv_destructor = bond_destructor; 5012 5013 SET_NETDEV_DEVTYPE(bond_dev, &bond_type); 5014 5015 /* Initialize the device options */ 5016 bond_dev->flags |= IFF_MASTER; 5017 bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT | IFF_NO_QUEUE; 5018 bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); 5019 5020 #ifdef CONFIG_XFRM_OFFLOAD 5021 /* set up xfrm device ops (only supported in active-backup right now) */ 5022 bond_dev->xfrmdev_ops = &bond_xfrmdev_ops; 5023 INIT_LIST_HEAD(&bond->ipsec_list); 5024 
spin_lock_init(&bond->ipsec_lock); 5025 #endif /* CONFIG_XFRM_OFFLOAD */ 5026 5027 /* don't acquire bond device's netif_tx_lock when transmitting */ 5028 bond_dev->features |= NETIF_F_LLTX; 5029 5030 /* By default, we declare the bond to be fully 5031 * VLAN hardware accelerated capable. Special 5032 * care is taken in the various xmit functions 5033 * when there are slaves that are not hw accel 5034 * capable 5035 */ 5036 5037 /* Don't allow bond devices to change network namespaces. */ 5038 bond_dev->features |= NETIF_F_NETNS_LOCAL; 5039 5040 bond_dev->hw_features = BOND_VLAN_FEATURES | 5041 NETIF_F_HW_VLAN_CTAG_RX | 5042 NETIF_F_HW_VLAN_CTAG_FILTER; 5043 5044 bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL; 5045 bond_dev->features |= bond_dev->hw_features; 5046 bond_dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; 5047 #ifdef CONFIG_XFRM_OFFLOAD 5048 bond_dev->hw_features |= BOND_XFRM_FEATURES; 5049 /* Only enable XFRM features if this is an active-backup config */ 5050 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) 5051 bond_dev->features |= BOND_XFRM_FEATURES; 5052 #endif /* CONFIG_XFRM_OFFLOAD */ 5053 #if IS_ENABLED(CONFIG_TLS_DEVICE) 5054 if (bond_sk_check(bond)) 5055 bond_dev->features |= BOND_TLS_FEATURES; 5056 #endif 5057 } 5058 5059 /* Destroy a bonding device. 5060 * Must be under rtnl_lock when this function is called. 5061 */ 5062 static void bond_uninit(struct net_device *bond_dev) 5063 { 5064 struct bonding *bond = netdev_priv(bond_dev); 5065 struct bond_up_slave *usable, *all; 5066 struct list_head *iter; 5067 struct slave *slave; 5068 5069 bond_netpoll_cleanup(bond_dev); 5070 5071 /* Release the bonded slaves */ 5072 bond_for_each_slave(bond, slave, iter) 5073 __bond_release_one(bond_dev, slave->dev, true, true); 5074 netdev_info(bond_dev, "Released all slaves\n"); 5075 5076 usable = rtnl_dereference(bond->usable_slaves); 5077 if (usable) { 5078 RCU_INIT_POINTER(bond->usable_slaves, NULL); 5079 kfree_rcu(usable, rcu); 5080 } 5081 5082 all = rtnl_dereference(bond->all_slaves); 5083 if (all) { 5084 RCU_INIT_POINTER(bond->all_slaves, NULL); 5085 kfree_rcu(all, rcu); 5086 } 5087 5088 list_del(&bond->bond_list); 5089 5090 bond_debug_unregister(bond); 5091 } 5092 5093 /*------------------------- Module initialization ---------------------------*/ 5094 5095 static int bond_check_params(struct bond_params *params) 5096 { 5097 int arp_validate_value, fail_over_mac_value, primary_reselect_value, i; 5098 struct bond_opt_value newval; 5099 const struct bond_opt_value *valptr; 5100 int arp_all_targets_value = 0; 5101 u16 ad_actor_sys_prio = 0; 5102 u16 ad_user_port_key = 0; 5103 __be32 arp_target[BOND_MAX_ARP_TARGETS] = { 0 }; 5104 int arp_ip_count; 5105 int bond_mode = BOND_MODE_ROUNDROBIN; 5106 int xmit_hashtype = BOND_XMIT_POLICY_LAYER2; 5107 int lacp_fast = 0; 5108 int tlb_dynamic_lb; 5109 5110 /* Convert string parameters. 
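* These come from module load time, e.g. an invocation such as "modprobe bonding mode=802.3ad miimon=100 lacp_rate=fast" (illustrative only); most strings that fail to parse below abort module init with -EINVAL.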
*/ 5111 if (mode) { 5112 bond_opt_initstr(&newval, mode); 5113 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_MODE), &newval); 5114 if (!valptr) { 5115 pr_err("Error: Invalid bonding mode \"%s\"\n", mode); 5116 return -EINVAL; 5117 } 5118 bond_mode = valptr->value; 5119 } 5120 5121 if (xmit_hash_policy) { 5122 if (bond_mode == BOND_MODE_ROUNDROBIN || 5123 bond_mode == BOND_MODE_ACTIVEBACKUP || 5124 bond_mode == BOND_MODE_BROADCAST) { 5125 pr_info("xmit_hash_policy param is irrelevant in mode %s\n", 5126 bond_mode_name(bond_mode)); 5127 } else { 5128 bond_opt_initstr(&newval, xmit_hash_policy); 5129 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_XMIT_HASH), 5130 &newval); 5131 if (!valptr) { 5132 pr_err("Error: Invalid xmit_hash_policy \"%s\"\n", 5133 xmit_hash_policy); 5134 return -EINVAL; 5135 } 5136 xmit_hashtype = valptr->value; 5137 } 5138 } 5139 5140 if (lacp_rate) { 5141 if (bond_mode != BOND_MODE_8023AD) { 5142 pr_info("lacp_rate param is irrelevant in mode %s\n", 5143 bond_mode_name(bond_mode)); 5144 } else { 5145 bond_opt_initstr(&newval, lacp_rate); 5146 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_LACP_RATE), 5147 &newval); 5148 if (!valptr) { 5149 pr_err("Error: Invalid lacp rate \"%s\"\n", 5150 lacp_rate); 5151 return -EINVAL; 5152 } 5153 lacp_fast = valptr->value; 5154 } 5155 } 5156 5157 if (ad_select) { 5158 bond_opt_initstr(&newval, ad_select); 5159 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_SELECT), 5160 &newval); 5161 if (!valptr) { 5162 pr_err("Error: Invalid ad_select \"%s\"\n", ad_select); 5163 return -EINVAL; 5164 } 5165 params->ad_select = valptr->value; 5166 if (bond_mode != BOND_MODE_8023AD) 5167 pr_warn("ad_select param only affects 802.3ad mode\n"); 5168 } else { 5169 params->ad_select = BOND_AD_STABLE; 5170 } 5171 5172 if (max_bonds < 0) { 5173 pr_warn("Warning: max_bonds (%d) not in range %d-%d, so it was reset to BOND_DEFAULT_MAX_BONDS (%d)\n", 5174 max_bonds, 0, INT_MAX, BOND_DEFAULT_MAX_BONDS); 5175 max_bonds = BOND_DEFAULT_MAX_BONDS; 5176 } 5177 5178 if (miimon < 0) { 5179 pr_warn("Warning: miimon module parameter (%d), not in range 0-%d, so it was reset to 0\n", 5180 miimon, INT_MAX); 5181 miimon = 0; 5182 } 5183 5184 if (updelay < 0) { 5185 pr_warn("Warning: updelay module parameter (%d), not in range 0-%d, so it was reset to 0\n", 5186 updelay, INT_MAX); 5187 updelay = 0; 5188 } 5189 5190 if (downdelay < 0) { 5191 pr_warn("Warning: downdelay module parameter (%d), not in range 0-%d, so it was reset to 0\n", 5192 downdelay, INT_MAX); 5193 downdelay = 0; 5194 } 5195 5196 if ((use_carrier != 0) && (use_carrier != 1)) { 5197 pr_warn("Warning: use_carrier module parameter (%d), not of valid value (0/1), so it was set to 1\n", 5198 use_carrier); 5199 use_carrier = 1; 5200 } 5201 5202 if (num_peer_notif < 0 || num_peer_notif > 255) { 5203 pr_warn("Warning: num_grat_arp/num_unsol_na (%d) not in range 0-255 so it was reset to 1\n", 5204 num_peer_notif); 5205 num_peer_notif = 1; 5206 } 5207 5208 /* reset values for 802.3ad/TLB/ALB */ 5209 if (!bond_mode_uses_arp(bond_mode)) { 5210 if (!miimon) { 5211 pr_warn("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n"); 5212 pr_warn("Forcing miimon to 100msec\n"); 5213 miimon = BOND_DEFAULT_MIIMON; 5214 } 5215 } 5216 5217 if (tx_queues < 1 || tx_queues > 255) { 5218 pr_warn("Warning: tx_queues (%d) should be between 1 and 255, resetting to %d\n", 5219 tx_queues, BOND_DEFAULT_TX_QUEUES); 5220 tx_queues = 
BOND_DEFAULT_TX_QUEUES; 5221 } 5222 5223 if ((all_slaves_active != 0) && (all_slaves_active != 1)) { 5224 pr_warn("Warning: all_slaves_active module parameter (%d), not of valid value (0/1), so it was set to 0\n", 5225 all_slaves_active); 5226 all_slaves_active = 0; 5227 } 5228 5229 if (resend_igmp < 0 || resend_igmp > 255) { 5230 pr_warn("Warning: resend_igmp (%d) should be between 0 and 255, resetting to %d\n", 5231 resend_igmp, BOND_DEFAULT_RESEND_IGMP); 5232 resend_igmp = BOND_DEFAULT_RESEND_IGMP; 5233 } 5234 5235 bond_opt_initval(&newval, packets_per_slave); 5236 if (!bond_opt_parse(bond_opt_get(BOND_OPT_PACKETS_PER_SLAVE), &newval)) { 5237 pr_warn("Warning: packets_per_slave (%d) should be between 0 and %u resetting to 1\n", 5238 packets_per_slave, USHRT_MAX); 5239 packets_per_slave = 1; 5240 } 5241 5242 if (bond_mode == BOND_MODE_ALB) { 5243 pr_notice("In ALB mode you might experience client disconnections upon reconnection of a link if the bonding module updelay parameter (%d msec) is incompatible with the forwarding delay time of the switch\n", 5244 updelay); 5245 } 5246 5247 if (!miimon) { 5248 if (updelay || downdelay) { 5249 /* just warn the user the up/down delay will have 5250 * no effect since miimon is zero... 5251 */ 5252 pr_warn("Warning: miimon module parameter not set and updelay (%d) or downdelay (%d) module parameter is set; updelay and downdelay have no effect unless miimon is set\n", 5253 updelay, downdelay); 5254 } 5255 } else { 5256 /* don't allow arp monitoring */ 5257 if (arp_interval) { 5258 pr_warn("Warning: miimon (%d) and arp_interval (%d) can't be used simultaneously, disabling ARP monitoring\n", 5259 miimon, arp_interval); 5260 arp_interval = 0; 5261 } 5262 5263 if ((updelay % miimon) != 0) { 5264 pr_warn("Warning: updelay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n", 5265 updelay, miimon, (updelay / miimon) * miimon); 5266 } 5267 5268 updelay /= miimon; 5269 5270 if ((downdelay % miimon) != 0) { 5271 pr_warn("Warning: downdelay (%d) is not a multiple of miimon (%d), downdelay rounded to %d ms\n", 5272 downdelay, miimon, 5273 (downdelay / miimon) * miimon); 5274 } 5275 5276 downdelay /= miimon; 5277 } 5278 5279 if (arp_interval < 0) { 5280 pr_warn("Warning: arp_interval module parameter (%d), not in range 0-%d, so it was reset to 0\n", 5281 arp_interval, INT_MAX); 5282 arp_interval = 0; 5283 } 5284 5285 for (arp_ip_count = 0, i = 0; 5286 (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[i]; i++) { 5287 __be32 ip; 5288 5289 /* not a complete check, but good enough to catch mistakes */ 5290 if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) || 5291 !bond_is_ip_target_ok(ip)) { 5292 pr_warn("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n", 5293 arp_ip_target[i]); 5294 arp_interval = 0; 5295 } else { 5296 if (bond_get_targets_ip(arp_target, ip) == -1) 5297 arp_target[arp_ip_count++] = ip; 5298 else 5299 pr_warn("Warning: duplicate address %pI4 in arp_ip_target, skipping\n", 5300 &ip); 5301 } 5302 } 5303 5304 if (arp_interval && !arp_ip_count) { 5305 /* don't allow arping if no arp_ip_target given... 
*/ 5306 pr_warn("Warning: arp_interval module parameter (%d) specified without providing an arp_ip_target parameter, arp_interval was reset to 0\n", 5307 arp_interval); 5308 arp_interval = 0; 5309 } 5310 5311 if (arp_validate) { 5312 if (!arp_interval) { 5313 pr_err("arp_validate requires arp_interval\n"); 5314 return -EINVAL; 5315 } 5316 5317 bond_opt_initstr(&newval, arp_validate); 5318 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_VALIDATE), 5319 &newval); 5320 if (!valptr) { 5321 pr_err("Error: invalid arp_validate \"%s\"\n", 5322 arp_validate); 5323 return -EINVAL; 5324 } 5325 arp_validate_value = valptr->value; 5326 } else { 5327 arp_validate_value = 0; 5328 } 5329 5330 if (arp_all_targets) { 5331 bond_opt_initstr(&newval, arp_all_targets); 5332 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_ALL_TARGETS), 5333 &newval); 5334 if (!valptr) { 5335 pr_err("Error: invalid arp_all_targets_value \"%s\"\n", 5336 arp_all_targets); 5337 arp_all_targets_value = 0; 5338 } else { 5339 arp_all_targets_value = valptr->value; 5340 } 5341 } 5342 5343 if (miimon) { 5344 pr_info("MII link monitoring set to %d ms\n", miimon); 5345 } else if (arp_interval) { 5346 valptr = bond_opt_get_val(BOND_OPT_ARP_VALIDATE, 5347 arp_validate_value); 5348 pr_info("ARP monitoring set to %d ms, validate %s, with %d target(s):", 5349 arp_interval, valptr->string, arp_ip_count); 5350 5351 for (i = 0; i < arp_ip_count; i++) 5352 pr_cont(" %s", arp_ip_target[i]); 5353 5354 pr_cont("\n"); 5355 5356 } else if (max_bonds) { 5357 /* miimon and arp_interval not set, we need one so things 5358 * work as expected, see bonding.txt for details 5359 */ 5360 pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! 
see bonding.txt for details\n");
5361 }
5362
5363 if (primary && !bond_mode_uses_primary(bond_mode)) {
5364 /* currently, using a primary only makes sense
5365 * in active backup, TLB or ALB modes
5366 */
5367 pr_warn("Warning: %s primary device specified but has no effect in %s mode\n",
5368 primary, bond_mode_name(bond_mode));
5369 primary = NULL;
5370 }
5371
5372 if (primary && primary_reselect) {
5373 bond_opt_initstr(&newval, primary_reselect);
5374 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_PRIMARY_RESELECT),
5375 &newval);
5376 if (!valptr) {
5377 pr_err("Error: Invalid primary_reselect \"%s\"\n",
5378 primary_reselect);
5379 return -EINVAL;
5380 }
5381 primary_reselect_value = valptr->value;
5382 } else {
5383 primary_reselect_value = BOND_PRI_RESELECT_ALWAYS;
5384 }
5385
5386 if (fail_over_mac) {
5387 bond_opt_initstr(&newval, fail_over_mac);
5388 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_FAIL_OVER_MAC),
5389 &newval);
5390 if (!valptr) {
5391 pr_err("Error: invalid fail_over_mac \"%s\"\n",
5392 fail_over_mac);
5393 return -EINVAL;
5394 }
5395 fail_over_mac_value = valptr->value;
5396 if (bond_mode != BOND_MODE_ACTIVEBACKUP)
5397 pr_warn("Warning: fail_over_mac only affects active-backup mode\n");
5398 } else {
5399 fail_over_mac_value = BOND_FOM_NONE;
5400 }
5401
5402 bond_opt_initstr(&newval, "default");
5403 valptr = bond_opt_parse(
5404 bond_opt_get(BOND_OPT_AD_ACTOR_SYS_PRIO),
5405 &newval);
5406 if (!valptr) {
5407 pr_err("Error: No ad_actor_sys_prio default value\n");
5408 return -EINVAL;
5409 }
5410 ad_actor_sys_prio = valptr->value;
5411
5412 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_USER_PORT_KEY),
5413 &newval);
5414 if (!valptr) {
5415 pr_err("Error: No ad_user_port_key default value\n");
5416 return -EINVAL;
5417 }
5418 ad_user_port_key = valptr->value;
5419
5420 bond_opt_initstr(&newval, "default");
5421 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB), &newval);
5422 if (!valptr) {
5423 pr_err("Error: No tlb_dynamic_lb default value\n");
5424 return -EINVAL;
5425 }
5426 tlb_dynamic_lb = valptr->value;
5427
5428 if (lp_interval == 0) {
5429 pr_warn("Warning: lp_interval must be between 1 and %d, so it was reset to %d\n",
5430 INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL);
5431 lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
5432 }
5433
5434 /* fill params struct with the proper values */
5435 params->mode = bond_mode;
5436 params->xmit_policy = xmit_hashtype;
5437 params->miimon = miimon;
5438 params->num_peer_notif = num_peer_notif;
5439 params->arp_interval = arp_interval;
5440 params->arp_validate = arp_validate_value;
5441 params->arp_all_targets = arp_all_targets_value;
5442 params->updelay = updelay;
5443 params->downdelay = downdelay;
5444 params->peer_notif_delay = 0;
5445 params->use_carrier = use_carrier;
5446 params->lacp_fast = lacp_fast;
5447 params->primary[0] = 0;
5448 params->primary_reselect = primary_reselect_value;
5449 params->fail_over_mac = fail_over_mac_value;
5450 params->tx_queues = tx_queues;
5451 params->all_slaves_active = all_slaves_active;
5452 params->resend_igmp = resend_igmp;
5453 params->min_links = min_links;
5454 params->lp_interval = lp_interval;
5455 params->packets_per_slave = packets_per_slave;
5456 params->tlb_dynamic_lb = tlb_dynamic_lb;
5457 params->ad_actor_sys_prio = ad_actor_sys_prio;
5458 eth_zero_addr(params->ad_actor_system);
5459 params->ad_user_port_key = ad_user_port_key;
5460 if (packets_per_slave > 0) {
5461 params->reciprocal_packets_per_slave =
5462 reciprocal_value(packets_per_slave);
5463 } else {
5464 /* reciprocal_packets_per_slave is unused if 5465 * packets_per_slave is 0 or 1, just initialize it 5466 */ 5467 params->reciprocal_packets_per_slave = 5468 (struct reciprocal_value) { 0 }; 5469 } 5470 5471 if (primary) 5472 strscpy_pad(params->primary, primary, sizeof(params->primary)); 5473 5474 memcpy(params->arp_targets, arp_target, sizeof(arp_target)); 5475 5476 return 0; 5477 } 5478 5479 /* Called from registration process */ 5480 static int bond_init(struct net_device *bond_dev) 5481 { 5482 struct bonding *bond = netdev_priv(bond_dev); 5483 struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id); 5484 5485 netdev_dbg(bond_dev, "Begin bond_init\n"); 5486 5487 bond->wq = alloc_ordered_workqueue(bond_dev->name, WQ_MEM_RECLAIM); 5488 if (!bond->wq) 5489 return -ENOMEM; 5490 5491 if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN) { 5492 bond->rr_tx_counter = alloc_percpu(u32); 5493 if (!bond->rr_tx_counter) { 5494 destroy_workqueue(bond->wq); 5495 bond->wq = NULL; 5496 return -ENOMEM; 5497 } 5498 } 5499 5500 spin_lock_init(&bond->stats_lock); 5501 netdev_lockdep_set_classes(bond_dev); 5502 5503 list_add_tail(&bond->bond_list, &bn->dev_list); 5504 5505 bond_prepare_sysfs_group(bond); 5506 5507 bond_debug_register(bond); 5508 5509 /* Ensure valid dev_addr */ 5510 if (is_zero_ether_addr(bond_dev->dev_addr) && 5511 bond_dev->addr_assign_type == NET_ADDR_PERM) 5512 eth_hw_addr_random(bond_dev); 5513 5514 return 0; 5515 } 5516 5517 unsigned int bond_get_num_tx_queues(void) 5518 { 5519 return tx_queues; 5520 } 5521 5522 /* Create a new bond based on the specified name and bonding parameters. 5523 * If name is NULL, obtain a suitable "bond%d" name for us. 5524 * Caller must NOT hold rtnl_lock; we need to release it here before we 5525 * set up our sysfs entries. 5526 */ 5527 int bond_create(struct net *net, const char *name) 5528 { 5529 struct net_device *bond_dev; 5530 struct bonding *bond; 5531 struct alb_bond_info *bond_info; 5532 int res; 5533 5534 rtnl_lock(); 5535 5536 bond_dev = alloc_netdev_mq(sizeof(struct bonding), 5537 name ? name : "bond%d", NET_NAME_UNKNOWN, 5538 bond_setup, tx_queues); 5539 if (!bond_dev) { 5540 pr_err("%s: eek! can't alloc netdev!\n", name); 5541 rtnl_unlock(); 5542 return -ENOMEM; 5543 } 5544 5545 /* 5546 * Initialize rx_hashtbl_used_head to RLB_NULL_INDEX. 5547 * It is set to 0 by default which is wrong. 
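* (The netdev private area from alloc_netdev_mq() is zeroed, and 0 is a valid hash table index rather than a list terminator, hence the explicit RLB_NULL_INDEX assignment below.)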
5548 */ 5549 bond = netdev_priv(bond_dev); 5550 bond_info = &(BOND_ALB_INFO(bond)); 5551 bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX; 5552 5553 dev_net_set(bond_dev, net); 5554 bond_dev->rtnl_link_ops = &bond_link_ops; 5555 5556 res = register_netdevice(bond_dev); 5557 if (res < 0) { 5558 free_netdev(bond_dev); 5559 rtnl_unlock(); 5560 5561 return res; 5562 } 5563 5564 netif_carrier_off(bond_dev); 5565 5566 bond_work_init_all(bond); 5567 5568 rtnl_unlock(); 5569 return 0; 5570 } 5571 5572 static int __net_init bond_net_init(struct net *net) 5573 { 5574 struct bond_net *bn = net_generic(net, bond_net_id); 5575 5576 bn->net = net; 5577 INIT_LIST_HEAD(&bn->dev_list); 5578 5579 bond_create_proc_dir(bn); 5580 bond_create_sysfs(bn); 5581 5582 return 0; 5583 } 5584 5585 static void __net_exit bond_net_exit(struct net *net) 5586 { 5587 struct bond_net *bn = net_generic(net, bond_net_id); 5588 struct bonding *bond, *tmp_bond; 5589 LIST_HEAD(list); 5590 5591 bond_destroy_sysfs(bn); 5592 5593 /* Kill off any bonds created after unregistering bond rtnl ops */ 5594 rtnl_lock(); 5595 list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list) 5596 unregister_netdevice_queue(bond->dev, &list); 5597 unregister_netdevice_many(&list); 5598 rtnl_unlock(); 5599 5600 bond_destroy_proc_dir(bn); 5601 } 5602 5603 static struct pernet_operations bond_net_ops = { 5604 .init = bond_net_init, 5605 .exit = bond_net_exit, 5606 .id = &bond_net_id, 5607 .size = sizeof(struct bond_net), 5608 }; 5609 5610 static int __init bonding_init(void) 5611 { 5612 int i; 5613 int res; 5614 5615 res = bond_check_params(&bonding_defaults); 5616 if (res) 5617 goto out; 5618 5619 res = register_pernet_subsys(&bond_net_ops); 5620 if (res) 5621 goto out; 5622 5623 res = bond_netlink_init(); 5624 if (res) 5625 goto err_link; 5626 5627 bond_create_debugfs(); 5628 5629 for (i = 0; i < max_bonds; i++) { 5630 res = bond_create(&init_net, NULL); 5631 if (res) 5632 goto err; 5633 } 5634 5635 skb_flow_dissector_init(&flow_keys_bonding, 5636 flow_keys_bonding_keys, 5637 ARRAY_SIZE(flow_keys_bonding_keys)); 5638 5639 register_netdevice_notifier(&bond_netdev_notifier); 5640 out: 5641 return res; 5642 err: 5643 bond_destroy_debugfs(); 5644 bond_netlink_fini(); 5645 err_link: 5646 unregister_pernet_subsys(&bond_net_ops); 5647 goto out; 5648 5649 } 5650 5651 static void __exit bonding_exit(void) 5652 { 5653 unregister_netdevice_notifier(&bond_netdev_notifier); 5654 5655 bond_destroy_debugfs(); 5656 5657 bond_netlink_fini(); 5658 unregister_pernet_subsys(&bond_net_ops); 5659 5660 #ifdef CONFIG_NET_POLL_CONTROLLER 5661 /* Make sure we don't have an imbalance on our netpoll blocking */ 5662 WARN_ON(atomic_read(&netpoll_block_tx)); 5663 #endif 5664 } 5665 5666 module_init(bonding_init); 5667 module_exit(bonding_exit); 5668 MODULE_LICENSE("GPL"); 5669 MODULE_DESCRIPTION(DRV_DESCRIPTION); 5670 MODULE_AUTHOR("Thomas Davis, tadavis@lbl.gov and many others"); 5671