/*
 * originally based on the dummy device.
 *
 * Copyright 1999, Thomas Davis, tadavis@lbl.gov.
 * Licensed under the GPL. Based on dummy.c, and eql.c devices.
 *
 * bonding.c: an Ethernet Bonding driver
 *
 * This is useful to talk to a Cisco EtherChannel compatible equipment:
 *	Cisco 5500
 *	Sun Trunking (Solaris)
 *	Alteon AceDirector Trunks
 *	Linux Bonding
 *	and probably many L2 switches ...
 *
 * How it works:
 *    ifconfig bond0 ipaddress netmask up
 *	will setup a network device, with an ip address. No mac address
 *	will be assigned at this time. The hw mac address will come from
 *	the first slave bonded to the channel. All slaves will then use
 *	this hw mac address.
 *
 *    ifconfig bond0 down
 *	will release all slaves, marking them as down.
 *
 *    ifenslave bond0 eth0
 *	will attach eth0 to bond0 as a slave. eth0 hw mac address will either
 *	a: be used as initial mac address
 *	b: if a hw mac address already is there, eth0's hw mac address
 *	will then be set from bond0.
 */
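/* For reference, the rough iproute2 equivalents of the ifconfig/ifenslave
 * commands above (illustrative only; option spellings may vary by
 * iproute2 version):
 *
 *	ip link add bond0 type bond mode active-backup miimon 100
 *	ip link set eth0 down
 *	ip link set eth0 master bond0
 *	ip link set bond0 up
 */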
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/filter.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <net/ip.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/socket.h>
#include <linux/ctype.h>
#include <linux/inet.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <asm/dma.h>
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/smp.h>
#include <linux/if_ether.h>
#include <net/arp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_bonding.h>
#include <linux/phy.h>
#include <linux/jiffies.h>
#include <linux/preempt.h>
#include <net/route.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/pkt_sched.h>
#include <linux/rculist.h>
#include <net/flow_dissector.h>
#include <net/xfrm.h>
#include <net/bonding.h>
#include <net/bond_3ad.h>
#include <net/bond_alb.h>
#if IS_ENABLED(CONFIG_TLS_DEVICE)
#include <net/tls.h>
#endif
#include <net/ip6_route.h>

#include "bonding_priv.h"

/*---------------------------- Module parameters ----------------------------*/

/* monitor all links that often (in milliseconds). <=0 disables monitoring */

static int max_bonds = BOND_DEFAULT_MAX_BONDS;
static int tx_queues = BOND_DEFAULT_TX_QUEUES;
static int num_peer_notif = 1;
static int miimon;
static int updelay;
static int downdelay;
static int use_carrier = 1;
static char *mode;
static char *primary;
static char *primary_reselect;
static char *lacp_rate;
static int min_links;
static char *ad_select;
static char *xmit_hash_policy;
static int arp_interval;
static char *arp_ip_target[BOND_MAX_ARP_TARGETS];
static char *arp_validate;
static char *arp_all_targets;
static char *fail_over_mac;
static int all_slaves_active;
static struct bond_params bonding_defaults;
static int resend_igmp = BOND_DEFAULT_RESEND_IGMP;
static int packets_per_slave = 1;
static int lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;

module_param(max_bonds, int, 0);
MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
module_param(tx_queues, int, 0);
MODULE_PARM_DESC(tx_queues, "Max number of transmit queues (default = 16)");
module_param_named(num_grat_arp, num_peer_notif, int, 0644);
MODULE_PARM_DESC(num_grat_arp, "Number of peer notifications to send on "
			       "failover event (alias of num_unsol_na)");
module_param_named(num_unsol_na, num_peer_notif, int, 0644);
MODULE_PARM_DESC(num_unsol_na, "Number of peer notifications to send on "
			       "failover event (alias of num_grat_arp)");
module_param(miimon, int, 0);
MODULE_PARM_DESC(miimon, "Link check interval in milliseconds");
module_param(updelay, int, 0);
MODULE_PARM_DESC(updelay, "Delay before considering link up, in milliseconds");
module_param(downdelay, int, 0);
MODULE_PARM_DESC(downdelay, "Delay before considering link down, "
			    "in milliseconds");
module_param(use_carrier, int, 0);
MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; "
			      "0 for off, 1 for on (default)");
module_param(mode, charp, 0);
MODULE_PARM_DESC(mode, "Mode of operation; 0 for balance-rr, "
		       "1 for active-backup, 2 for balance-xor, "
		       "3 for broadcast, 4 for 802.3ad, 5 for balance-tlb, "
		       "6 for balance-alb");
module_param(primary, charp, 0);
MODULE_PARM_DESC(primary, "Primary network device to use");
module_param(primary_reselect, charp, 0);
MODULE_PARM_DESC(primary_reselect, "Reselect primary slave "
				   "once it comes up; "
				   "0 for always (default), "
				   "1 for only if speed of primary is "
				   "better, "
				   "2 for only on active slave "
				   "failure");
module_param(lacp_rate, charp, 0);
MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; "
			    "0 for slow, 1 for fast");
module_param(ad_select, charp, 0);
MODULE_PARM_DESC(ad_select, "802.3ad aggregation selection logic; "
			    "0 for stable (default), 1 for bandwidth, "
			    "2 for count");
module_param(min_links, int, 0);
MODULE_PARM_DESC(min_links, "Minimum number of available links before turning on carrier");

module_param(xmit_hash_policy, charp, 0);
MODULE_PARM_DESC(xmit_hash_policy, "balance-alb, balance-tlb, balance-xor, 802.3ad hashing method; "
				   "0 for layer 2 (default), 1 for layer 3+4, "
				   "2 for layer 2+3, 3 for encap layer 2+3, "
				   "4 for encap layer 3+4, 5 for vlan+srcmac");
module_param(arp_interval, int, 0);
MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds");
module_param_array(arp_ip_target, charp, NULL, 0);
MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form");
module_param(arp_validate, charp, 0);
MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes; "
			       "0 for none (default), 1 for active, "
			       "2 for backup, 3 for all");
module_param(arp_all_targets, charp, 0);
MODULE_PARM_DESC(arp_all_targets, "fail on any/all arp targets timeout; 0 for any (default), 1 for all");
module_param(fail_over_mac, charp, 0);
MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to "
				"the same MAC; 0 for none (default), "
				"1 for active, 2 for follow");
module_param(all_slaves_active, int, 0);
MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface "
				    "by setting active flag for all slaves; "
				    "0 for never (default), 1 for always.");
module_param(resend_igmp, int, 0);
MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on "
			      "link failure");
module_param(packets_per_slave, int, 0);
MODULE_PARM_DESC(packets_per_slave, "Packets to send per slave in balance-rr "
				    "mode; 0 for a random slave, 1 packet per "
				    "slave (default), >1 packets per slave.");
module_param(lp_interval, uint, 0);
MODULE_PARM_DESC(lp_interval, "The number of seconds between instances where "
			      "the bonding driver sends learning packets to "
			      "each slave's peer switch. The default is 1.");
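/* Illustrative example (values are arbitrary, not recommendations) of
 * loading the driver with several of the parameters declared above;
 * the sysfs and netlink interfaces accept the same options:
 *
 *	modprobe bonding mode=802.3ad miimon=100 lacp_rate=fast max_bonds=1
 */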
/*----------------------------- Global variables ----------------------------*/

#ifdef CONFIG_NET_POLL_CONTROLLER
atomic_t netpoll_block_tx = ATOMIC_INIT(0);
#endif

unsigned int bond_net_id __read_mostly;

static const struct flow_dissector_key flow_keys_bonding_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v4addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v6addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_TIPC,
		.offset = offsetof(struct flow_keys, addrs.tipckey),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_PORTS,
		.offset = offsetof(struct flow_keys, ports),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_ICMP,
		.offset = offsetof(struct flow_keys, icmp),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_VLAN,
		.offset = offsetof(struct flow_keys, vlan),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL,
		.offset = offsetof(struct flow_keys, tags),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
		.offset = offsetof(struct flow_keys, keyid),
	},
};

static struct flow_dissector flow_keys_bonding __read_mostly;

/*-------------------------- Forward declarations ---------------------------*/

static int bond_init(struct net_device *bond_dev);
static void bond_uninit(struct net_device *bond_dev);
static void bond_get_stats(struct net_device *bond_dev,
			   struct rtnl_link_stats64 *stats);
static void bond_slave_arr_handler(struct work_struct *work);
static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
				  int mod);
static void bond_netdev_notify_work(struct work_struct *work);

/*---------------------------- General routines -----------------------------*/

const char *bond_mode_name(int mode)
{
	static const char *names[] = {
		[BOND_MODE_ROUNDROBIN] = "load balancing (round-robin)",
		[BOND_MODE_ACTIVEBACKUP] = "fault-tolerance (active-backup)",
		[BOND_MODE_XOR] = "load balancing (xor)",
		[BOND_MODE_BROADCAST] = "fault-tolerance (broadcast)",
		[BOND_MODE_8023AD] = "IEEE 802.3ad Dynamic link aggregation",
		[BOND_MODE_TLB] = "transmit load balancing",
		[BOND_MODE_ALB] = "adaptive load balancing",
	};

	if (mode < BOND_MODE_ROUNDROBIN || mode > BOND_MODE_ALB)
		return "unknown";

	return names[mode];
}
/**
 * bond_dev_queue_xmit - Prepare skb for xmit.
 *
 * @bond: bond device that got this skb for tx.
 * @skb: hw accel VLAN tagged skb to transmit
 * @slave_dev: slave that is supposed to xmit this skbuff
 */
netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
				struct net_device *slave_dev)
{
	skb->dev = slave_dev;

	BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
		     sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
	skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);

	if (unlikely(netpoll_tx_running(bond->dev)))
		return bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);

	return dev_queue_xmit(skb);
}

static bool bond_sk_check(struct bonding *bond)
{
	switch (BOND_MODE(bond)) {
	case BOND_MODE_8023AD:
	case BOND_MODE_XOR:
		if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34)
			return true;
		fallthrough;
	default:
		return false;
	}
}

static bool bond_xdp_check(struct bonding *bond)
{
	switch (BOND_MODE(bond)) {
	case BOND_MODE_ROUNDROBIN:
	case BOND_MODE_ACTIVEBACKUP:
		return true;
	case BOND_MODE_8023AD:
	case BOND_MODE_XOR:
		/* vlan+srcmac is not supported with XDP as in most cases the
		 * 802.1q payload is not in the packet due to hardware offload.
		 */
		if (bond->params.xmit_policy != BOND_XMIT_POLICY_VLAN_SRCMAC)
			return true;
		fallthrough;
	default:
		return false;
	}
}

/*---------------------------------- VLAN -----------------------------------*/

/* In the following 2 functions, bond_vlan_rx_add_vid and bond_vlan_rx_kill_vid,
 * we don't protect the slave list iteration with a lock because:
 * a. This operation is performed in IOCTL context,
 * b. The operation is protected by the RTNL semaphore in the 8021q code,
 * c. Holding a lock with BH disabled while directly calling a base driver
 *    entry point is generally a BAD idea.
 *
 * The design of synchronization/protection for this operation in the 8021q
 * module is good for one or more VLAN devices over a single physical device
 * and cannot be extended for a teaming solution like bonding, so there is a
 * potential race condition here where a net device from the vlan group might
 * be referenced (either by a base driver or the 8021q code) while it is being
 * removed from the system. However, it turns out we're not making matters
 * worse, and if it works for regular VLAN usage it will work here too.
 */
/**
 * bond_vlan_rx_add_vid - Propagates adding an id to slaves
 * @bond_dev: bonding net device that got called
 * @proto: network protocol ID
 * @vid: vlan id being added
 */
static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
				__be16 proto, u16 vid)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave, *rollback_slave;
	struct list_head *iter;
	int res;

	bond_for_each_slave(bond, slave, iter) {
		res = vlan_vid_add(slave->dev, proto, vid);
		if (res)
			goto unwind;
	}

	return 0;

unwind:
	/* unwind to the slave that failed */
	bond_for_each_slave(bond, rollback_slave, iter) {
		if (rollback_slave == slave)
			break;

		vlan_vid_del(rollback_slave->dev, proto, vid);
	}

	return res;
}
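/* To make the unwind above concrete: with slaves A, B and C, if
 * vlan_vid_add() fails on C, the rollback loop deletes the vid from A and
 * B only, breaking when it reaches C (the slave that failed), and C's
 * error is returned to the 8021q caller.
 */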
/**
 * bond_vlan_rx_kill_vid - Propagates deleting an id to slaves
 * @bond_dev: bonding net device that got called
 * @proto: network protocol ID
 * @vid: vlan id being removed
 */
static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
				 __be16 proto, u16 vid)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct list_head *iter;
	struct slave *slave;

	bond_for_each_slave(bond, slave, iter)
		vlan_vid_del(slave->dev, proto, vid);

	if (bond_is_lb(bond))
		bond_alb_clear_vlan(bond, vid);

	return 0;
}

/*---------------------------------- XFRM -----------------------------------*/

#ifdef CONFIG_XFRM_OFFLOAD
/**
 * bond_ipsec_add_sa - program device with a security association
 * @xs: pointer to transformer state struct
 **/
static int bond_ipsec_add_sa(struct xfrm_state *xs)
{
	struct net_device *bond_dev = xs->xso.dev;
	struct bond_ipsec *ipsec;
	struct bonding *bond;
	struct slave *slave;
	int err;

	if (!bond_dev)
		return -EINVAL;

	rcu_read_lock();
	bond = netdev_priv(bond_dev);
	slave = rcu_dereference(bond->curr_active_slave);
	if (!slave) {
		rcu_read_unlock();
		return -ENODEV;
	}

	if (!slave->dev->xfrmdev_ops ||
	    !slave->dev->xfrmdev_ops->xdo_dev_state_add ||
	    netif_is_bond_master(slave->dev)) {
		slave_warn(bond_dev, slave->dev, "Slave does not support ipsec offload\n");
		rcu_read_unlock();
		return -EINVAL;
	}

	ipsec = kmalloc(sizeof(*ipsec), GFP_ATOMIC);
	if (!ipsec) {
		rcu_read_unlock();
		return -ENOMEM;
	}
	xs->xso.real_dev = slave->dev;

	err = slave->dev->xfrmdev_ops->xdo_dev_state_add(xs);
	if (!err) {
		ipsec->xs = xs;
		INIT_LIST_HEAD(&ipsec->list);
		spin_lock_bh(&bond->ipsec_lock);
		list_add(&ipsec->list, &bond->ipsec_list);
		spin_unlock_bh(&bond->ipsec_lock);
	} else {
		kfree(ipsec);
	}
	rcu_read_unlock();
	return err;
}

static void bond_ipsec_add_sa_all(struct bonding *bond)
{
	struct net_device *bond_dev = bond->dev;
	struct bond_ipsec *ipsec;
	struct slave *slave;

	rcu_read_lock();
	slave = rcu_dereference(bond->curr_active_slave);
	if (!slave)
		goto out;

	if (!slave->dev->xfrmdev_ops ||
	    !slave->dev->xfrmdev_ops->xdo_dev_state_add ||
	    netif_is_bond_master(slave->dev)) {
		spin_lock_bh(&bond->ipsec_lock);
		if (!list_empty(&bond->ipsec_list))
			slave_warn(bond_dev, slave->dev,
				   "%s: no slave xdo_dev_state_add\n",
				   __func__);
		spin_unlock_bh(&bond->ipsec_lock);
		goto out;
	}

	spin_lock_bh(&bond->ipsec_lock);
	list_for_each_entry(ipsec, &bond->ipsec_list, list) {
		ipsec->xs->xso.real_dev = slave->dev;
		if (slave->dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs)) {
			slave_warn(bond_dev, slave->dev, "%s: failed to add SA\n", __func__);
			ipsec->xs->xso.real_dev = NULL;
		}
	}
	spin_unlock_bh(&bond->ipsec_lock);
out:
	rcu_read_unlock();
}
/**
 * bond_ipsec_del_sa - clear out this specific SA
 * @xs: pointer to transformer state struct
 **/
static void bond_ipsec_del_sa(struct xfrm_state *xs)
{
	struct net_device *bond_dev = xs->xso.dev;
	struct bond_ipsec *ipsec;
	struct bonding *bond;
	struct slave *slave;

	if (!bond_dev)
		return;

	rcu_read_lock();
	bond = netdev_priv(bond_dev);
	slave = rcu_dereference(bond->curr_active_slave);

	if (!slave)
		goto out;

	if (!xs->xso.real_dev)
		goto out;

	WARN_ON(xs->xso.real_dev != slave->dev);

	if (!slave->dev->xfrmdev_ops ||
	    !slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
	    netif_is_bond_master(slave->dev)) {
		slave_warn(bond_dev, slave->dev, "%s: no slave xdo_dev_state_delete\n", __func__);
		goto out;
	}

	slave->dev->xfrmdev_ops->xdo_dev_state_delete(xs);
out:
	spin_lock_bh(&bond->ipsec_lock);
	list_for_each_entry(ipsec, &bond->ipsec_list, list) {
		if (ipsec->xs == xs) {
			list_del(&ipsec->list);
			kfree(ipsec);
			break;
		}
	}
	spin_unlock_bh(&bond->ipsec_lock);
	rcu_read_unlock();
}

static void bond_ipsec_del_sa_all(struct bonding *bond)
{
	struct net_device *bond_dev = bond->dev;
	struct bond_ipsec *ipsec;
	struct slave *slave;

	rcu_read_lock();
	slave = rcu_dereference(bond->curr_active_slave);
	if (!slave) {
		rcu_read_unlock();
		return;
	}

	spin_lock_bh(&bond->ipsec_lock);
	list_for_each_entry(ipsec, &bond->ipsec_list, list) {
		if (!ipsec->xs->xso.real_dev)
			continue;

		if (!slave->dev->xfrmdev_ops ||
		    !slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
		    netif_is_bond_master(slave->dev)) {
			slave_warn(bond_dev, slave->dev,
				   "%s: no slave xdo_dev_state_delete\n",
				   __func__);
		} else {
			slave->dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs);
		}
		ipsec->xs->xso.real_dev = NULL;
	}
	spin_unlock_bh(&bond->ipsec_lock);
	rcu_read_unlock();
}

/**
 * bond_ipsec_offload_ok - can this packet use the xfrm hw offload
 * @skb: current data packet
 * @xs: pointer to transformer state struct
 **/
static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
	struct net_device *bond_dev = xs->xso.dev;
	struct net_device *real_dev;
	struct slave *curr_active;
	struct bonding *bond;
	int err;

	bond = netdev_priv(bond_dev);
	rcu_read_lock();
	curr_active = rcu_dereference(bond->curr_active_slave);
	real_dev = curr_active->dev;

	if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
		err = false;
		goto out;
	}

	if (!xs->xso.real_dev) {
		err = false;
		goto out;
	}

	if (!real_dev->xfrmdev_ops ||
	    !real_dev->xfrmdev_ops->xdo_dev_offload_ok ||
	    netif_is_bond_master(real_dev)) {
		err = false;
		goto out;
	}

	err = real_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs);
out:
	rcu_read_unlock();
	return err;
}

static const struct xfrmdev_ops bond_xfrmdev_ops = {
	.xdo_dev_state_add = bond_ipsec_add_sa,
	.xdo_dev_state_delete = bond_ipsec_del_sa,
	.xdo_dev_offload_ok = bond_ipsec_offload_ok,
};
#endif /* CONFIG_XFRM_OFFLOAD */
/*------------------------------- Link status -------------------------------*/

/* Set the carrier state for the master according to the state of its
 * slaves. If any slaves are up, the master is up. In 802.3ad mode,
 * do special 802.3ad magic.
 *
 * Returns zero if carrier state does not change, nonzero if it does.
 */
int bond_set_carrier(struct bonding *bond)
{
	struct list_head *iter;
	struct slave *slave;

	if (!bond_has_slaves(bond))
		goto down;

	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		return bond_3ad_set_carrier(bond);

	bond_for_each_slave(bond, slave, iter) {
		if (slave->link == BOND_LINK_UP) {
			if (!netif_carrier_ok(bond->dev)) {
				netif_carrier_on(bond->dev);
				return 1;
			}
			return 0;
		}
	}

down:
	if (netif_carrier_ok(bond->dev)) {
		netif_carrier_off(bond->dev);
		return 1;
	}
	return 0;
}

/* Get link speed and duplex from the slave's base driver using ethtool.
 * If for some reason the call fails or the values are invalid, set speed
 * and duplex to SPEED_UNKNOWN and DUPLEX_UNKNOWN, and return. Return 1 if
 * speed or duplex settings are UNKNOWN; 0 otherwise.
 */
static int bond_update_speed_duplex(struct slave *slave)
{
	struct net_device *slave_dev = slave->dev;
	struct ethtool_link_ksettings ecmd;
	int res;

	slave->speed = SPEED_UNKNOWN;
	slave->duplex = DUPLEX_UNKNOWN;

	res = __ethtool_get_link_ksettings(slave_dev, &ecmd);
	if (res < 0)
		return 1;
	if (ecmd.base.speed == 0 || ecmd.base.speed == ((__u32)-1))
		return 1;
	switch (ecmd.base.duplex) {
	case DUPLEX_FULL:
	case DUPLEX_HALF:
		break;
	default:
		return 1;
	}

	slave->speed = ecmd.base.speed;
	slave->duplex = ecmd.base.duplex;

	return 0;
}

const char *bond_slave_link_status(s8 link)
{
	switch (link) {
	case BOND_LINK_UP:
		return "up";
	case BOND_LINK_FAIL:
		return "going down";
	case BOND_LINK_DOWN:
		return "down";
	case BOND_LINK_BACK:
		return "going back";
	default:
		return "unknown";
	}
}

/* If <dev> supports MII link status reporting, check its link status.
 *
 * We either do MII/ETHTOOL ioctls, or check netif_carrier_ok(),
 * depending upon the setting of the use_carrier parameter.
 *
 * Return either BMSR_LSTATUS, meaning that the link is up (or we
 * can't tell and just pretend it is), or 0, meaning that the link is
 * down.
 *
 * If reporting is non-zero, instead of faking link up, return -1 if
 * both ETHTOOL and MII ioctls fail (meaning the device does not
 * support them). If use_carrier is set, return whatever it says.
 * It'd be nice if there was a good way to tell if a driver supports
 * netif_carrier, but there really isn't.
 */
static int bond_check_dev_link(struct bonding *bond,
			       struct net_device *slave_dev, int reporting)
{
	const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
	int (*ioctl)(struct net_device *, struct ifreq *, int);
	struct ifreq ifr;
	struct mii_ioctl_data *mii;

	if (!reporting && !netif_running(slave_dev))
		return 0;

	if (bond->params.use_carrier)
		return netif_carrier_ok(slave_dev) ? BMSR_LSTATUS : 0;

	/* Try to get link status using ethtool first. */
	if (slave_dev->ethtool_ops->get_link)
		return slave_dev->ethtool_ops->get_link(slave_dev) ?
			BMSR_LSTATUS : 0;

	/* Ethtool can't be used, fall back to MII ioctls. */
	ioctl = slave_ops->ndo_eth_ioctl;
	if (ioctl) {
		/* TODO: set pointer to correct ioctl on a per team member
		 *       basis to make this more efficient. That is, once
		 *       we determine the correct ioctl, we will always
		 *       call it and not the others for that team
		 *       member.
		 */

		/* We cannot assume that SIOCGMIIPHY will also read a
		 * register; not all network drivers (e.g., e100)
		 * support that.
		 */

		/* Yes, the mii is overlaid on the ifreq.ifr_ifru */
		strscpy_pad(ifr.ifr_name, slave_dev->name, IFNAMSIZ);
		mii = if_mii(&ifr);
		if (ioctl(slave_dev, &ifr, SIOCGMIIPHY) == 0) {
			mii->reg_num = MII_BMSR;
			if (ioctl(slave_dev, &ifr, SIOCGMIIREG) == 0)
				return mii->val_out & BMSR_LSTATUS;
		}
	}

	/* If reporting, report that either there's no ndo_eth_ioctl,
	 * or both SIOCGMIIREG and get_link failed (meaning that we
	 * cannot report link status). If not reporting, pretend
	 * we're ok.
	 */
	return reporting ? -1 : BMSR_LSTATUS;
}
/*----------------------------- Multicast list ------------------------------*/

/* Push the promiscuity flag down to appropriate slaves */
static int bond_set_promiscuity(struct bonding *bond, int inc)
{
	struct list_head *iter;
	int err = 0;

	if (bond_uses_primary(bond)) {
		struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);

		if (curr_active)
			err = dev_set_promiscuity(curr_active->dev, inc);
	} else {
		struct slave *slave;

		bond_for_each_slave(bond, slave, iter) {
			err = dev_set_promiscuity(slave->dev, inc);
			if (err)
				return err;
		}
	}
	return err;
}

/* Push the allmulti flag down to all slaves */
static int bond_set_allmulti(struct bonding *bond, int inc)
{
	struct list_head *iter;
	int err = 0;

	if (bond_uses_primary(bond)) {
		struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);

		if (curr_active)
			err = dev_set_allmulti(curr_active->dev, inc);
	} else {
		struct slave *slave;

		bond_for_each_slave(bond, slave, iter) {
			err = dev_set_allmulti(slave->dev, inc);
			if (err)
				return err;
		}
	}
	return err;
}

/* Retrieve the list of registered multicast addresses for the bonding
 * device and retransmit an IGMP JOIN request to the current active
 * slave.
 */
static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
{
	struct bonding *bond = container_of(work, struct bonding,
					    mcast_work.work);

	if (!rtnl_trylock()) {
		queue_delayed_work(bond->wq, &bond->mcast_work, 1);
		return;
	}
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, bond->dev);

	if (bond->igmp_retrans > 1) {
		bond->igmp_retrans--;
		queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
	}
	rtnl_unlock();
}

/* Flush bond's hardware addresses from slave */
static void bond_hw_addr_flush(struct net_device *bond_dev,
			       struct net_device *slave_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);

	dev_uc_unsync(slave_dev, bond_dev);
	dev_mc_unsync(slave_dev, bond_dev);

	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		dev_mc_del(slave_dev, lacpdu_mcast_addr);
}

/*--------------------------- Active slave change ---------------------------*/

/* Update the hardware address list and promisc/allmulti for the new and
 * old active slaves (if any). Modes that are not using primary keep all
 * slaves up to date at all times; only the modes that use primary need to
 * call this function to swap these settings during a failover.
 */
static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
			      struct slave *old_active)
{
	if (old_active) {
		if (bond->dev->flags & IFF_PROMISC)
			dev_set_promiscuity(old_active->dev, -1);

		if (bond->dev->flags & IFF_ALLMULTI)
			dev_set_allmulti(old_active->dev, -1);

		if (bond->dev->flags & IFF_UP)
			bond_hw_addr_flush(bond->dev, old_active->dev);
	}

	if (new_active) {
		/* FIXME: Signal errors upstream. */
		if (bond->dev->flags & IFF_PROMISC)
			dev_set_promiscuity(new_active->dev, 1);

		if (bond->dev->flags & IFF_ALLMULTI)
			dev_set_allmulti(new_active->dev, 1);

		if (bond->dev->flags & IFF_UP) {
			netif_addr_lock_bh(bond->dev);
			dev_uc_sync(new_active->dev, bond->dev);
			dev_mc_sync(new_active->dev, bond->dev);
			netif_addr_unlock_bh(bond->dev);
		}
	}
}

/**
 * bond_set_dev_addr - clone slave's address to bond
 * @bond_dev: bond net device
 * @slave_dev: slave net device
 *
 * Should be called with RTNL held.
 */
static int bond_set_dev_addr(struct net_device *bond_dev,
			     struct net_device *slave_dev)
{
	int err;

	slave_dbg(bond_dev, slave_dev, "bond_dev=%p slave_dev=%p slave_dev->addr_len=%d\n",
		  bond_dev, slave_dev, slave_dev->addr_len);
	err = dev_pre_changeaddr_notify(bond_dev, slave_dev->dev_addr, NULL);
	if (err)
		return err;

	__dev_addr_set(bond_dev, slave_dev->dev_addr, slave_dev->addr_len);
	bond_dev->addr_assign_type = NET_ADDR_STOLEN;
	call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
	return 0;
}
static struct slave *bond_get_old_active(struct bonding *bond,
					 struct slave *new_active)
{
	struct slave *slave;
	struct list_head *iter;

	bond_for_each_slave(bond, slave, iter) {
		if (slave == new_active)
			continue;

		if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr))
			return slave;
	}

	return NULL;
}

/* bond_do_fail_over_mac
 *
 * Perform special MAC address swapping for fail_over_mac settings
 *
 * Called with RTNL
 */
static void bond_do_fail_over_mac(struct bonding *bond,
				  struct slave *new_active,
				  struct slave *old_active)
{
	u8 tmp_mac[MAX_ADDR_LEN];
	struct sockaddr_storage ss;
	int rv;

	switch (bond->params.fail_over_mac) {
	case BOND_FOM_ACTIVE:
		if (new_active) {
			rv = bond_set_dev_addr(bond->dev, new_active->dev);
			if (rv)
				slave_err(bond->dev, new_active->dev, "Error %d setting bond MAC from slave\n",
					  -rv);
		}
		break;
	case BOND_FOM_FOLLOW:
		/* if new_active && old_active, swap them
		 * if just old_active, do nothing (going to no active slave)
		 * if just new_active, set new_active to bond's MAC
		 */
		if (!new_active)
			return;

		if (!old_active)
			old_active = bond_get_old_active(bond, new_active);

		if (old_active) {
			bond_hw_addr_copy(tmp_mac, new_active->dev->dev_addr,
					  new_active->dev->addr_len);
			bond_hw_addr_copy(ss.__data,
					  old_active->dev->dev_addr,
					  old_active->dev->addr_len);
			ss.ss_family = new_active->dev->type;
		} else {
			bond_hw_addr_copy(ss.__data, bond->dev->dev_addr,
					  bond->dev->addr_len);
			ss.ss_family = bond->dev->type;
		}

		rv = dev_set_mac_address(new_active->dev,
					 (struct sockaddr *)&ss, NULL);
		if (rv) {
			slave_err(bond->dev, new_active->dev, "Error %d setting MAC of new active slave\n",
				  -rv);
			goto out;
		}

		if (!old_active)
			goto out;

		bond_hw_addr_copy(ss.__data, tmp_mac,
				  new_active->dev->addr_len);
		ss.ss_family = old_active->dev->type;

		rv = dev_set_mac_address(old_active->dev,
					 (struct sockaddr *)&ss, NULL);
		if (rv)
			slave_err(bond->dev, old_active->dev, "Error %d setting MAC of old active slave\n",
				  -rv);
out:
		break;
	default:
		netdev_err(bond->dev, "bond_do_fail_over_mac impossible: bad policy %d\n",
			   bond->params.fail_over_mac);
		break;
	}
}
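/* A concrete reading of the BOND_FOM_FOLLOW swap above: if old_active
 * carries the bond's MAC X and new_active's own MAC is Y, new_active is
 * given X (so the address peers already know stays in service) and
 * old_active is given Y, which was stashed in tmp_mac.
 */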
/**
 * bond_choose_primary_or_current - select the primary or high priority slave
 * @bond: our bonding struct
 *
 * - Check if there is a primary link. If the primary link was set and is up,
 *   go on and do link reselection.
 *
 * - If primary link is not set or down, find the highest priority link.
 *   If the highest priority link is not current slave, set it as primary
 *   link and do link reselection.
 */
static struct slave *bond_choose_primary_or_current(struct bonding *bond)
{
	struct slave *prim = rtnl_dereference(bond->primary_slave);
	struct slave *curr = rtnl_dereference(bond->curr_active_slave);
	struct slave *slave, *hprio = NULL;
	struct list_head *iter;

	if (!prim || prim->link != BOND_LINK_UP) {
		bond_for_each_slave(bond, slave, iter) {
			if (slave->link == BOND_LINK_UP) {
				hprio = hprio ?: slave;
				if (slave->prio > hprio->prio)
					hprio = slave;
			}
		}

		if (hprio && hprio != curr) {
			prim = hprio;
			goto link_reselect;
		}

		if (!curr || curr->link != BOND_LINK_UP)
			return NULL;
		return curr;
	}

	if (bond->force_primary) {
		bond->force_primary = false;
		return prim;
	}

link_reselect:
	if (!curr || curr->link != BOND_LINK_UP)
		return prim;

	/* At this point, prim and curr are both up */
	switch (bond->params.primary_reselect) {
	case BOND_PRI_RESELECT_ALWAYS:
		return prim;
	case BOND_PRI_RESELECT_BETTER:
		if (prim->speed < curr->speed)
			return curr;
		if (prim->speed == curr->speed && prim->duplex <= curr->duplex)
			return curr;
		return prim;
	case BOND_PRI_RESELECT_FAILURE:
		return curr;
	default:
		netdev_err(bond->dev, "impossible primary_reselect %d\n",
			   bond->params.primary_reselect);
		return curr;
	}
}
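/* Example of the reselection policy above: with primary_reselect=1
 * (BOND_PRI_RESELECT_BETTER), a 1Gbps primary coming back up does not
 * preempt a 10Gbps current slave, since prim->speed < curr->speed keeps
 * curr; the default policy 0 (always) would switch back to the primary.
 */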
/**
 * bond_find_best_slave - select the best available slave to be the active one
 * @bond: our bonding struct
 */
static struct slave *bond_find_best_slave(struct bonding *bond)
{
	struct slave *slave, *bestslave = NULL;
	struct list_head *iter;
	int mintime = bond->params.updelay;

	slave = bond_choose_primary_or_current(bond);
	if (slave)
		return slave;

	bond_for_each_slave(bond, slave, iter) {
		if (slave->link == BOND_LINK_UP)
			return slave;
		if (slave->link == BOND_LINK_BACK && bond_slave_is_up(slave) &&
		    slave->delay < mintime) {
			mintime = slave->delay;
			bestslave = slave;
		}
	}

	return bestslave;
}

static bool bond_should_notify_peers(struct bonding *bond)
{
	struct slave *slave;

	rcu_read_lock();
	slave = rcu_dereference(bond->curr_active_slave);
	rcu_read_unlock();

	if (!slave || !bond->send_peer_notif ||
	    bond->send_peer_notif %
	    max(1, bond->params.peer_notif_delay) != 0 ||
	    !netif_carrier_ok(bond->dev) ||
	    test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
		return false;

	netdev_dbg(bond->dev, "bond_should_notify_peers: slave %s\n",
		   slave ? slave->dev->name : "NULL");

	return true;
}

/**
 * bond_change_active_slave - change the active slave into the specified one
 * @bond: our bonding struct
 * @new_active: the new slave to make the active one
 *
 * Set the new slave to the bond's settings and unset them on the old
 * curr_active_slave. Settings include flags, mc-list, promiscuity,
 * allmulti, etc.
 *
 * If @new_active's link state is %BOND_LINK_BACK we'll set it to
 * %BOND_LINK_UP, because it is apparently the best available slave we
 * have, even though its updelay hasn't timed out yet.
 *
 * Caller must hold RTNL.
 */
void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
{
	struct slave *old_active;

	ASSERT_RTNL();

	old_active = rtnl_dereference(bond->curr_active_slave);

	if (old_active == new_active)
		return;

#ifdef CONFIG_XFRM_OFFLOAD
	bond_ipsec_del_sa_all(bond);
#endif /* CONFIG_XFRM_OFFLOAD */

	if (new_active) {
		new_active->last_link_up = jiffies;

		if (new_active->link == BOND_LINK_BACK) {
			if (bond_uses_primary(bond)) {
				slave_info(bond->dev, new_active->dev, "making interface the new active one %d ms earlier\n",
					   (bond->params.updelay - new_active->delay) * bond->params.miimon);
			}

			new_active->delay = 0;
			bond_set_slave_link_state(new_active, BOND_LINK_UP,
						  BOND_SLAVE_NOTIFY_NOW);

			if (BOND_MODE(bond) == BOND_MODE_8023AD)
				bond_3ad_handle_link_change(new_active, BOND_LINK_UP);

			if (bond_is_lb(bond))
				bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
		} else {
			if (bond_uses_primary(bond))
				slave_info(bond->dev, new_active->dev, "making interface the new active one\n");
		}
	}

	if (bond_uses_primary(bond))
		bond_hw_addr_swap(bond, new_active, old_active);

	if (bond_is_lb(bond)) {
		bond_alb_handle_active_change(bond, new_active);
		if (old_active)
			bond_set_slave_inactive_flags(old_active,
						      BOND_SLAVE_NOTIFY_NOW);
		if (new_active)
			bond_set_slave_active_flags(new_active,
						    BOND_SLAVE_NOTIFY_NOW);
	} else {
		rcu_assign_pointer(bond->curr_active_slave, new_active);
	}

	if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
		if (old_active)
			bond_set_slave_inactive_flags(old_active,
						      BOND_SLAVE_NOTIFY_NOW);

		if (new_active) {
			bool should_notify_peers = false;

			bond_set_slave_active_flags(new_active,
						    BOND_SLAVE_NOTIFY_NOW);

			if (bond->params.fail_over_mac)
				bond_do_fail_over_mac(bond, new_active,
						      old_active);

			if (netif_running(bond->dev)) {
				bond->send_peer_notif =
					bond->params.num_peer_notif *
					max(1, bond->params.peer_notif_delay);
				should_notify_peers =
					bond_should_notify_peers(bond);
			}

			call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev);
			if (should_notify_peers) {
				bond->send_peer_notif--;
				call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
							 bond->dev);
			}
		}
	}

#ifdef CONFIG_XFRM_OFFLOAD
	bond_ipsec_add_sa_all(bond);
#endif /* CONFIG_XFRM_OFFLOAD */

	/* Resend IGMP joins since active slave has changed or
	 * all were sent on curr_active_slave.
	 * Resend only if bond is brought up with the affected
	 * bonding modes and the retransmission is enabled.
	 */
	if (netif_running(bond->dev) && (bond->params.resend_igmp > 0) &&
	    ((bond_uses_primary(bond) && new_active) ||
	     BOND_MODE(bond) == BOND_MODE_ROUNDROBIN)) {
		bond->igmp_retrans = bond->params.resend_igmp;
		queue_delayed_work(bond->wq, &bond->mcast_work, 1);
	}
}
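/* A sketch of the peer-notification bookkeeping above, as I read it: with
 * num_peer_notif = 2 and peer_notif_delay = 3, send_peer_notif starts at
 * 6 and bond_should_notify_peers() passes only while the countdown is a
 * multiple of the delay, so the two NETDEV_NOTIFY_PEERS events end up
 * spaced three monitor intervals apart.
 */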
/**
 * bond_select_active_slave - select a new active slave, if needed
 * @bond: our bonding struct
 *
 * This function should be called when one of the following occurs:
 * - The old curr_active_slave has been released or lost its link.
 * - The primary_slave has got its link back.
 * - A slave has got its link back and there's no old curr_active_slave.
 *
 * Caller must hold RTNL.
 */
void bond_select_active_slave(struct bonding *bond)
{
	struct slave *best_slave;
	int rv;

	ASSERT_RTNL();

	best_slave = bond_find_best_slave(bond);
	if (best_slave != rtnl_dereference(bond->curr_active_slave)) {
		bond_change_active_slave(bond, best_slave);
		rv = bond_set_carrier(bond);
		if (!rv)
			return;

		if (netif_carrier_ok(bond->dev))
			netdev_info(bond->dev, "active interface up!\n");
		else
			netdev_info(bond->dev, "now running without any active interface!\n");
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static inline int slave_enable_netpoll(struct slave *slave)
{
	struct netpoll *np;
	int err = 0;

	np = kzalloc(sizeof(*np), GFP_KERNEL);
	err = -ENOMEM;
	if (!np)
		goto out;

	err = __netpoll_setup(np, slave->dev);
	if (err) {
		kfree(np);
		goto out;
	}
	slave->np = np;
out:
	return err;
}
static inline void slave_disable_netpoll(struct slave *slave)
{
	struct netpoll *np = slave->np;

	if (!np)
		return;

	slave->np = NULL;

	__netpoll_free(np);
}

static void bond_poll_controller(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave = NULL;
	struct list_head *iter;
	struct ad_info ad_info;

	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		if (bond_3ad_get_active_agg_info(bond, &ad_info))
			return;

	bond_for_each_slave_rcu(bond, slave, iter) {
		if (!bond_slave_is_up(slave))
			continue;

		if (BOND_MODE(bond) == BOND_MODE_8023AD) {
			struct aggregator *agg =
				SLAVE_AD_INFO(slave)->port.aggregator;

			if (agg &&
			    agg->aggregator_identifier != ad_info.aggregator_id)
				continue;
		}

		netpoll_poll_dev(slave->dev);
	}
}

static void bond_netpoll_cleanup(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct list_head *iter;
	struct slave *slave;

	bond_for_each_slave(bond, slave, iter)
		if (bond_slave_is_up(slave))
			slave_disable_netpoll(slave);
}

static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
{
	struct bonding *bond = netdev_priv(dev);
	struct list_head *iter;
	struct slave *slave;
	int err = 0;

	bond_for_each_slave(bond, slave, iter) {
		err = slave_enable_netpoll(slave);
		if (err) {
			bond_netpoll_cleanup(dev);
			break;
		}
	}
	return err;
}
#else
static inline int slave_enable_netpoll(struct slave *slave)
{
	return 0;
}
static inline void slave_disable_netpoll(struct slave *slave)
{
}
static void bond_netpoll_cleanup(struct net_device *bond_dev)
{
}
#endif

/*---------------------------------- IOCTL ----------------------------------*/

static netdev_features_t bond_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	struct bonding *bond = netdev_priv(dev);
	struct list_head *iter;
	netdev_features_t mask;
	struct slave *slave;

	mask = features;

	features &= ~NETIF_F_ONE_FOR_ALL;
	features |= NETIF_F_ALL_FOR_ALL;

	bond_for_each_slave(bond, slave, iter) {
		features = netdev_increment_features(features,
						     slave->dev->features,
						     mask);
	}
	features = netdev_add_tso_features(features, mask);

	return features;
}
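/* Rough intuition for the seeding above, going by the semantics of
 * netdev_increment_features(): a NETIF_F_ALL_FOR_ALL feature survives
 * only if every slave advertises it (so it starts set and gets ANDed
 * away), while a NETIF_F_ONE_FOR_ALL feature is kept if at least one
 * slave has it (so it starts cleared and gets ORed in).
 */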
#define BOND_VLAN_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
				 NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \
				 NETIF_F_HIGHDMA | NETIF_F_LRO)

#define BOND_ENC_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
				 NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE)

#define BOND_MPLS_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
				 NETIF_F_GSO_SOFTWARE)

static void bond_compute_features(struct bonding *bond)
{
	unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
					IFF_XMIT_DST_RELEASE_PERM;
	netdev_features_t vlan_features = BOND_VLAN_FEATURES;
	netdev_features_t enc_features = BOND_ENC_FEATURES;
#ifdef CONFIG_XFRM_OFFLOAD
	netdev_features_t xfrm_features = BOND_XFRM_FEATURES;
#endif /* CONFIG_XFRM_OFFLOAD */
	netdev_features_t mpls_features = BOND_MPLS_FEATURES;
	struct net_device *bond_dev = bond->dev;
	struct list_head *iter;
	struct slave *slave;
	unsigned short max_hard_header_len = ETH_HLEN;
	unsigned int tso_max_size = TSO_MAX_SIZE;
	u16 tso_max_segs = TSO_MAX_SEGS;

	if (!bond_has_slaves(bond))
		goto done;
	vlan_features &= NETIF_F_ALL_FOR_ALL;
	mpls_features &= NETIF_F_ALL_FOR_ALL;

	bond_for_each_slave(bond, slave, iter) {
		vlan_features = netdev_increment_features(vlan_features,
							  slave->dev->vlan_features,
							  BOND_VLAN_FEATURES);

		enc_features = netdev_increment_features(enc_features,
							 slave->dev->hw_enc_features,
							 BOND_ENC_FEATURES);

#ifdef CONFIG_XFRM_OFFLOAD
		xfrm_features = netdev_increment_features(xfrm_features,
							  slave->dev->hw_enc_features,
							  BOND_XFRM_FEATURES);
#endif /* CONFIG_XFRM_OFFLOAD */

		mpls_features = netdev_increment_features(mpls_features,
							  slave->dev->mpls_features,
							  BOND_MPLS_FEATURES);

		dst_release_flag &= slave->dev->priv_flags;
		if (slave->dev->hard_header_len > max_hard_header_len)
			max_hard_header_len = slave->dev->hard_header_len;

		tso_max_size = min(tso_max_size, slave->dev->tso_max_size);
		tso_max_segs = min(tso_max_segs, slave->dev->tso_max_segs);
	}
	bond_dev->hard_header_len = max_hard_header_len;

done:
	bond_dev->vlan_features = vlan_features;
	bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
				    NETIF_F_HW_VLAN_CTAG_TX |
				    NETIF_F_HW_VLAN_STAG_TX;
#ifdef CONFIG_XFRM_OFFLOAD
	bond_dev->hw_enc_features |= xfrm_features;
#endif /* CONFIG_XFRM_OFFLOAD */
	bond_dev->mpls_features = mpls_features;
	netif_set_tso_max_segs(bond_dev, tso_max_segs);
	netif_set_tso_max_size(bond_dev, tso_max_size);

	bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
	if ((bond_dev->priv_flags & IFF_XMIT_DST_RELEASE_PERM) &&
	    dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
		bond_dev->priv_flags |= IFF_XMIT_DST_RELEASE;

	netdev_change_features(bond_dev);
}

static void bond_setup_by_slave(struct net_device *bond_dev,
				struct net_device *slave_dev)
{
	bond_dev->header_ops = slave_dev->header_ops;

	bond_dev->type = slave_dev->type;
	bond_dev->hard_header_len = slave_dev->hard_header_len;
	bond_dev->needed_headroom = slave_dev->needed_headroom;
	bond_dev->addr_len = slave_dev->addr_len;

	memcpy(bond_dev->broadcast, slave_dev->broadcast,
	       slave_dev->addr_len);
}
/* On bonding slaves other than the currently active slave, suppress
 * duplicates except for alb non-mcast/bcast.
 */
static bool bond_should_deliver_exact_match(struct sk_buff *skb,
					    struct slave *slave,
					    struct bonding *bond)
{
	if (bond_is_slave_inactive(slave)) {
		if (BOND_MODE(bond) == BOND_MODE_ALB &&
		    skb->pkt_type != PACKET_BROADCAST &&
		    skb->pkt_type != PACKET_MULTICAST)
			return false;
		return true;
	}
	return false;
}

static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct slave *slave;
	struct bonding *bond;
	int (*recv_probe)(const struct sk_buff *, struct bonding *,
			  struct slave *);
	int ret = RX_HANDLER_ANOTHER;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return RX_HANDLER_CONSUMED;

	*pskb = skb;

	slave = bond_slave_get_rcu(skb->dev);
	bond = slave->bond;

	recv_probe = READ_ONCE(bond->recv_probe);
	if (recv_probe) {
		ret = recv_probe(skb, bond, slave);
		if (ret == RX_HANDLER_CONSUMED) {
			consume_skb(skb);
			return ret;
		}
	}

	/*
	 * For packets determined by bond_should_deliver_exact_match() call to
	 * be suppressed we want to make an exception for link-local packets.
	 * This is necessary for e.g. LLDP daemons to be able to monitor
	 * inactive slave links without being forced to bind to them
	 * explicitly.
	 *
	 * At the same time, packets that are passed to the bonding master
	 * (including link-local ones) can have their originating interface
	 * determined via PACKET_ORIGDEV socket option.
	 */
	if (bond_should_deliver_exact_match(skb, slave, bond)) {
		if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
			return RX_HANDLER_PASS;
		return RX_HANDLER_EXACT;
	}

	skb->dev = bond->dev;

	if (BOND_MODE(bond) == BOND_MODE_ALB &&
	    netif_is_bridge_port(bond->dev) &&
	    skb->pkt_type == PACKET_HOST) {

		if (unlikely(skb_cow_head(skb,
					  skb->data - skb_mac_header(skb)))) {
			kfree_skb(skb);
			return RX_HANDLER_CONSUMED;
		}
		bond_hw_addr_copy(eth_hdr(skb)->h_dest, bond->dev->dev_addr,
				  bond->dev->addr_len);
	}

	return ret;
}

static enum netdev_lag_tx_type bond_lag_tx_type(struct bonding *bond)
{
	switch (BOND_MODE(bond)) {
	case BOND_MODE_ROUNDROBIN:
		return NETDEV_LAG_TX_TYPE_ROUNDROBIN;
	case BOND_MODE_ACTIVEBACKUP:
		return NETDEV_LAG_TX_TYPE_ACTIVEBACKUP;
	case BOND_MODE_BROADCAST:
		return NETDEV_LAG_TX_TYPE_BROADCAST;
	case BOND_MODE_XOR:
	case BOND_MODE_8023AD:
		return NETDEV_LAG_TX_TYPE_HASH;
	default:
		return NETDEV_LAG_TX_TYPE_UNKNOWN;
	}
}

static enum netdev_lag_hash bond_lag_hash_type(struct bonding *bond,
					       enum netdev_lag_tx_type type)
{
	if (type != NETDEV_LAG_TX_TYPE_HASH)
		return NETDEV_LAG_HASH_NONE;

	switch (bond->params.xmit_policy) {
	case BOND_XMIT_POLICY_LAYER2:
		return NETDEV_LAG_HASH_L2;
	case BOND_XMIT_POLICY_LAYER34:
		return NETDEV_LAG_HASH_L34;
	case BOND_XMIT_POLICY_LAYER23:
		return NETDEV_LAG_HASH_L23;
	case BOND_XMIT_POLICY_ENCAP23:
		return NETDEV_LAG_HASH_E23;
	case BOND_XMIT_POLICY_ENCAP34:
		return NETDEV_LAG_HASH_E34;
	case BOND_XMIT_POLICY_VLAN_SRCMAC:
		return NETDEV_LAG_HASH_VLAN_SRCMAC;
	default:
		return NETDEV_LAG_HASH_UNKNOWN;
	}
}
static int bond_master_upper_dev_link(struct bonding *bond, struct slave *slave,
				      struct netlink_ext_ack *extack)
{
	struct netdev_lag_upper_info lag_upper_info;
	enum netdev_lag_tx_type type;
	int err;

	type = bond_lag_tx_type(bond);
	lag_upper_info.tx_type = type;
	lag_upper_info.hash_type = bond_lag_hash_type(bond, type);

	err = netdev_master_upper_dev_link(slave->dev, bond->dev, slave,
					   &lag_upper_info, extack);
	if (err)
		return err;

	slave->dev->flags |= IFF_SLAVE;
	return 0;
}

static void bond_upper_dev_unlink(struct bonding *bond, struct slave *slave)
{
	netdev_upper_dev_unlink(slave->dev, bond->dev);
	slave->dev->flags &= ~IFF_SLAVE;
}

static void slave_kobj_release(struct kobject *kobj)
{
	struct slave *slave = to_slave(kobj);
	struct bonding *bond = bond_get_bond_by_slave(slave);

	cancel_delayed_work_sync(&slave->notify_work);
	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		kfree(SLAVE_AD_INFO(slave));

	kfree(slave);
}

static struct kobj_type slave_ktype = {
	.release = slave_kobj_release,
#ifdef CONFIG_SYSFS
	.sysfs_ops = &slave_sysfs_ops,
#endif
};

static int bond_kobj_init(struct slave *slave)
{
	int err;

	err = kobject_init_and_add(&slave->kobj, &slave_ktype,
				   &(slave->dev->dev.kobj), "bonding_slave");
	if (err)
		kobject_put(&slave->kobj);

	return err;
}

static struct slave *bond_alloc_slave(struct bonding *bond,
				      struct net_device *slave_dev)
{
	struct slave *slave = NULL;

	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	if (!slave)
		return NULL;

	slave->bond = bond;
	slave->dev = slave_dev;
	INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);

	if (bond_kobj_init(slave))
		return NULL;

	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
		SLAVE_AD_INFO(slave) = kzalloc(sizeof(struct ad_slave_info),
					       GFP_KERNEL);
		if (!SLAVE_AD_INFO(slave)) {
			kobject_put(&slave->kobj);
			return NULL;
		}
	}

	return slave;
}

static void bond_fill_ifbond(struct bonding *bond, struct ifbond *info)
{
	info->bond_mode = BOND_MODE(bond);
	info->miimon = bond->params.miimon;
	info->num_slaves = bond->slave_cnt;
}

static void bond_fill_ifslave(struct slave *slave, struct ifslave *info)
{
	strcpy(info->slave_name, slave->dev->name);
	info->link = slave->link;
	info->state = bond_slave_state(slave);
	info->link_failure_count = slave->link_failure_count;
}

static void bond_netdev_notify_work(struct work_struct *_work)
{
	struct slave *slave = container_of(_work, struct slave,
					   notify_work.work);

	if (rtnl_trylock()) {
		struct netdev_bonding_info binfo;

		bond_fill_ifslave(slave, &binfo.slave);
		bond_fill_ifbond(slave->bond, &binfo.master);
		netdev_bonding_info_change(slave->dev, &binfo);
		rtnl_unlock();
	} else {
		queue_delayed_work(slave->bond->wq, &slave->notify_work, 1);
	}
}

void bond_queue_slave_event(struct slave *slave)
{
	queue_delayed_work(slave->bond->wq, &slave->notify_work, 0);
}
void bond_lower_state_changed(struct slave *slave)
{
	struct netdev_lag_lower_state_info info;

	info.link_up = slave->link == BOND_LINK_UP ||
		       slave->link == BOND_LINK_FAIL;
	info.tx_enabled = bond_is_active_slave(slave);
	netdev_lower_state_changed(slave->dev, &info);
}

#define BOND_NL_ERR(bond_dev, extack, errmsg) do {		\
	if (extack)						\
		NL_SET_ERR_MSG(extack, errmsg);			\
	else							\
		netdev_err(bond_dev, "Error: %s\n", errmsg);	\
} while (0)

#define SLAVE_NL_ERR(bond_dev, slave_dev, extack, errmsg) do {		\
	if (extack)							\
		NL_SET_ERR_MSG(extack, errmsg);				\
	else								\
		slave_err(bond_dev, slave_dev, "Error: %s\n", errmsg);	\
} while (0)

/* enslave device <slave> to bond device <master> */
int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
		 struct netlink_ext_ack *extack)
{
	struct bonding *bond = netdev_priv(bond_dev);
	const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
	struct slave *new_slave = NULL, *prev_slave;
	struct sockaddr_storage ss;
	int link_reporting;
	int res = 0, i;

	if (slave_dev->flags & IFF_MASTER &&
	    !netif_is_bond_master(slave_dev)) {
		BOND_NL_ERR(bond_dev, extack,
			    "Device type (master device) cannot be enslaved");
		return -EPERM;
	}

	if (!bond->params.use_carrier &&
	    slave_dev->ethtool_ops->get_link == NULL &&
	    slave_ops->ndo_eth_ioctl == NULL) {
		slave_warn(bond_dev, slave_dev, "no link monitoring support\n");
	}

	/* already in-use? */
	if (netdev_is_rx_handler_busy(slave_dev)) {
		SLAVE_NL_ERR(bond_dev, slave_dev, extack,
			     "Device is in use and cannot be enslaved");
		return -EBUSY;
	}

	if (bond_dev == slave_dev) {
		BOND_NL_ERR(bond_dev, extack, "Cannot enslave bond to itself.");
		return -EPERM;
	}

	/* vlan challenged mutual exclusion */
	/* no need to lock since we're protected by rtnl_lock */
	if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) {
		slave_dbg(bond_dev, slave_dev, "is NETIF_F_VLAN_CHALLENGED\n");
		if (vlan_uses_dev(bond_dev)) {
			SLAVE_NL_ERR(bond_dev, slave_dev, extack,
				     "Can not enslave VLAN challenged device to VLAN enabled bond");
			return -EPERM;
		} else {
			slave_warn(bond_dev, slave_dev, "enslaved VLAN challenged slave. Adding VLANs will be blocked as long as it is part of bond.\n");
		}
	} else {
		slave_dbg(bond_dev, slave_dev, "is !NETIF_F_VLAN_CHALLENGED\n");
	}

	if (slave_dev->features & NETIF_F_HW_ESP)
		slave_dbg(bond_dev, slave_dev, "is esp-hw-offload capable\n");

	/* Old ifenslave binaries are no longer supported. These can
	 * be identified with moderate accuracy by the state of the slave:
	 * the current ifenslave will set the interface down prior to
	 * enslaving it; the old ifenslave will not.
	 */
	if (slave_dev->flags & IFF_UP) {
		SLAVE_NL_ERR(bond_dev, slave_dev, extack,
			     "Device can not be enslaved while up");
		return -EPERM;
	}

	/* set bonding device ether type by slave - bonding netdevices are
	 * created with ether_setup, so when the slave type is not ARPHRD_ETHER
	 * there is a need to override some of the type dependent attribs/funcs.
	 *
	 * bond ether type mutual exclusion - don't allow slaves of dissimilar
	 * ether type (eg ARPHRD_ETHER and ARPHRD_INFINIBAND) share the same bond
	 */
	if (!bond_has_slaves(bond)) {
		if (bond_dev->type != slave_dev->type) {
			slave_dbg(bond_dev, slave_dev, "change device type from %d to %d\n",
				  bond_dev->type, slave_dev->type);

			res = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE,
						       bond_dev);
			res = notifier_to_errno(res);
			if (res) {
				slave_err(bond_dev, slave_dev, "refused to change device type\n");
				return -EBUSY;
			}

			/* Flush unicast and multicast addresses */
			dev_uc_flush(bond_dev);
			dev_mc_flush(bond_dev);

			if (slave_dev->type != ARPHRD_ETHER)
				bond_setup_by_slave(bond_dev, slave_dev);
			else {
				ether_setup(bond_dev);
				bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
			}

			call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
						 bond_dev);
		}
	} else if (bond_dev->type != slave_dev->type) {
		SLAVE_NL_ERR(bond_dev, slave_dev, extack,
			     "Device type is different from other slaves");
		return -EINVAL;
	}

	if (slave_dev->type == ARPHRD_INFINIBAND &&
	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
		SLAVE_NL_ERR(bond_dev, slave_dev, extack,
			     "Only active-backup mode is supported for infiniband slaves");
		res = -EOPNOTSUPP;
		goto err_undo_flags;
	}

	if (!slave_ops->ndo_set_mac_address ||
	    slave_dev->type == ARPHRD_INFINIBAND) {
		slave_warn(bond_dev, slave_dev, "The slave device specified does not support setting the MAC address\n");
		if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
		    bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
			if (!bond_has_slaves(bond)) {
				bond->params.fail_over_mac = BOND_FOM_ACTIVE;
				slave_warn(bond_dev, slave_dev, "Setting fail_over_mac to active for active-backup mode\n");
			} else {
				SLAVE_NL_ERR(bond_dev, slave_dev, extack,
					     "Slave device does not support setting the MAC address, but fail_over_mac is not set to active");
				res = -EOPNOTSUPP;
				goto err_undo_flags;
			}
		}
	}

	call_netdevice_notifiers(NETDEV_JOIN, slave_dev);

	/* If this is the first slave, then we need to set the master's hardware
	 * address to be the same as the slave's.
	 */
	if (!bond_has_slaves(bond) &&
	    bond->dev->addr_assign_type == NET_ADDR_RANDOM) {
		res = bond_set_dev_addr(bond->dev, slave_dev);
		if (res)
			goto err_undo_flags;
	}

	new_slave = bond_alloc_slave(bond, slave_dev);
	if (!new_slave) {
		res = -ENOMEM;
		goto err_undo_flags;
	}

	/* Set the new_slave's queue_id to be zero. Queue ID mapping
	 * is set via sysfs or module option if desired.
	 */
1926 */ 1927 new_slave->queue_id = 0; 1928 1929 /* Save slave's original mtu and then set it to match the bond */ 1930 new_slave->original_mtu = slave_dev->mtu; 1931 res = dev_set_mtu(slave_dev, bond->dev->mtu); 1932 if (res) { 1933 slave_err(bond_dev, slave_dev, "Error %d calling dev_set_mtu\n", res); 1934 goto err_free; 1935 } 1936 1937 /* Save slave's original ("permanent") mac address for modes 1938 * that need it, and for restoring it upon release, and then 1939 * set it to the master's address 1940 */ 1941 bond_hw_addr_copy(new_slave->perm_hwaddr, slave_dev->dev_addr, 1942 slave_dev->addr_len); 1943 1944 if (!bond->params.fail_over_mac || 1945 BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { 1946 /* Set slave to master's mac address. The application already 1947 * set the master's mac address to that of the first slave 1948 */ 1949 memcpy(ss.__data, bond_dev->dev_addr, bond_dev->addr_len); 1950 ss.ss_family = slave_dev->type; 1951 res = dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, 1952 extack); 1953 if (res) { 1954 slave_err(bond_dev, slave_dev, "Error %d calling set_mac_address\n", res); 1955 goto err_restore_mtu; 1956 } 1957 } 1958 1959 /* set no_addrconf flag before open to prevent IPv6 addrconf */ 1960 slave_dev->priv_flags |= IFF_NO_ADDRCONF; 1961 1962 /* open the slave since the application closed it */ 1963 res = dev_open(slave_dev, extack); 1964 if (res) { 1965 slave_err(bond_dev, slave_dev, "Opening slave failed\n"); 1966 goto err_restore_mac; 1967 } 1968 1969 slave_dev->priv_flags |= IFF_BONDING; 1970 /* initialize slave stats */ 1971 dev_get_stats(new_slave->dev, &new_slave->slave_stats); 1972 1973 if (bond_is_lb(bond)) { 1974 /* bond_alb_init_slave() must be called before all other stages since 1975 * it might fail and we do not want to have to undo everything 1976 */ 1977 res = bond_alb_init_slave(bond, new_slave); 1978 if (res) 1979 goto err_close; 1980 } 1981 1982 res = vlan_vids_add_by_dev(slave_dev, bond_dev); 1983 if (res) { 1984 slave_err(bond_dev, slave_dev, "Couldn't add bond vlan ids\n"); 1985 goto err_close; 1986 } 1987 1988 prev_slave = bond_last_slave(bond); 1989 1990 new_slave->delay = 0; 1991 new_slave->link_failure_count = 0; 1992 1993 if (bond_update_speed_duplex(new_slave) && 1994 bond_needs_speed_duplex(bond)) 1995 new_slave->link = BOND_LINK_DOWN; 1996 1997 new_slave->last_rx = jiffies - 1998 (msecs_to_jiffies(bond->params.arp_interval) + 1); 1999 for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) 2000 new_slave->target_last_arp_rx[i] = new_slave->last_rx; 2001 2002 new_slave->last_tx = new_slave->last_rx; 2003 2004 if (bond->params.miimon && !bond->params.use_carrier) { 2005 link_reporting = bond_check_dev_link(bond, slave_dev, 1); 2006 2007 if ((link_reporting == -1) && !bond->params.arp_interval) { 2008 /* miimon is set but a bonded network driver 2009 * does not support ETHTOOL/MII and 2010 * arp_interval is not set. Note: if 2011 * use_carrier is enabled, we will never go 2012 * here (because netif_carrier is always 2013 * supported); thus, we don't need to change 2014 * the messages for netif_carrier. 2015 */ 2016 slave_warn(bond_dev, slave_dev, "MII and ETHTOOL support not available for slave, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! 
see bonding.txt for details\n"); 2017 } else if (link_reporting == -1) { 2018 /* unable to get link status using mii/ethtool */ 2019 slave_warn(bond_dev, slave_dev, "can't get link status from slave; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface\n"); 2020 } 2021 } 2022 2023 /* check for initial state */ 2024 new_slave->link = BOND_LINK_NOCHANGE; 2025 if (bond->params.miimon) { 2026 if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) { 2027 if (bond->params.updelay) { 2028 bond_set_slave_link_state(new_slave, 2029 BOND_LINK_BACK, 2030 BOND_SLAVE_NOTIFY_NOW); 2031 new_slave->delay = bond->params.updelay; 2032 } else { 2033 bond_set_slave_link_state(new_slave, 2034 BOND_LINK_UP, 2035 BOND_SLAVE_NOTIFY_NOW); 2036 } 2037 } else { 2038 bond_set_slave_link_state(new_slave, BOND_LINK_DOWN, 2039 BOND_SLAVE_NOTIFY_NOW); 2040 } 2041 } else if (bond->params.arp_interval) { 2042 bond_set_slave_link_state(new_slave, 2043 (netif_carrier_ok(slave_dev) ? 2044 BOND_LINK_UP : BOND_LINK_DOWN), 2045 BOND_SLAVE_NOTIFY_NOW); 2046 } else { 2047 bond_set_slave_link_state(new_slave, BOND_LINK_UP, 2048 BOND_SLAVE_NOTIFY_NOW); 2049 } 2050 2051 if (new_slave->link != BOND_LINK_DOWN) 2052 new_slave->last_link_up = jiffies; 2053 slave_dbg(bond_dev, slave_dev, "Initial state of slave is BOND_LINK_%s\n", 2054 new_slave->link == BOND_LINK_DOWN ? "DOWN" : 2055 (new_slave->link == BOND_LINK_UP ? "UP" : "BACK")); 2056 2057 if (bond_uses_primary(bond) && bond->params.primary[0]) { 2058 /* if there is a primary slave, remember it */ 2059 if (strcmp(bond->params.primary, new_slave->dev->name) == 0) { 2060 rcu_assign_pointer(bond->primary_slave, new_slave); 2061 bond->force_primary = true; 2062 } 2063 } 2064 2065 switch (BOND_MODE(bond)) { 2066 case BOND_MODE_ACTIVEBACKUP: 2067 bond_set_slave_inactive_flags(new_slave, 2068 BOND_SLAVE_NOTIFY_NOW); 2069 break; 2070 case BOND_MODE_8023AD: 2071 /* in 802.3ad mode, the internal mechanism 2072 * will activate the slaves in the selected 2073 * aggregator 2074 */ 2075 bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW); 2076 /* if this is the first slave */ 2077 if (!prev_slave) { 2078 SLAVE_AD_INFO(new_slave)->id = 1; 2079 /* Initialize AD with the number of times that the AD timer is called in 1 second; 2080 * can be called only after the mac address of the bond is set 2081 */ 2082 bond_3ad_initialize(bond); 2083 } else { 2084 SLAVE_AD_INFO(new_slave)->id = 2085 SLAVE_AD_INFO(prev_slave)->id + 1; 2086 } 2087 2088 bond_3ad_bind_slave(new_slave); 2089 break; 2090 case BOND_MODE_TLB: 2091 case BOND_MODE_ALB: 2092 bond_set_active_slave(new_slave); 2093 bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW); 2094 break; 2095 default: 2096 slave_dbg(bond_dev, slave_dev, "This slave is always active in trunk mode\n"); 2097 2098 /* always active in trunk mode */ 2099 bond_set_active_slave(new_slave); 2100 2101 /* In trunking mode there is little meaning to curr_active_slave 2102 * anyway (it holds no special properties of the bond device), 2103 * so we can change it without calling change_active_interface() 2104 */ 2105 if (!rcu_access_pointer(bond->curr_active_slave) && 2106 new_slave->link == BOND_LINK_UP) 2107 rcu_assign_pointer(bond->curr_active_slave, new_slave); 2108 2109 break; 2110 } /* switch(bond_mode) */ 2111 2112 #ifdef CONFIG_NET_POLL_CONTROLLER 2113 if (bond->dev->npinfo) { 2114 if (slave_enable_netpoll(new_slave)) { 2115 slave_info(bond_dev, slave_dev,
"master_dev is using netpoll, but new slave device does not support netpoll\n"); 2116 res = -EBUSY; 2117 goto err_detach; 2118 } 2119 } 2120 #endif 2121 2122 if (!(bond_dev->features & NETIF_F_LRO)) 2123 dev_disable_lro(slave_dev); 2124 2125 res = netdev_rx_handler_register(slave_dev, bond_handle_frame, 2126 new_slave); 2127 if (res) { 2128 slave_dbg(bond_dev, slave_dev, "Error %d calling netdev_rx_handler_register\n", res); 2129 goto err_detach; 2130 } 2131 2132 res = bond_master_upper_dev_link(bond, new_slave, extack); 2133 if (res) { 2134 slave_dbg(bond_dev, slave_dev, "Error %d calling bond_master_upper_dev_link\n", res); 2135 goto err_unregister; 2136 } 2137 2138 bond_lower_state_changed(new_slave); 2139 2140 res = bond_sysfs_slave_add(new_slave); 2141 if (res) { 2142 slave_dbg(bond_dev, slave_dev, "Error %d calling bond_sysfs_slave_add\n", res); 2143 goto err_upper_unlink; 2144 } 2145 2146 /* If the mode uses primary, then the following is handled by 2147 * bond_change_active_slave(). 2148 */ 2149 if (!bond_uses_primary(bond)) { 2150 /* set promiscuity level to new slave */ 2151 if (bond_dev->flags & IFF_PROMISC) { 2152 res = dev_set_promiscuity(slave_dev, 1); 2153 if (res) 2154 goto err_sysfs_del; 2155 } 2156 2157 /* set allmulti level to new slave */ 2158 if (bond_dev->flags & IFF_ALLMULTI) { 2159 res = dev_set_allmulti(slave_dev, 1); 2160 if (res) { 2161 if (bond_dev->flags & IFF_PROMISC) 2162 dev_set_promiscuity(slave_dev, -1); 2163 goto err_sysfs_del; 2164 } 2165 } 2166 2167 if (bond_dev->flags & IFF_UP) { 2168 netif_addr_lock_bh(bond_dev); 2169 dev_mc_sync_multiple(slave_dev, bond_dev); 2170 dev_uc_sync_multiple(slave_dev, bond_dev); 2171 netif_addr_unlock_bh(bond_dev); 2172 2173 if (BOND_MODE(bond) == BOND_MODE_8023AD) 2174 dev_mc_add(slave_dev, lacpdu_mcast_addr); 2175 } 2176 } 2177 2178 bond->slave_cnt++; 2179 bond_compute_features(bond); 2180 bond_set_carrier(bond); 2181 2182 if (bond_uses_primary(bond)) { 2183 block_netpoll_tx(); 2184 bond_select_active_slave(bond); 2185 unblock_netpoll_tx(); 2186 } 2187 2188 if (bond_mode_can_use_xmit_hash(bond)) 2189 bond_update_slave_arr(bond, NULL); 2190 2191 2192 if (!slave_dev->netdev_ops->ndo_bpf || 2193 !slave_dev->netdev_ops->ndo_xdp_xmit) { 2194 if (bond->xdp_prog) { 2195 SLAVE_NL_ERR(bond_dev, slave_dev, extack, 2196 "Slave does not support XDP"); 2197 res = -EOPNOTSUPP; 2198 goto err_sysfs_del; 2199 } 2200 } else if (bond->xdp_prog) { 2201 struct netdev_bpf xdp = { 2202 .command = XDP_SETUP_PROG, 2203 .flags = 0, 2204 .prog = bond->xdp_prog, 2205 .extack = extack, 2206 }; 2207 2208 if (dev_xdp_prog_count(slave_dev) > 0) { 2209 SLAVE_NL_ERR(bond_dev, slave_dev, extack, 2210 "Slave has XDP program loaded, please unload before enslaving"); 2211 res = -EOPNOTSUPP; 2212 goto err_sysfs_del; 2213 } 2214 2215 res = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp); 2216 if (res < 0) { 2217 /* ndo_bpf() sets extack error message */ 2218 slave_dbg(bond_dev, slave_dev, "Error %d calling ndo_bpf\n", res); 2219 goto err_sysfs_del; 2220 } 2221 if (bond->xdp_prog) 2222 bpf_prog_inc(bond->xdp_prog); 2223 } 2224 2225 slave_info(bond_dev, slave_dev, "Enslaving as %s interface with %s link\n", 2226 bond_is_active_slave(new_slave) ? "an active" : "a backup", 2227 new_slave->link != BOND_LINK_DOWN ? 
"an up" : "a down"); 2228 2229 /* enslave is successful */ 2230 bond_queue_slave_event(new_slave); 2231 return 0; 2232 2233 /* Undo stages on error */ 2234 err_sysfs_del: 2235 bond_sysfs_slave_del(new_slave); 2236 2237 err_upper_unlink: 2238 bond_upper_dev_unlink(bond, new_slave); 2239 2240 err_unregister: 2241 netdev_rx_handler_unregister(slave_dev); 2242 2243 err_detach: 2244 vlan_vids_del_by_dev(slave_dev, bond_dev); 2245 if (rcu_access_pointer(bond->primary_slave) == new_slave) 2246 RCU_INIT_POINTER(bond->primary_slave, NULL); 2247 if (rcu_access_pointer(bond->curr_active_slave) == new_slave) { 2248 block_netpoll_tx(); 2249 bond_change_active_slave(bond, NULL); 2250 bond_select_active_slave(bond); 2251 unblock_netpoll_tx(); 2252 } 2253 /* either primary_slave or curr_active_slave might've changed */ 2254 synchronize_rcu(); 2255 slave_disable_netpoll(new_slave); 2256 2257 err_close: 2258 if (!netif_is_bond_master(slave_dev)) 2259 slave_dev->priv_flags &= ~IFF_BONDING; 2260 dev_close(slave_dev); 2261 2262 err_restore_mac: 2263 slave_dev->priv_flags &= ~IFF_NO_ADDRCONF; 2264 if (!bond->params.fail_over_mac || 2265 BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { 2266 /* XXX TODO - fom follow mode needs to change master's 2267 * MAC if this slave's MAC is in use by the bond, or at 2268 * least print a warning. 2269 */ 2270 bond_hw_addr_copy(ss.__data, new_slave->perm_hwaddr, 2271 new_slave->dev->addr_len); 2272 ss.ss_family = slave_dev->type; 2273 dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, NULL); 2274 } 2275 2276 err_restore_mtu: 2277 dev_set_mtu(slave_dev, new_slave->original_mtu); 2278 2279 err_free: 2280 kobject_put(&new_slave->kobj); 2281 2282 err_undo_flags: 2283 /* Enslave of first slave has failed and we need to fix master's mac */ 2284 if (!bond_has_slaves(bond)) { 2285 if (ether_addr_equal_64bits(bond_dev->dev_addr, 2286 slave_dev->dev_addr)) 2287 eth_hw_addr_random(bond_dev); 2288 if (bond_dev->type != ARPHRD_ETHER) { 2289 dev_close(bond_dev); 2290 ether_setup(bond_dev); 2291 bond_dev->flags |= IFF_MASTER; 2292 bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING; 2293 } 2294 } 2295 2296 return res; 2297 } 2298 2299 /* Try to release the slave device <slave> from the bond device <master> 2300 * It is legal to access curr_active_slave without a lock because all the function 2301 * is RTNL-locked. If "all" is true it means that the function is being called 2302 * while destroying a bond interface and all slaves are being released. 2303 * 2304 * The rules for slave state should be: 2305 * for Active/Backup: 2306 * Active stays on all backups go down 2307 * for Bonded connections: 2308 * The first up interface should be left on and all others downed. 
2309 */ 2310 static int __bond_release_one(struct net_device *bond_dev, 2311 struct net_device *slave_dev, 2312 bool all, bool unregister) 2313 { 2314 struct bonding *bond = netdev_priv(bond_dev); 2315 struct slave *slave, *oldcurrent; 2316 struct sockaddr_storage ss; 2317 int old_flags = bond_dev->flags; 2318 netdev_features_t old_features = bond_dev->features; 2319 2320 /* slave is not a slave or master is not master of this slave */ 2321 if (!(slave_dev->flags & IFF_SLAVE) || 2322 !netdev_has_upper_dev(slave_dev, bond_dev)) { 2323 slave_dbg(bond_dev, slave_dev, "cannot release slave\n"); 2324 return -EINVAL; 2325 } 2326 2327 block_netpoll_tx(); 2328 2329 slave = bond_get_slave_by_dev(bond, slave_dev); 2330 if (!slave) { 2331 /* not a slave of this bond */ 2332 slave_info(bond_dev, slave_dev, "interface not enslaved\n"); 2333 unblock_netpoll_tx(); 2334 return -EINVAL; 2335 } 2336 2337 bond_set_slave_inactive_flags(slave, BOND_SLAVE_NOTIFY_NOW); 2338 2339 bond_sysfs_slave_del(slave); 2340 2341 /* recompute stats just before removing the slave */ 2342 bond_get_stats(bond->dev, &bond->bond_stats); 2343 2344 if (bond->xdp_prog) { 2345 struct netdev_bpf xdp = { 2346 .command = XDP_SETUP_PROG, 2347 .flags = 0, 2348 .prog = NULL, 2349 .extack = NULL, 2350 }; 2351 if (slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp)) 2352 slave_warn(bond_dev, slave_dev, "failed to unload XDP program\n"); 2353 } 2354 2355 /* unregister rx_handler early so bond_handle_frame wouldn't be called 2356 * for this slave anymore. 2357 */ 2358 netdev_rx_handler_unregister(slave_dev); 2359 2360 if (BOND_MODE(bond) == BOND_MODE_8023AD) 2361 bond_3ad_unbind_slave(slave); 2362 2363 bond_upper_dev_unlink(bond, slave); 2364 2365 if (bond_mode_can_use_xmit_hash(bond)) 2366 bond_update_slave_arr(bond, slave); 2367 2368 slave_info(bond_dev, slave_dev, "Releasing %s interface\n", 2369 bond_is_active_slave(slave) ? "active" : "backup"); 2370 2371 oldcurrent = rcu_access_pointer(bond->curr_active_slave); 2372 2373 RCU_INIT_POINTER(bond->current_arp_slave, NULL); 2374 2375 if (!all && (!bond->params.fail_over_mac || 2376 BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)) { 2377 if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) && 2378 bond_has_slaves(bond)) 2379 slave_warn(bond_dev, slave_dev, "the permanent HWaddr of slave - %pM - is still in use by bond - set the HWaddr of slave to a different address to avoid conflicts\n", 2380 slave->perm_hwaddr); 2381 } 2382 2383 if (rtnl_dereference(bond->primary_slave) == slave) 2384 RCU_INIT_POINTER(bond->primary_slave, NULL); 2385 2386 if (oldcurrent == slave) 2387 bond_change_active_slave(bond, NULL); 2388 2389 if (bond_is_lb(bond)) { 2390 /* Must be called only after the slave has been 2391 * detached from the list and the curr_active_slave 2392 * has been cleared (if our_slave == old_current), 2393 * but before a new active slave is selected. 2394 */ 2395 bond_alb_deinit_slave(bond, slave); 2396 } 2397 2398 if (all) { 2399 RCU_INIT_POINTER(bond->curr_active_slave, NULL); 2400 } else if (oldcurrent == slave) { 2401 /* Note that we hold RTNL over this sequence, so there 2402 * is no concern that another slave add/remove event 2403 * will interfere. 
2404 */ 2405 bond_select_active_slave(bond); 2406 } 2407 2408 bond_set_carrier(bond); 2409 if (!bond_has_slaves(bond)) 2410 eth_hw_addr_random(bond_dev); 2411 2412 unblock_netpoll_tx(); 2413 synchronize_rcu(); 2414 bond->slave_cnt--; 2415 2416 if (!bond_has_slaves(bond)) { 2417 call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev); 2418 call_netdevice_notifiers(NETDEV_RELEASE, bond->dev); 2419 } 2420 2421 bond_compute_features(bond); 2422 if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) && 2423 (old_features & NETIF_F_VLAN_CHALLENGED)) 2424 slave_info(bond_dev, slave_dev, "last VLAN challenged slave left bond - VLAN blocking is removed\n"); 2425 2426 vlan_vids_del_by_dev(slave_dev, bond_dev); 2427 2428 /* If the mode uses primary, then this case was handled above by 2429 * bond_change_active_slave(..., NULL) 2430 */ 2431 if (!bond_uses_primary(bond)) { 2432 /* unset promiscuity level from slave 2433 * NOTE: The NETDEV_CHANGEADDR call above may change the value 2434 * of the IFF_PROMISC flag in the bond_dev, but we need the 2435 * value of that flag before that change, as that was the value 2436 * when this slave was attached, so we cache at the start of the 2437 * function and use it here. Same goes for ALLMULTI below 2438 */ 2439 if (old_flags & IFF_PROMISC) 2440 dev_set_promiscuity(slave_dev, -1); 2441 2442 /* unset allmulti level from slave */ 2443 if (old_flags & IFF_ALLMULTI) 2444 dev_set_allmulti(slave_dev, -1); 2445 2446 if (old_flags & IFF_UP) 2447 bond_hw_addr_flush(bond_dev, slave_dev); 2448 } 2449 2450 slave_disable_netpoll(slave); 2451 2452 /* close slave before restoring its mac address */ 2453 dev_close(slave_dev); 2454 2455 slave_dev->priv_flags &= ~IFF_NO_ADDRCONF; 2456 2457 if (bond->params.fail_over_mac != BOND_FOM_ACTIVE || 2458 BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { 2459 /* restore original ("permanent") mac address */ 2460 bond_hw_addr_copy(ss.__data, slave->perm_hwaddr, 2461 slave->dev->addr_len); 2462 ss.ss_family = slave_dev->type; 2463 dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, NULL); 2464 } 2465 2466 if (unregister) 2467 __dev_set_mtu(slave_dev, slave->original_mtu); 2468 else 2469 dev_set_mtu(slave_dev, slave->original_mtu); 2470 2471 if (!netif_is_bond_master(slave_dev)) 2472 slave_dev->priv_flags &= ~IFF_BONDING; 2473 2474 kobject_put(&slave->kobj); 2475 2476 return 0; 2477 } 2478 2479 /* A wrapper used because of ndo_del_link */ 2480 int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) 2481 { 2482 return __bond_release_one(bond_dev, slave_dev, false, false); 2483 } 2484 2485 /* First release a slave and then destroy the bond if no more slaves are left. 2486 * Must be under rtnl_lock when this function is called. 
2487 */ 2488 static int bond_release_and_destroy(struct net_device *bond_dev, 2489 struct net_device *slave_dev) 2490 { 2491 struct bonding *bond = netdev_priv(bond_dev); 2492 int ret; 2493 2494 ret = __bond_release_one(bond_dev, slave_dev, false, true); 2495 if (ret == 0 && !bond_has_slaves(bond) && 2496 bond_dev->reg_state != NETREG_UNREGISTERING) { 2497 bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; 2498 netdev_info(bond_dev, "Destroying bond\n"); 2499 bond_remove_proc_entry(bond); 2500 unregister_netdevice(bond_dev); 2501 } 2502 return ret; 2503 } 2504 2505 static void bond_info_query(struct net_device *bond_dev, struct ifbond *info) 2506 { 2507 struct bonding *bond = netdev_priv(bond_dev); 2508 2509 bond_fill_ifbond(bond, info); 2510 } 2511 2512 static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *info) 2513 { 2514 struct bonding *bond = netdev_priv(bond_dev); 2515 struct list_head *iter; 2516 int i = 0, res = -ENODEV; 2517 struct slave *slave; 2518 2519 bond_for_each_slave(bond, slave, iter) { 2520 if (i++ == (int)info->slave_id) { 2521 res = 0; 2522 bond_fill_ifslave(slave, info); 2523 break; 2524 } 2525 } 2526 2527 return res; 2528 } 2529 2530 /*-------------------------------- Monitoring -------------------------------*/ 2531 2532 /* called with rcu_read_lock() */ 2533 static int bond_miimon_inspect(struct bonding *bond) 2534 { 2535 bool ignore_updelay = false; 2536 int link_state, commit = 0; 2537 struct list_head *iter; 2538 struct slave *slave; 2539 2540 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) { 2541 ignore_updelay = !rcu_dereference(bond->curr_active_slave); 2542 } else { 2543 struct bond_up_slave *usable_slaves; 2544 2545 usable_slaves = rcu_dereference(bond->usable_slaves); 2546 2547 if (usable_slaves && usable_slaves->count == 0) 2548 ignore_updelay = true; 2549 } 2550 2551 bond_for_each_slave_rcu(bond, slave, iter) { 2552 bond_propose_link_state(slave, BOND_LINK_NOCHANGE); 2553 2554 link_state = bond_check_dev_link(bond, slave->dev, 0); 2555 2556 switch (slave->link) { 2557 case BOND_LINK_UP: 2558 if (link_state) 2559 continue; 2560 2561 bond_propose_link_state(slave, BOND_LINK_FAIL); 2562 commit++; 2563 slave->delay = bond->params.downdelay; 2564 if (slave->delay) { 2565 slave_info(bond->dev, slave->dev, "link status down for %sinterface, disabling it in %d ms\n", 2566 (BOND_MODE(bond) == 2567 BOND_MODE_ACTIVEBACKUP) ? 2568 (bond_is_active_slave(slave) ? 2569 "active " : "backup ") : "", 2570 bond->params.downdelay * bond->params.miimon); 2571 } 2572 fallthrough; 2573 case BOND_LINK_FAIL: 2574 if (link_state) { 2575 /* recovered before downdelay expired */ 2576 bond_propose_link_state(slave, BOND_LINK_UP); 2577 slave->last_link_up = jiffies; 2578 slave_info(bond->dev, slave->dev, "link status up again after %d ms\n", 2579 (bond->params.downdelay - slave->delay) * 2580 bond->params.miimon); 2581 commit++; 2582 continue; 2583 } 2584 2585 if (slave->delay <= 0) { 2586 bond_propose_link_state(slave, BOND_LINK_DOWN); 2587 commit++; 2588 continue; 2589 } 2590 2591 slave->delay--; 2592 break; 2593 2594 case BOND_LINK_DOWN: 2595 if (!link_state) 2596 continue; 2597 2598 bond_propose_link_state(slave, BOND_LINK_BACK); 2599 commit++; 2600 slave->delay = bond->params.updelay; 2601 2602 if (slave->delay) { 2603 slave_info(bond->dev, slave->dev, "link status up, enabling it in %d ms\n", 2604 ignore_updelay ? 
0 : 2605 bond->params.updelay * 2606 bond->params.miimon); 2607 } 2608 fallthrough; 2609 case BOND_LINK_BACK: 2610 if (!link_state) { 2611 bond_propose_link_state(slave, BOND_LINK_DOWN); 2612 slave_info(bond->dev, slave->dev, "link status down again after %d ms\n", 2613 (bond->params.updelay - slave->delay) * 2614 bond->params.miimon); 2615 commit++; 2616 continue; 2617 } 2618 2619 if (ignore_updelay) 2620 slave->delay = 0; 2621 2622 if (slave->delay <= 0) { 2623 bond_propose_link_state(slave, BOND_LINK_UP); 2624 commit++; 2625 ignore_updelay = false; 2626 continue; 2627 } 2628 2629 slave->delay--; 2630 break; 2631 } 2632 } 2633 2634 return commit; 2635 } 2636 2637 static void bond_miimon_link_change(struct bonding *bond, 2638 struct slave *slave, 2639 char link) 2640 { 2641 switch (BOND_MODE(bond)) { 2642 case BOND_MODE_8023AD: 2643 bond_3ad_handle_link_change(slave, link); 2644 break; 2645 case BOND_MODE_TLB: 2646 case BOND_MODE_ALB: 2647 bond_alb_handle_link_change(bond, slave, link); 2648 break; 2649 case BOND_MODE_XOR: 2650 bond_update_slave_arr(bond, NULL); 2651 break; 2652 } 2653 } 2654 2655 static void bond_miimon_commit(struct bonding *bond) 2656 { 2657 struct slave *slave, *primary, *active; 2658 bool do_failover = false; 2659 struct list_head *iter; 2660 2661 ASSERT_RTNL(); 2662 2663 bond_for_each_slave(bond, slave, iter) { 2664 switch (slave->link_new_state) { 2665 case BOND_LINK_NOCHANGE: 2666 /* For 802.3ad mode, check current slave speed and 2667 * duplex again in case its port was disabled after 2668 * invalid speed/duplex reporting but recovered before 2669 * link monitoring could make a decision on the actual 2670 * link status 2671 */ 2672 if (BOND_MODE(bond) == BOND_MODE_8023AD && 2673 slave->link == BOND_LINK_UP) 2674 bond_3ad_adapter_speed_duplex_changed(slave); 2675 continue; 2676 2677 case BOND_LINK_UP: 2678 if (bond_update_speed_duplex(slave) && 2679 bond_needs_speed_duplex(bond)) { 2680 slave->link = BOND_LINK_DOWN; 2681 if (net_ratelimit()) 2682 slave_warn(bond->dev, slave->dev, 2683 "failed to get link speed/duplex\n"); 2684 continue; 2685 } 2686 bond_set_slave_link_state(slave, BOND_LINK_UP, 2687 BOND_SLAVE_NOTIFY_NOW); 2688 slave->last_link_up = jiffies; 2689 2690 primary = rtnl_dereference(bond->primary_slave); 2691 if (BOND_MODE(bond) == BOND_MODE_8023AD) { 2692 /* prevent it from being the active one */ 2693 bond_set_backup_slave(slave); 2694 } else if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { 2695 /* make it immediately active */ 2696 bond_set_active_slave(slave); 2697 } 2698 2699 slave_info(bond->dev, slave->dev, "link status definitely up, %u Mbps %s duplex\n", 2700 slave->speed == SPEED_UNKNOWN ? 0 : slave->speed, 2701 slave->duplex ? 
"full" : "half"); 2702 2703 bond_miimon_link_change(bond, slave, BOND_LINK_UP); 2704 2705 active = rtnl_dereference(bond->curr_active_slave); 2706 if (!active || slave == primary || slave->prio > active->prio) 2707 do_failover = true; 2708 2709 continue; 2710 2711 case BOND_LINK_DOWN: 2712 if (slave->link_failure_count < UINT_MAX) 2713 slave->link_failure_count++; 2714 2715 bond_set_slave_link_state(slave, BOND_LINK_DOWN, 2716 BOND_SLAVE_NOTIFY_NOW); 2717 2718 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP || 2719 BOND_MODE(bond) == BOND_MODE_8023AD) 2720 bond_set_slave_inactive_flags(slave, 2721 BOND_SLAVE_NOTIFY_NOW); 2722 2723 slave_info(bond->dev, slave->dev, "link status definitely down, disabling slave\n"); 2724 2725 bond_miimon_link_change(bond, slave, BOND_LINK_DOWN); 2726 2727 if (slave == rcu_access_pointer(bond->curr_active_slave)) 2728 do_failover = true; 2729 2730 continue; 2731 2732 default: 2733 slave_err(bond->dev, slave->dev, "invalid new link %d on slave\n", 2734 slave->link_new_state); 2735 bond_propose_link_state(slave, BOND_LINK_NOCHANGE); 2736 2737 continue; 2738 } 2739 } 2740 2741 if (do_failover) { 2742 block_netpoll_tx(); 2743 bond_select_active_slave(bond); 2744 unblock_netpoll_tx(); 2745 } 2746 2747 bond_set_carrier(bond); 2748 } 2749 2750 /* bond_mii_monitor 2751 * 2752 * Really a wrapper that splits the mii monitor into two phases: an 2753 * inspection, then (if inspection indicates something needs to be done) 2754 * an acquisition of appropriate locks followed by a commit phase to 2755 * implement whatever link state changes are indicated. 2756 */ 2757 static void bond_mii_monitor(struct work_struct *work) 2758 { 2759 struct bonding *bond = container_of(work, struct bonding, 2760 mii_work.work); 2761 bool should_notify_peers = false; 2762 bool commit; 2763 unsigned long delay; 2764 struct slave *slave; 2765 struct list_head *iter; 2766 2767 delay = msecs_to_jiffies(bond->params.miimon); 2768 2769 if (!bond_has_slaves(bond)) 2770 goto re_arm; 2771 2772 rcu_read_lock(); 2773 should_notify_peers = bond_should_notify_peers(bond); 2774 commit = !!bond_miimon_inspect(bond); 2775 if (bond->send_peer_notif) { 2776 rcu_read_unlock(); 2777 if (rtnl_trylock()) { 2778 bond->send_peer_notif--; 2779 rtnl_unlock(); 2780 } 2781 } else { 2782 rcu_read_unlock(); 2783 } 2784 2785 if (commit) { 2786 /* Race avoidance with bond_close cancel of workqueue */ 2787 if (!rtnl_trylock()) { 2788 delay = 1; 2789 should_notify_peers = false; 2790 goto re_arm; 2791 } 2792 2793 bond_for_each_slave(bond, slave, iter) { 2794 bond_commit_link_state(slave, BOND_SLAVE_NOTIFY_LATER); 2795 } 2796 bond_miimon_commit(bond); 2797 2798 rtnl_unlock(); /* might sleep, hold no other locks */ 2799 } 2800 2801 re_arm: 2802 if (bond->params.miimon) 2803 queue_delayed_work(bond->wq, &bond->mii_work, delay); 2804 2805 if (should_notify_peers) { 2806 if (!rtnl_trylock()) 2807 return; 2808 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev); 2809 rtnl_unlock(); 2810 } 2811 } 2812 2813 static int bond_upper_dev_walk(struct net_device *upper, 2814 struct netdev_nested_priv *priv) 2815 { 2816 __be32 ip = *(__be32 *)priv->data; 2817 2818 return ip == bond_confirm_addr(upper, 0, ip); 2819 } 2820 2821 static bool bond_has_this_ip(struct bonding *bond, __be32 ip) 2822 { 2823 struct netdev_nested_priv priv = { 2824 .data = (void *)&ip, 2825 }; 2826 bool ret = false; 2827 2828 if (ip == bond_confirm_addr(bond->dev, 0, ip)) 2829 return true; 2830 2831 rcu_read_lock(); 2832 if (netdev_walk_all_upper_dev_rcu(bond->dev, 
bond_upper_dev_walk, &priv)) 2833 ret = true; 2834 rcu_read_unlock(); 2835 2836 return ret; 2837 } 2838 2839 static bool bond_handle_vlan(struct slave *slave, struct bond_vlan_tag *tags, 2840 struct sk_buff *skb) 2841 { 2842 struct net_device *bond_dev = slave->bond->dev; 2843 struct net_device *slave_dev = slave->dev; 2844 struct bond_vlan_tag *outer_tag = tags; 2845 2846 if (!tags || tags->vlan_proto == VLAN_N_VID) 2847 return true; 2848 2849 tags++; 2850 2851 /* Go through all the tags backwards and add them to the packet */ 2852 while (tags->vlan_proto != VLAN_N_VID) { 2853 if (!tags->vlan_id) { 2854 tags++; 2855 continue; 2856 } 2857 2858 slave_dbg(bond_dev, slave_dev, "inner tag: proto %X vid %X\n", 2859 ntohs(outer_tag->vlan_proto), tags->vlan_id); 2860 skb = vlan_insert_tag_set_proto(skb, tags->vlan_proto, 2861 tags->vlan_id); 2862 if (!skb) { 2863 net_err_ratelimited("failed to insert inner VLAN tag\n"); 2864 return false; 2865 } 2866 2867 tags++; 2868 } 2869 /* Set the outer tag */ 2870 if (outer_tag->vlan_id) { 2871 slave_dbg(bond_dev, slave_dev, "outer tag: proto %X vid %X\n", 2872 ntohs(outer_tag->vlan_proto), outer_tag->vlan_id); 2873 __vlan_hwaccel_put_tag(skb, outer_tag->vlan_proto, 2874 outer_tag->vlan_id); 2875 } 2876 2877 return true; 2878 } 2879 2880 /* We go to the (large) trouble of VLAN tagging ARP frames because 2881 * switches in VLAN mode (especially if ports are configured as 2882 * "native" to a VLAN) might not pass non-tagged frames. 2883 */ 2884 static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip, 2885 __be32 src_ip, struct bond_vlan_tag *tags) 2886 { 2887 struct net_device *bond_dev = slave->bond->dev; 2888 struct net_device *slave_dev = slave->dev; 2889 struct sk_buff *skb; 2890 2891 slave_dbg(bond_dev, slave_dev, "arp %d on slave: dst %pI4 src %pI4\n", 2892 arp_op, &dest_ip, &src_ip); 2893 2894 skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip, 2895 NULL, slave_dev->dev_addr, NULL); 2896 2897 if (!skb) { 2898 net_err_ratelimited("ARP packet allocation failed\n"); 2899 return; 2900 } 2901 2902 if (bond_handle_vlan(slave, tags, skb)) { 2903 slave_update_last_tx(slave); 2904 arp_xmit(skb); 2905 } 2906 2907 return; 2908 } 2909 2910 /* Validate the device path between the @start_dev and the @end_dev. 2911 * The path is valid if the @end_dev is reachable through device 2912 * stacking. 2913 * When the path is validated, collect any vlan information in the 2914 * path. 
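 * The returned array holds the tag closest to @start_dev first (it becomes
 * the outermost tag on the wire, see bond_handle_vlan()) and is terminated
 * by a sentinel entry with vlan_proto == VLAN_N_VID; callers free it with
 * kfree(). NULL means @end_dev is not reachable from @start_dev.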
2915 */ 2916 struct bond_vlan_tag *bond_verify_device_path(struct net_device *start_dev, 2917 struct net_device *end_dev, 2918 int level) 2919 { 2920 struct bond_vlan_tag *tags; 2921 struct net_device *upper; 2922 struct list_head *iter; 2923 2924 if (start_dev == end_dev) { 2925 tags = kcalloc(level + 1, sizeof(*tags), GFP_ATOMIC); 2926 if (!tags) 2927 return ERR_PTR(-ENOMEM); 2928 tags[level].vlan_proto = VLAN_N_VID; 2929 return tags; 2930 } 2931 2932 netdev_for_each_upper_dev_rcu(start_dev, upper, iter) { 2933 tags = bond_verify_device_path(upper, end_dev, level + 1); 2934 if (IS_ERR_OR_NULL(tags)) { 2935 if (IS_ERR(tags)) 2936 return tags; 2937 continue; 2938 } 2939 if (is_vlan_dev(upper)) { 2940 tags[level].vlan_proto = vlan_dev_vlan_proto(upper); 2941 tags[level].vlan_id = vlan_dev_vlan_id(upper); 2942 } 2943 2944 return tags; 2945 } 2946 2947 return NULL; 2948 } 2949 2950 static void bond_arp_send_all(struct bonding *bond, struct slave *slave) 2951 { 2952 struct rtable *rt; 2953 struct bond_vlan_tag *tags; 2954 __be32 *targets = bond->params.arp_targets, addr; 2955 int i; 2956 2957 for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) { 2958 slave_dbg(bond->dev, slave->dev, "%s: target %pI4\n", 2959 __func__, &targets[i]); 2960 tags = NULL; 2961 2962 /* Find out through which dev should the packet go */ 2963 rt = ip_route_output(dev_net(bond->dev), targets[i], 0, 2964 RTO_ONLINK, 0); 2965 if (IS_ERR(rt)) { 2966 /* there's no route to target - try to send arp 2967 * probe to generate any traffic (arp_validate=0) 2968 */ 2969 if (bond->params.arp_validate) 2970 pr_warn_once("%s: no route to arp_ip_target %pI4 and arp_validate is set\n", 2971 bond->dev->name, 2972 &targets[i]); 2973 bond_arp_send(slave, ARPOP_REQUEST, targets[i], 2974 0, tags); 2975 continue; 2976 } 2977 2978 /* bond device itself */ 2979 if (rt->dst.dev == bond->dev) 2980 goto found; 2981 2982 rcu_read_lock(); 2983 tags = bond_verify_device_path(bond->dev, rt->dst.dev, 0); 2984 rcu_read_unlock(); 2985 2986 if (!IS_ERR_OR_NULL(tags)) 2987 goto found; 2988 2989 /* Not our device - skip */ 2990 slave_dbg(bond->dev, slave->dev, "no path to arp_ip_target %pI4 via rt.dev %s\n", 2991 &targets[i], rt->dst.dev ? 
rt->dst.dev->name : "NULL"); 2992 2993 ip_rt_put(rt); 2994 continue; 2995 2996 found: 2997 addr = bond_confirm_addr(rt->dst.dev, targets[i], 0); 2998 ip_rt_put(rt); 2999 bond_arp_send(slave, ARPOP_REQUEST, targets[i], addr, tags); 3000 kfree(tags); 3001 } 3002 } 3003 3004 static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 sip, __be32 tip) 3005 { 3006 int i; 3007 3008 if (!sip || !bond_has_this_ip(bond, tip)) { 3009 slave_dbg(bond->dev, slave->dev, "%s: sip %pI4 tip %pI4 not found\n", 3010 __func__, &sip, &tip); 3011 return; 3012 } 3013 3014 i = bond_get_targets_ip(bond->params.arp_targets, sip); 3015 if (i == -1) { 3016 slave_dbg(bond->dev, slave->dev, "%s: sip %pI4 not found in targets\n", 3017 __func__, &sip); 3018 return; 3019 } 3020 slave->last_rx = jiffies; 3021 slave->target_last_arp_rx[i] = jiffies; 3022 } 3023 3024 static int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, 3025 struct slave *slave) 3026 { 3027 struct arphdr *arp = (struct arphdr *)skb->data; 3028 struct slave *curr_active_slave, *curr_arp_slave; 3029 unsigned char *arp_ptr; 3030 __be32 sip, tip; 3031 unsigned int alen; 3032 3033 alen = arp_hdr_len(bond->dev); 3034 3035 if (alen > skb_headlen(skb)) { 3036 arp = kmalloc(alen, GFP_ATOMIC); 3037 if (!arp) 3038 goto out_unlock; 3039 if (skb_copy_bits(skb, 0, arp, alen) < 0) 3040 goto out_unlock; 3041 } 3042 3043 if (arp->ar_hln != bond->dev->addr_len || 3044 skb->pkt_type == PACKET_OTHERHOST || 3045 skb->pkt_type == PACKET_LOOPBACK || 3046 arp->ar_hrd != htons(ARPHRD_ETHER) || 3047 arp->ar_pro != htons(ETH_P_IP) || 3048 arp->ar_pln != 4) 3049 goto out_unlock; 3050 3051 arp_ptr = (unsigned char *)(arp + 1); 3052 arp_ptr += bond->dev->addr_len; 3053 memcpy(&sip, arp_ptr, 4); 3054 arp_ptr += 4 + bond->dev->addr_len; 3055 memcpy(&tip, arp_ptr, 4); 3056 3057 slave_dbg(bond->dev, slave->dev, "%s: %s/%d av %d sv %d sip %pI4 tip %pI4\n", 3058 __func__, slave->dev->name, bond_slave_state(slave), 3059 bond->params.arp_validate, slave_do_arp_validate(bond, slave), 3060 &sip, &tip); 3061 3062 curr_active_slave = rcu_dereference(bond->curr_active_slave); 3063 curr_arp_slave = rcu_dereference(bond->current_arp_slave); 3064 3065 /* We 'trust' the received ARP enough to validate it if: 3066 * 3067 * (a) the slave receiving the ARP is active (which includes the 3068 * current ARP slave, if any), or 3069 * 3070 * (b) the receiving slave isn't active, but there is a currently 3071 * active slave and it received valid arp reply(s) after it became 3072 * the currently active slave, or 3073 * 3074 * (c) there is an ARP slave that sent an ARP during the prior ARP 3075 * interval, and we receive an ARP reply on any slave. We accept 3076 * these because switch FDB update delays may deliver the ARP 3077 * reply to a slave other than the sender of the ARP request. 3078 * 3079 * Note: for (b), backup slaves are receiving the broadcast ARP 3080 * request, not a reply. This request passes from the sending 3081 * slave through the L2 switch(es) to the receiving slave. Since 3082 * this is checking the request, sip/tip are swapped for 3083 * validation. 3084 * 3085 * This is done to avoid endless looping when we can't reach the 3086 * arp_ip_target and fool ourselves with our own arp requests. 
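 * As a concrete illustration (addresses made up): with arp_ip_target
 * 10.0.0.1 and bond IP 10.0.0.2, case (a) validates a reply with
 * sip=10.0.0.1/tip=10.0.0.2, while case (b) sees our own broadcast
 * request (sip=10.0.0.2/tip=10.0.0.1), which is why sip and tip are
 * swapped in the second bond_validate_arp() call below.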
3087 */ 3088 if (bond_is_active_slave(slave)) 3089 bond_validate_arp(bond, slave, sip, tip); 3090 else if (curr_active_slave && 3091 time_after(slave_last_rx(bond, curr_active_slave), 3092 curr_active_slave->last_link_up)) 3093 bond_validate_arp(bond, slave, tip, sip); 3094 else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) && 3095 bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1)) 3096 bond_validate_arp(bond, slave, sip, tip); 3097 3098 out_unlock: 3099 if (arp != (struct arphdr *)skb->data) 3100 kfree(arp); 3101 return RX_HANDLER_ANOTHER; 3102 } 3103 3104 #if IS_ENABLED(CONFIG_IPV6) 3105 static void bond_ns_send(struct slave *slave, const struct in6_addr *daddr, 3106 const struct in6_addr *saddr, struct bond_vlan_tag *tags) 3107 { 3108 struct net_device *bond_dev = slave->bond->dev; 3109 struct net_device *slave_dev = slave->dev; 3110 struct in6_addr mcaddr; 3111 struct sk_buff *skb; 3112 3113 slave_dbg(bond_dev, slave_dev, "NS on slave: dst %pI6c src %pI6c\n", 3114 daddr, saddr); 3115 3116 skb = ndisc_ns_create(slave_dev, daddr, saddr, 0); 3117 if (!skb) { 3118 net_err_ratelimited("NS packet allocation failed\n"); 3119 return; 3120 } 3121 3122 addrconf_addr_solict_mult(daddr, &mcaddr); 3123 if (bond_handle_vlan(slave, tags, skb)) { 3124 slave_update_last_tx(slave); 3125 ndisc_send_skb(skb, &mcaddr, saddr); 3126 } 3127 } 3128 3129 static void bond_ns_send_all(struct bonding *bond, struct slave *slave) 3130 { 3131 struct in6_addr *targets = bond->params.ns_targets; 3132 struct bond_vlan_tag *tags; 3133 struct dst_entry *dst; 3134 struct in6_addr saddr; 3135 struct flowi6 fl6; 3136 int i; 3137 3138 for (i = 0; i < BOND_MAX_NS_TARGETS && !ipv6_addr_any(&targets[i]); i++) { 3139 slave_dbg(bond->dev, slave->dev, "%s: target %pI6c\n", 3140 __func__, &targets[i]); 3141 tags = NULL; 3142 3143 /* Find out through which dev should the packet go */ 3144 memset(&fl6, 0, sizeof(struct flowi6)); 3145 fl6.daddr = targets[i]; 3146 fl6.flowi6_oif = bond->dev->ifindex; 3147 3148 dst = ip6_route_output(dev_net(bond->dev), NULL, &fl6); 3149 if (dst->error) { 3150 dst_release(dst); 3151 /* there's no route to target - try to send arp 3152 * probe to generate any traffic (arp_validate=0) 3153 */ 3154 if (bond->params.arp_validate) 3155 pr_warn_once("%s: no route to ns_ip6_target %pI6c and arp_validate is set\n", 3156 bond->dev->name, 3157 &targets[i]); 3158 bond_ns_send(slave, &targets[i], &in6addr_any, tags); 3159 continue; 3160 } 3161 3162 /* bond device itself */ 3163 if (dst->dev == bond->dev) 3164 goto found; 3165 3166 rcu_read_lock(); 3167 tags = bond_verify_device_path(bond->dev, dst->dev, 0); 3168 rcu_read_unlock(); 3169 3170 if (!IS_ERR_OR_NULL(tags)) 3171 goto found; 3172 3173 /* Not our device - skip */ 3174 slave_dbg(bond->dev, slave->dev, "no path to ns_ip6_target %pI6c via dst->dev %s\n", 3175 &targets[i], dst->dev ? 
dst->dev->name : "NULL"); 3176 3177 dst_release(dst); 3178 continue; 3179 3180 found: 3181 if (!ipv6_dev_get_saddr(dev_net(dst->dev), dst->dev, &targets[i], 0, &saddr)) 3182 bond_ns_send(slave, &targets[i], &saddr, tags); 3183 else 3184 bond_ns_send(slave, &targets[i], &in6addr_any, tags); 3185 3186 dst_release(dst); 3187 kfree(tags); 3188 } 3189 } 3190 3191 static int bond_confirm_addr6(struct net_device *dev, 3192 struct netdev_nested_priv *priv) 3193 { 3194 struct in6_addr *addr = (struct in6_addr *)priv->data; 3195 3196 return ipv6_chk_addr(dev_net(dev), addr, dev, 0); 3197 } 3198 3199 static bool bond_has_this_ip6(struct bonding *bond, struct in6_addr *addr) 3200 { 3201 struct netdev_nested_priv priv = { 3202 .data = addr, 3203 }; 3204 int ret = false; 3205 3206 if (bond_confirm_addr6(bond->dev, &priv)) 3207 return true; 3208 3209 rcu_read_lock(); 3210 if (netdev_walk_all_upper_dev_rcu(bond->dev, bond_confirm_addr6, &priv)) 3211 ret = true; 3212 rcu_read_unlock(); 3213 3214 return ret; 3215 } 3216 3217 static void bond_validate_na(struct bonding *bond, struct slave *slave, 3218 struct in6_addr *saddr, struct in6_addr *daddr) 3219 { 3220 int i; 3221 3222 /* Ignore NAs that: 3223 * 1. Source address is the unspecified address. 3224 * 2. Dest address is neither the all-nodes multicast address nor 3225 * one that exists on the bond interface. 3226 */ 3227 if (ipv6_addr_any(saddr) || 3228 (!ipv6_addr_equal(daddr, &in6addr_linklocal_allnodes) && 3229 !bond_has_this_ip6(bond, daddr))) { 3230 slave_dbg(bond->dev, slave->dev, "%s: sip %pI6c tip %pI6c not found\n", 3231 __func__, saddr, daddr); 3232 return; 3233 } 3234 3235 i = bond_get_targets_ip6(bond->params.ns_targets, saddr); 3236 if (i == -1) { 3237 slave_dbg(bond->dev, slave->dev, "%s: sip %pI6c not found in targets\n", 3238 __func__, saddr); 3239 return; 3240 } 3241 slave->last_rx = jiffies; 3242 slave->target_last_arp_rx[i] = jiffies; 3243 } 3244 3245 static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond, 3246 struct slave *slave) 3247 { 3248 struct slave *curr_active_slave, *curr_arp_slave; 3249 struct in6_addr *saddr, *daddr; 3250 struct { 3251 struct ipv6hdr ip6; 3252 struct icmp6hdr icmp6; 3253 } *combined, _combined; 3254 3255 if (skb->pkt_type == PACKET_OTHERHOST || 3256 skb->pkt_type == PACKET_LOOPBACK) 3257 goto out; 3258 3259 combined = skb_header_pointer(skb, 0, sizeof(_combined), &_combined); 3260 if (!combined || combined->ip6.nexthdr != NEXTHDR_ICMP || 3261 combined->icmp6.icmp6_type != NDISC_NEIGHBOUR_ADVERTISEMENT) 3262 goto out; 3263 3264 saddr = &combined->ip6.saddr; 3265 daddr = &combined->ip6.daddr; 3266 3267 slave_dbg(bond->dev, slave->dev, "%s: %s/%d av %d sv %d sip %pI6c tip %pI6c\n", 3268 __func__, slave->dev->name, bond_slave_state(slave), 3269 bond->params.arp_validate, slave_do_arp_validate(bond, slave), 3270 saddr, daddr); 3271 3272 curr_active_slave = rcu_dereference(bond->curr_active_slave); 3273 curr_arp_slave = rcu_dereference(bond->current_arp_slave); 3274 3275 /* We 'trust' the received ARP enough to validate it if: 3276 * see bond_arp_rcv().
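 * The same cases (a), (b) and (c) apply, with NS/NA taking the place of
 * ARP request/reply; note that, unlike the IPv4 path, all three branches
 * below validate with the same saddr/daddr order.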
3277 */ 3278 if (bond_is_active_slave(slave)) 3279 bond_validate_na(bond, slave, saddr, daddr); 3280 else if (curr_active_slave && 3281 time_after(slave_last_rx(bond, curr_active_slave), 3282 curr_active_slave->last_link_up)) 3283 bond_validate_na(bond, slave, saddr, daddr); 3284 else if (curr_arp_slave && 3285 bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1)) 3286 bond_validate_na(bond, slave, saddr, daddr); 3287 3288 out: 3289 return RX_HANDLER_ANOTHER; 3290 } 3291 #endif 3292 3293 int bond_rcv_validate(const struct sk_buff *skb, struct bonding *bond, 3294 struct slave *slave) 3295 { 3296 #if IS_ENABLED(CONFIG_IPV6) 3297 bool is_ipv6 = skb->protocol == __cpu_to_be16(ETH_P_IPV6); 3298 #endif 3299 bool is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP); 3300 3301 slave_dbg(bond->dev, slave->dev, "%s: skb->dev %s\n", 3302 __func__, skb->dev->name); 3303 3304 /* Use arp validate logic for both ARP and NS */ 3305 if (!slave_do_arp_validate(bond, slave)) { 3306 if ((slave_do_arp_validate_only(bond) && is_arp) || 3307 #if IS_ENABLED(CONFIG_IPV6) 3308 (slave_do_arp_validate_only(bond) && is_ipv6) || 3309 #endif 3310 !slave_do_arp_validate_only(bond)) 3311 slave->last_rx = jiffies; 3312 return RX_HANDLER_ANOTHER; 3313 } else if (is_arp) { 3314 return bond_arp_rcv(skb, bond, slave); 3315 #if IS_ENABLED(CONFIG_IPV6) 3316 } else if (is_ipv6) { 3317 return bond_na_rcv(skb, bond, slave); 3318 #endif 3319 } else { 3320 return RX_HANDLER_ANOTHER; 3321 } 3322 } 3323 3324 static void bond_send_validate(struct bonding *bond, struct slave *slave) 3325 { 3326 bond_arp_send_all(bond, slave); 3327 #if IS_ENABLED(CONFIG_IPV6) 3328 bond_ns_send_all(bond, slave); 3329 #endif 3330 } 3331 3332 /* function to verify if we're in the arp_interval timeslice, returns true if 3333 * (last_act - arp_interval) <= jiffies <= (last_act + mod * arp_interval + 3334 * arp_interval/2) . the arp_interval/2 is needed for really fast networks. 3335 */ 3336 static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act, 3337 int mod) 3338 { 3339 int delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval); 3340 3341 return time_in_range(jiffies, 3342 last_act - delta_in_ticks, 3343 last_act + mod * delta_in_ticks + delta_in_ticks/2); 3344 } 3345 3346 /* This function is called regularly to monitor each slave's link 3347 * ensuring that traffic is being sent and received when arp monitoring 3348 * is used in load-balancing mode. if the adapter has been dormant, then an 3349 * arp is transmitted to generate traffic. see activebackup_arp_monitor for 3350 * arp monitoring in active backup mode. 3351 */ 3352 static void bond_loadbalance_arp_mon(struct bonding *bond) 3353 { 3354 struct slave *slave, *oldcurrent; 3355 struct list_head *iter; 3356 int do_failover = 0, slave_state_changed = 0; 3357 3358 if (!bond_has_slaves(bond)) 3359 goto re_arm; 3360 3361 rcu_read_lock(); 3362 3363 oldcurrent = rcu_dereference(bond->curr_active_slave); 3364 /* see if any of the previous devices are up now (i.e. they have 3365 * xmt and rcv traffic). the curr_active_slave does not come into 3366 * the picture unless it is null. also, slave->last_link_up is not 3367 * needed here because we send an arp on each slave and give a slave 3368 * as long as it needs to get the tx/rx within the delta. 3369 * TODO: what about up/down delay in arp mode? 
it wasn't here before 3370 * so it can wait 3371 */ 3372 bond_for_each_slave_rcu(bond, slave, iter) { 3373 unsigned long last_tx = slave_last_tx(slave); 3374 3375 bond_propose_link_state(slave, BOND_LINK_NOCHANGE); 3376 3377 if (slave->link != BOND_LINK_UP) { 3378 if (bond_time_in_interval(bond, last_tx, 1) && 3379 bond_time_in_interval(bond, slave->last_rx, 1)) { 3380 3381 bond_propose_link_state(slave, BOND_LINK_UP); 3382 slave_state_changed = 1; 3383 3384 /* primary_slave has no meaning in round-robin 3385 * mode. the window of a slave being up and 3386 * curr_active_slave being null after enslaving 3387 * is closed. 3388 */ 3389 if (!oldcurrent) { 3390 slave_info(bond->dev, slave->dev, "link status definitely up\n"); 3391 do_failover = 1; 3392 } else { 3393 slave_info(bond->dev, slave->dev, "interface is now up\n"); 3394 } 3395 } 3396 } else { 3397 /* slave->link == BOND_LINK_UP */ 3398 3399 /* not all switches will respond to an arp request 3400 * when the source ip is 0, so don't take the link down 3401 * if we don't know our ip yet 3402 */ 3403 if (!bond_time_in_interval(bond, last_tx, bond->params.missed_max) || 3404 !bond_time_in_interval(bond, slave->last_rx, bond->params.missed_max)) { 3405 3406 bond_propose_link_state(slave, BOND_LINK_DOWN); 3407 slave_state_changed = 1; 3408 3409 if (slave->link_failure_count < UINT_MAX) 3410 slave->link_failure_count++; 3411 3412 slave_info(bond->dev, slave->dev, "interface is now down\n"); 3413 3414 if (slave == oldcurrent) 3415 do_failover = 1; 3416 } 3417 } 3418 3419 /* note: if switch is in round-robin mode, all links 3420 * must tx arp to ensure all links rx an arp - otherwise 3421 * links may oscillate or not come up at all; if switch is 3422 * in something like xor mode, there is nothing we can 3423 * do - all replies will be rx'ed on same link causing slaves 3424 * to be unstable during low/no traffic periods 3425 */ 3426 if (bond_slave_is_up(slave)) 3427 bond_send_validate(bond, slave); 3428 } 3429 3430 rcu_read_unlock(); 3431 3432 if (do_failover || slave_state_changed) { 3433 if (!rtnl_trylock()) 3434 goto re_arm; 3435 3436 bond_for_each_slave(bond, slave, iter) { 3437 if (slave->link_new_state != BOND_LINK_NOCHANGE) 3438 slave->link = slave->link_new_state; 3439 } 3440 3441 if (slave_state_changed) { 3442 bond_slave_state_change(bond); 3443 if (BOND_MODE(bond) == BOND_MODE_XOR) 3444 bond_update_slave_arr(bond, NULL); 3445 } 3446 if (do_failover) { 3447 block_netpoll_tx(); 3448 bond_select_active_slave(bond); 3449 unblock_netpoll_tx(); 3450 } 3451 rtnl_unlock(); 3452 } 3453 3454 re_arm: 3455 if (bond->params.arp_interval) 3456 queue_delayed_work(bond->wq, &bond->arp_work, 3457 msecs_to_jiffies(bond->params.arp_interval)); 3458 } 3459 3460 /* Called to inspect slaves for active-backup mode ARP monitor link state 3461 * changes. Sets proposed link state in slaves to specify what action 3462 * should take place for the slave. Returns 0 if no changes are found, >0 3463 * if changes to link states must be committed. 3464 * 3465 * Called with rcu_read_lock held. 
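 * Proposed states are committed later, under RTNL, by bond_ab_arp_commit();
 * bond_activebackup_arp_mon() below drives this two-phase
 * inspect/commit cycle.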
3466 */ 3467 static int bond_ab_arp_inspect(struct bonding *bond) 3468 { 3469 unsigned long last_tx, last_rx; 3470 struct list_head *iter; 3471 struct slave *slave; 3472 int commit = 0; 3473 3474 bond_for_each_slave_rcu(bond, slave, iter) { 3475 bond_propose_link_state(slave, BOND_LINK_NOCHANGE); 3476 last_rx = slave_last_rx(bond, slave); 3477 3478 if (slave->link != BOND_LINK_UP) { 3479 if (bond_time_in_interval(bond, last_rx, 1)) { 3480 bond_propose_link_state(slave, BOND_LINK_UP); 3481 commit++; 3482 } else if (slave->link == BOND_LINK_BACK) { 3483 bond_propose_link_state(slave, BOND_LINK_FAIL); 3484 commit++; 3485 } 3486 continue; 3487 } 3488 3489 /* Give slaves 2*delta after being enslaved or made 3490 * active. This avoids bouncing, as the last receive 3491 * times need a full ARP monitor cycle to be updated. 3492 */ 3493 if (bond_time_in_interval(bond, slave->last_link_up, 2)) 3494 continue; 3495 3496 /* Backup slave is down if: 3497 * - No current_arp_slave AND 3498 * - more than (missed_max+1)*delta since last receive AND 3499 * - the bond has an IP address 3500 * 3501 * Note: a non-null current_arp_slave indicates 3502 * the curr_active_slave went down and we are 3503 * searching for a new one; under this condition 3504 * we only take the curr_active_slave down - this 3505 * gives each slave a chance to tx/rx traffic 3506 * before being taken out 3507 */ 3508 if (!bond_is_active_slave(slave) && 3509 !rcu_access_pointer(bond->current_arp_slave) && 3510 !bond_time_in_interval(bond, last_rx, bond->params.missed_max + 1)) { 3511 bond_propose_link_state(slave, BOND_LINK_DOWN); 3512 commit++; 3513 } 3514 3515 /* Active slave is down if: 3516 * - more than missed_max*delta since transmitting OR 3517 * - (more than missed_max*delta since receive AND 3518 * the bond has an IP address) 3519 */ 3520 last_tx = slave_last_tx(slave); 3521 if (bond_is_active_slave(slave) && 3522 (!bond_time_in_interval(bond, last_tx, bond->params.missed_max) || 3523 !bond_time_in_interval(bond, last_rx, bond->params.missed_max))) { 3524 bond_propose_link_state(slave, BOND_LINK_DOWN); 3525 commit++; 3526 } 3527 } 3528 3529 return commit; 3530 } 3531 3532 /* Called to commit link state changes noted by inspection step of 3533 * active-backup mode ARP monitor. 3534 * 3535 * Called with RTNL hold. 
3536 */ 3537 static void bond_ab_arp_commit(struct bonding *bond) 3538 { 3539 bool do_failover = false; 3540 struct list_head *iter; 3541 unsigned long last_tx; 3542 struct slave *slave; 3543 3544 bond_for_each_slave(bond, slave, iter) { 3545 switch (slave->link_new_state) { 3546 case BOND_LINK_NOCHANGE: 3547 continue; 3548 3549 case BOND_LINK_UP: 3550 last_tx = slave_last_tx(slave); 3551 if (rtnl_dereference(bond->curr_active_slave) != slave || 3552 (!rtnl_dereference(bond->curr_active_slave) && 3553 bond_time_in_interval(bond, last_tx, 1))) { 3554 struct slave *current_arp_slave; 3555 3556 current_arp_slave = rtnl_dereference(bond->current_arp_slave); 3557 bond_set_slave_link_state(slave, BOND_LINK_UP, 3558 BOND_SLAVE_NOTIFY_NOW); 3559 if (current_arp_slave) { 3560 bond_set_slave_inactive_flags( 3561 current_arp_slave, 3562 BOND_SLAVE_NOTIFY_NOW); 3563 RCU_INIT_POINTER(bond->current_arp_slave, NULL); 3564 } 3565 3566 slave_info(bond->dev, slave->dev, "link status definitely up\n"); 3567 3568 if (!rtnl_dereference(bond->curr_active_slave) || 3569 slave == rtnl_dereference(bond->primary_slave) || 3570 slave->prio > rtnl_dereference(bond->curr_active_slave)->prio) 3571 do_failover = true; 3572 3573 } 3574 3575 continue; 3576 3577 case BOND_LINK_DOWN: 3578 if (slave->link_failure_count < UINT_MAX) 3579 slave->link_failure_count++; 3580 3581 bond_set_slave_link_state(slave, BOND_LINK_DOWN, 3582 BOND_SLAVE_NOTIFY_NOW); 3583 bond_set_slave_inactive_flags(slave, 3584 BOND_SLAVE_NOTIFY_NOW); 3585 3586 slave_info(bond->dev, slave->dev, "link status definitely down, disabling slave\n"); 3587 3588 if (slave == rtnl_dereference(bond->curr_active_slave)) { 3589 RCU_INIT_POINTER(bond->current_arp_slave, NULL); 3590 do_failover = true; 3591 } 3592 3593 continue; 3594 3595 case BOND_LINK_FAIL: 3596 bond_set_slave_link_state(slave, BOND_LINK_FAIL, 3597 BOND_SLAVE_NOTIFY_NOW); 3598 bond_set_slave_inactive_flags(slave, 3599 BOND_SLAVE_NOTIFY_NOW); 3600 3601 /* A slave has just been enslaved and has become 3602 * the current active slave. 3603 */ 3604 if (rtnl_dereference(bond->curr_active_slave)) 3605 RCU_INIT_POINTER(bond->current_arp_slave, NULL); 3606 continue; 3607 3608 default: 3609 slave_err(bond->dev, slave->dev, 3610 "impossible: link_new_state %d on slave\n", 3611 slave->link_new_state); 3612 continue; 3613 } 3614 } 3615 3616 if (do_failover) { 3617 block_netpoll_tx(); 3618 bond_select_active_slave(bond); 3619 unblock_netpoll_tx(); 3620 } 3621 3622 bond_set_carrier(bond); 3623 } 3624 3625 /* Send ARP probes for active-backup mode ARP monitor. 3626 * 3627 * Called with rcu_read_lock held. 
3628 */ 3629 static bool bond_ab_arp_probe(struct bonding *bond) 3630 { 3631 struct slave *slave, *before = NULL, *new_slave = NULL, 3632 *curr_arp_slave = rcu_dereference(bond->current_arp_slave), 3633 *curr_active_slave = rcu_dereference(bond->curr_active_slave); 3634 struct list_head *iter; 3635 bool found = false; 3636 bool should_notify_rtnl = BOND_SLAVE_NOTIFY_LATER; 3637 3638 if (curr_arp_slave && curr_active_slave) 3639 netdev_info(bond->dev, "PROBE: c_arp %s && cas %s BAD\n", 3640 curr_arp_slave->dev->name, 3641 curr_active_slave->dev->name); 3642 3643 if (curr_active_slave) { 3644 bond_send_validate(bond, curr_active_slave); 3645 return should_notify_rtnl; 3646 } 3647 3648 /* if we don't have a curr_active_slave, search for the next available 3649 * backup slave from the current_arp_slave and make it the candidate 3650 * for becoming the curr_active_slave 3651 */ 3652 3653 if (!curr_arp_slave) { 3654 curr_arp_slave = bond_first_slave_rcu(bond); 3655 if (!curr_arp_slave) 3656 return should_notify_rtnl; 3657 } 3658 3659 bond_for_each_slave_rcu(bond, slave, iter) { 3660 if (!found && !before && bond_slave_is_up(slave)) 3661 before = slave; 3662 3663 if (found && !new_slave && bond_slave_is_up(slave)) 3664 new_slave = slave; 3665 /* if the link state is up at this point, we 3666 * mark it down - this can happen if we have 3667 * simultaneous link failures and 3668 * reselect_active_interface doesn't make this 3669 * one the current slave so it is still marked 3670 * up when it is actually down 3671 */ 3672 if (!bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) { 3673 bond_set_slave_link_state(slave, BOND_LINK_DOWN, 3674 BOND_SLAVE_NOTIFY_LATER); 3675 if (slave->link_failure_count < UINT_MAX) 3676 slave->link_failure_count++; 3677 3678 bond_set_slave_inactive_flags(slave, 3679 BOND_SLAVE_NOTIFY_LATER); 3680 3681 slave_info(bond->dev, slave->dev, "backup interface is now down\n"); 3682 } 3683 if (slave == curr_arp_slave) 3684 found = true; 3685 } 3686 3687 if (!new_slave && before) 3688 new_slave = before; 3689 3690 if (!new_slave) 3691 goto check_state; 3692 3693 bond_set_slave_link_state(new_slave, BOND_LINK_BACK, 3694 BOND_SLAVE_NOTIFY_LATER); 3695 bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER); 3696 bond_send_validate(bond, new_slave); 3697 new_slave->last_link_up = jiffies; 3698 rcu_assign_pointer(bond->current_arp_slave, new_slave); 3699 3700 check_state: 3701 bond_for_each_slave_rcu(bond, slave, iter) { 3702 if (slave->should_notify || slave->should_notify_link) { 3703 should_notify_rtnl = BOND_SLAVE_NOTIFY_NOW; 3704 break; 3705 } 3706 } 3707 return should_notify_rtnl; 3708 } 3709 3710 static void bond_activebackup_arp_mon(struct bonding *bond) 3711 { 3712 bool should_notify_peers = false; 3713 bool should_notify_rtnl = false; 3714 int delta_in_ticks; 3715 3716 delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval); 3717 3718 if (!bond_has_slaves(bond)) 3719 goto re_arm; 3720 3721 rcu_read_lock(); 3722 3723 should_notify_peers = bond_should_notify_peers(bond); 3724 3725 if (bond_ab_arp_inspect(bond)) { 3726 rcu_read_unlock(); 3727 3728 /* Race avoidance with bond_close flush of workqueue */ 3729 if (!rtnl_trylock()) { 3730 delta_in_ticks = 1; 3731 should_notify_peers = false; 3732 goto re_arm; 3733 } 3734 3735 bond_ab_arp_commit(bond); 3736 3737 rtnl_unlock(); 3738 rcu_read_lock(); 3739 } 3740 3741 should_notify_rtnl = bond_ab_arp_probe(bond); 3742 rcu_read_unlock(); 3743 3744 re_arm: 3745 if (bond->params.arp_interval) 3746 
queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); 3747 3748 if (should_notify_peers || should_notify_rtnl) { 3749 if (!rtnl_trylock()) 3750 return; 3751 3752 if (should_notify_peers) { 3753 bond->send_peer_notif--; 3754 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, 3755 bond->dev); 3756 } 3757 if (should_notify_rtnl) { 3758 bond_slave_state_notify(bond); 3759 bond_slave_link_notify(bond); 3760 } 3761 3762 rtnl_unlock(); 3763 } 3764 } 3765 3766 static void bond_arp_monitor(struct work_struct *work) 3767 { 3768 struct bonding *bond = container_of(work, struct bonding, 3769 arp_work.work); 3770 3771 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) 3772 bond_activebackup_arp_mon(bond); 3773 else 3774 bond_loadbalance_arp_mon(bond); 3775 } 3776 3777 /*-------------------------- netdev event handling --------------------------*/ 3778 3779 /* Change device name */ 3780 static int bond_event_changename(struct bonding *bond) 3781 { 3782 bond_remove_proc_entry(bond); 3783 bond_create_proc_entry(bond); 3784 3785 bond_debug_reregister(bond); 3786 3787 return NOTIFY_DONE; 3788 } 3789 3790 static int bond_master_netdev_event(unsigned long event, 3791 struct net_device *bond_dev) 3792 { 3793 struct bonding *event_bond = netdev_priv(bond_dev); 3794 3795 netdev_dbg(bond_dev, "%s called\n", __func__); 3796 3797 switch (event) { 3798 case NETDEV_CHANGENAME: 3799 return bond_event_changename(event_bond); 3800 case NETDEV_UNREGISTER: 3801 bond_remove_proc_entry(event_bond); 3802 #ifdef CONFIG_XFRM_OFFLOAD 3803 xfrm_dev_state_flush(dev_net(bond_dev), bond_dev, true); 3804 #endif /* CONFIG_XFRM_OFFLOAD */ 3805 break; 3806 case NETDEV_REGISTER: 3807 bond_create_proc_entry(event_bond); 3808 break; 3809 default: 3810 break; 3811 } 3812 3813 return NOTIFY_DONE; 3814 } 3815 3816 static int bond_slave_netdev_event(unsigned long event, 3817 struct net_device *slave_dev) 3818 { 3819 struct slave *slave = bond_slave_get_rtnl(slave_dev), *primary; 3820 struct bonding *bond; 3821 struct net_device *bond_dev; 3822 3823 /* A netdev event can be generated while enslaving a device 3824 * before netdev_rx_handler_register is called in which case 3825 * slave will be NULL 3826 */ 3827 if (!slave) { 3828 netdev_dbg(slave_dev, "%s called on NULL slave\n", __func__); 3829 return NOTIFY_DONE; 3830 } 3831 3832 bond_dev = slave->bond->dev; 3833 bond = slave->bond; 3834 primary = rtnl_dereference(bond->primary_slave); 3835 3836 slave_dbg(bond_dev, slave_dev, "%s called\n", __func__); 3837 3838 switch (event) { 3839 case NETDEV_UNREGISTER: 3840 if (bond_dev->type != ARPHRD_ETHER) 3841 bond_release_and_destroy(bond_dev, slave_dev); 3842 else 3843 __bond_release_one(bond_dev, slave_dev, false, true); 3844 break; 3845 case NETDEV_UP: 3846 case NETDEV_CHANGE: 3847 /* For 802.3ad mode only: 3848 * Getting invalid Speed/Duplex values here will put slave 3849 * in weird state. Mark it as link-fail if the link was 3850 * previously up or link-down if it hasn't yet come up, and 3851 * let link-monitoring (miimon) set it right when correct 3852 * speeds/duplex are available. 3853 */ 3854 if (bond_update_speed_duplex(slave) && 3855 BOND_MODE(bond) == BOND_MODE_8023AD) { 3856 if (slave->last_link_up) 3857 slave->link = BOND_LINK_FAIL; 3858 else 3859 slave->link = BOND_LINK_DOWN; 3860 } 3861 3862 if (BOND_MODE(bond) == BOND_MODE_8023AD) 3863 bond_3ad_adapter_speed_duplex_changed(slave); 3864 fallthrough; 3865 case NETDEV_DOWN: 3866 /* Refresh slave-array if applicable! 
* If the setup does not use miimon or arpmon (mode-specific!), 3868 * then these events will not cause the slave-array to be 3869 * refreshed. This will cause xmit to use a slave that is not 3870 * usable. Avoid such a situation by refreshing the array at these 3871 * events. If these (miimon/arpmon) parameters are configured 3872 * then the array gets refreshed twice and that should be fine! 3873 */ 3874 if (bond_mode_can_use_xmit_hash(bond)) 3875 bond_update_slave_arr(bond, NULL); 3876 break; 3877 case NETDEV_CHANGEMTU: 3878 /* TODO: Should slaves be allowed to 3879 * independently alter their MTU? For 3880 * an active-backup bond, slaves need 3881 * not be the same type of device, so 3882 * MTUs may vary. For other modes, 3883 * slaves arguably should have the 3884 * same MTUs. To do this, we'd need to 3885 * take over the slave's change_mtu 3886 * function for the duration of their 3887 * servitude. 3888 */ 3889 break; 3890 case NETDEV_CHANGENAME: 3891 /* we don't care if we don't have primary set */ 3892 if (!bond_uses_primary(bond) || 3893 !bond->params.primary[0]) 3894 break; 3895 3896 if (slave == primary) { 3897 /* slave's name changed - it's no longer primary */ 3898 RCU_INIT_POINTER(bond->primary_slave, NULL); 3899 } else if (!strcmp(slave_dev->name, bond->params.primary)) { 3900 /* we have a new primary slave */ 3901 rcu_assign_pointer(bond->primary_slave, slave); 3902 } else { /* we didn't change primary - exit */ 3903 break; 3904 } 3905 3906 netdev_info(bond->dev, "Primary slave changed to %s, reselecting active slave\n", 3907 primary ? slave_dev->name : "none"); 3908 3909 block_netpoll_tx(); 3910 bond_select_active_slave(bond); 3911 unblock_netpoll_tx(); 3912 break; 3913 case NETDEV_FEAT_CHANGE: 3914 bond_compute_features(bond); 3915 break; 3916 case NETDEV_RESEND_IGMP: 3917 /* Propagate to master device */ 3918 call_netdevice_notifiers(event, slave->bond->dev); 3919 break; 3920 default: 3921 break; 3922 } 3923 3924 return NOTIFY_DONE; 3925 } 3926 3927 /* bond_netdev_event: handle netdev notifier chain events. 3928 * 3929 * This function receives events for the netdev chain. The caller (an 3930 * ioctl handler calling blocking_notifier_call_chain) holds the necessary 3931 * locks for us to safely manipulate the slave devices (RTNL lock, 3932 * dev_probe_lock). 3933 */ 3934 static int bond_netdev_event(struct notifier_block *this, 3935 unsigned long event, void *ptr) 3936 { 3937 struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); 3938 3939 netdev_dbg(event_dev, "%s received %s\n", 3940 __func__, netdev_cmd_to_name(event)); 3941 3942 if (!(event_dev->priv_flags & IFF_BONDING)) 3943 return NOTIFY_DONE; 3944 3945 if (event_dev->flags & IFF_MASTER) { 3946 int ret; 3947 3948 ret = bond_master_netdev_event(event, event_dev); 3949 if (ret != NOTIFY_DONE) 3950 return ret; 3951 } 3952 3953 if (event_dev->flags & IFF_SLAVE) 3954 return bond_slave_netdev_event(event, event_dev); 3955 3956 return NOTIFY_DONE; 3957 } 3958 3959 static struct notifier_block bond_netdev_notifier = { 3960 .notifier_call = bond_netdev_event, 3961 }; 3962 3963 /*---------------------------- Hashing Policies -----------------------------*/ 3964 3965 /* Helper to access data in a packet, with or without a backing skb. 3966 * If skb is given, the data is linearized if necessary via pskb_may_pull.
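 *
 * Editor's note (hedged usage sketch): callers must re-read through the
 * returned pointer rather than the one they passed in, because a
 * successful pull may relocate the packet data.  The pattern used by
 * the hash helpers below is:
 *
 *	data = bond_pull_data(skb, data, hlen, mhoff + sizeof(struct ethhdr));
 *	if (!data)
 *		return 0;	// header not reachable, give up
 *	ep = (struct ethhdr *)(data + mhoff);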
3967 */ 3968 static inline const void *bond_pull_data(struct sk_buff *skb, 3969 const void *data, int hlen, int n) 3970 { 3971 if (likely(n <= hlen)) 3972 return data; 3973 else if (skb && likely(pskb_may_pull(skb, n))) 3974 return skb->head; 3975 3976 return NULL; 3977 } 3978 3979 /* L2 hash helper */ 3980 static inline u32 bond_eth_hash(struct sk_buff *skb, const void *data, int mhoff, int hlen) 3981 { 3982 struct ethhdr *ep; 3983 3984 data = bond_pull_data(skb, data, hlen, mhoff + sizeof(struct ethhdr)); 3985 if (!data) 3986 return 0; 3987 3988 ep = (struct ethhdr *)(data + mhoff); 3989 return ep->h_dest[5] ^ ep->h_source[5] ^ be16_to_cpu(ep->h_proto); 3990 } 3991 3992 static bool bond_flow_ip(struct sk_buff *skb, struct flow_keys *fk, const void *data, 3993 int hlen, __be16 l2_proto, int *nhoff, int *ip_proto, bool l34) 3994 { 3995 const struct ipv6hdr *iph6; 3996 const struct iphdr *iph; 3997 3998 if (l2_proto == htons(ETH_P_IP)) { 3999 data = bond_pull_data(skb, data, hlen, *nhoff + sizeof(*iph)); 4000 if (!data) 4001 return false; 4002 4003 iph = (const struct iphdr *)(data + *nhoff); 4004 iph_to_flow_copy_v4addrs(fk, iph); 4005 *nhoff += iph->ihl << 2; 4006 if (!ip_is_fragment(iph)) 4007 *ip_proto = iph->protocol; 4008 } else if (l2_proto == htons(ETH_P_IPV6)) { 4009 data = bond_pull_data(skb, data, hlen, *nhoff + sizeof(*iph6)); 4010 if (!data) 4011 return false; 4012 4013 iph6 = (const struct ipv6hdr *)(data + *nhoff); 4014 iph_to_flow_copy_v6addrs(fk, iph6); 4015 *nhoff += sizeof(*iph6); 4016 *ip_proto = iph6->nexthdr; 4017 } else { 4018 return false; 4019 } 4020 4021 if (l34 && *ip_proto >= 0) 4022 fk->ports.ports = __skb_flow_get_ports(skb, *nhoff, *ip_proto, data, hlen); 4023 4024 return true; 4025 } 4026 4027 static u32 bond_vlan_srcmac_hash(struct sk_buff *skb, const void *data, int mhoff, int hlen) 4028 { 4029 u32 srcmac_vendor = 0, srcmac_dev = 0; 4030 struct ethhdr *mac_hdr; 4031 u16 vlan = 0; 4032 int i; 4033 4034 data = bond_pull_data(skb, data, hlen, mhoff + sizeof(struct ethhdr)); 4035 if (!data) 4036 return 0; 4037 mac_hdr = (struct ethhdr *)(data + mhoff); 4038 4039 for (i = 0; i < 3; i++) 4040 srcmac_vendor = (srcmac_vendor << 8) | mac_hdr->h_source[i]; 4041 4042 for (i = 3; i < ETH_ALEN; i++) 4043 srcmac_dev = (srcmac_dev << 8) | mac_hdr->h_source[i]; 4044 4045 if (skb && skb_vlan_tag_present(skb)) 4046 vlan = skb_vlan_tag_get(skb); 4047 4048 return vlan ^ srcmac_vendor ^ srcmac_dev; 4049 } 4050 4051 /* Extract the appropriate headers based on bond's xmit policy */ 4052 static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb, const void *data, 4053 __be16 l2_proto, int nhoff, int hlen, struct flow_keys *fk) 4054 { 4055 bool l34 = bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34; 4056 int ip_proto = -1; 4057 4058 switch (bond->params.xmit_policy) { 4059 case BOND_XMIT_POLICY_ENCAP23: 4060 case BOND_XMIT_POLICY_ENCAP34: 4061 memset(fk, 0, sizeof(*fk)); 4062 return __skb_flow_dissect(NULL, skb, &flow_keys_bonding, 4063 fk, data, l2_proto, nhoff, hlen, 0); 4064 default: 4065 break; 4066 } 4067 4068 fk->ports.ports = 0; 4069 memset(&fk->icmp, 0, sizeof(fk->icmp)); 4070 if (!bond_flow_ip(skb, fk, data, hlen, l2_proto, &nhoff, &ip_proto, l34)) 4071 return false; 4072 4073 /* ICMP error packets contains at least 8 bytes of the header 4074 * of the packet which generated the error. Use this information 4075 * to correlate ICMP error packets within the same flow which 4076 * generated the error. 
4077 */ 4078 if (ip_proto == IPPROTO_ICMP || ip_proto == IPPROTO_ICMPV6) { 4079 skb_flow_get_icmp_tci(skb, &fk->icmp, data, nhoff, hlen); 4080 if (ip_proto == IPPROTO_ICMP) { 4081 if (!icmp_is_err(fk->icmp.type)) 4082 return true; 4083 4084 nhoff += sizeof(struct icmphdr); 4085 } else if (ip_proto == IPPROTO_ICMPV6) { 4086 if (!icmpv6_is_err(fk->icmp.type)) 4087 return true; 4088 4089 nhoff += sizeof(struct icmp6hdr); 4090 } 4091 return bond_flow_ip(skb, fk, data, hlen, l2_proto, &nhoff, &ip_proto, l34); 4092 } 4093 4094 return true; 4095 } 4096 4097 static u32 bond_ip_hash(u32 hash, struct flow_keys *flow, int xmit_policy) 4098 { 4099 hash ^= (__force u32)flow_get_u32_dst(flow) ^ 4100 (__force u32)flow_get_u32_src(flow); 4101 hash ^= (hash >> 16); 4102 hash ^= (hash >> 8); 4103 4104 /* discard lowest hash bit to deal with the common even ports pattern */ 4105 if (xmit_policy == BOND_XMIT_POLICY_LAYER34 || 4106 xmit_policy == BOND_XMIT_POLICY_ENCAP34) 4107 return hash >> 1; 4108 4109 return hash; 4110 } 4111 4112 /* Generate hash based on xmit policy. If @skb is given it is used to linearize 4113 * the data as required, but this function can be used without it if the data is 4114 * known to be linear (e.g. with xdp_buff). 4115 */ 4116 static u32 __bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, const void *data, 4117 __be16 l2_proto, int mhoff, int nhoff, int hlen) 4118 { 4119 struct flow_keys flow; 4120 u32 hash; 4121 4122 if (bond->params.xmit_policy == BOND_XMIT_POLICY_VLAN_SRCMAC) 4123 return bond_vlan_srcmac_hash(skb, data, mhoff, hlen); 4124 4125 if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 || 4126 !bond_flow_dissect(bond, skb, data, l2_proto, nhoff, hlen, &flow)) 4127 return bond_eth_hash(skb, data, mhoff, hlen); 4128 4129 if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 || 4130 bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23) { 4131 hash = bond_eth_hash(skb, data, mhoff, hlen); 4132 } else { 4133 if (flow.icmp.id) 4134 memcpy(&hash, &flow.icmp, sizeof(hash)); 4135 else 4136 memcpy(&hash, &flow.ports.ports, sizeof(hash)); 4137 } 4138 4139 return bond_ip_hash(hash, &flow, bond->params.xmit_policy); 4140 } 4141 4142 /** 4143 * bond_xmit_hash - generate a hash value based on the xmit policy 4144 * @bond: bonding device 4145 * @skb: buffer to use for headers 4146 * 4147 * This function will extract the necessary headers from the skb buffer and use 4148 * them to generate a hash based on the xmit_policy set in the bonding device 4149 */ 4150 u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb) 4151 { 4152 if (bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP34 && 4153 skb->l4_hash) 4154 return skb->hash; 4155 4156 return __bond_xmit_hash(bond, skb, skb->data, skb->protocol, 4157 skb_mac_offset(skb), skb_network_offset(skb), 4158 skb_headlen(skb)); 4159 } 4160 4161 /** 4162 * bond_xmit_hash_xdp - generate a hash value based on the xmit policy 4163 * @bond: bonding device 4164 * @xdp: buffer to use for headers 4165 * 4166 * The XDP variant of bond_xmit_hash. 
4167 */ 4168 static u32 bond_xmit_hash_xdp(struct bonding *bond, struct xdp_buff *xdp) 4169 { 4170 struct ethhdr *eth; 4171 4172 if (xdp->data + sizeof(struct ethhdr) > xdp->data_end) 4173 return 0; 4174 4175 eth = (struct ethhdr *)xdp->data; 4176 4177 return __bond_xmit_hash(bond, NULL, xdp->data, eth->h_proto, 0, 4178 sizeof(struct ethhdr), xdp->data_end - xdp->data); 4179 } 4180 4181 /*-------------------------- Device entry points ----------------------------*/ 4182 4183 void bond_work_init_all(struct bonding *bond) 4184 { 4185 INIT_DELAYED_WORK(&bond->mcast_work, 4186 bond_resend_igmp_join_requests_delayed); 4187 INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor); 4188 INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor); 4189 INIT_DELAYED_WORK(&bond->arp_work, bond_arp_monitor); 4190 INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler); 4191 INIT_DELAYED_WORK(&bond->slave_arr_work, bond_slave_arr_handler); 4192 } 4193 4194 static void bond_work_cancel_all(struct bonding *bond) 4195 { 4196 cancel_delayed_work_sync(&bond->mii_work); 4197 cancel_delayed_work_sync(&bond->arp_work); 4198 cancel_delayed_work_sync(&bond->alb_work); 4199 cancel_delayed_work_sync(&bond->ad_work); 4200 cancel_delayed_work_sync(&bond->mcast_work); 4201 cancel_delayed_work_sync(&bond->slave_arr_work); 4202 } 4203 4204 static int bond_open(struct net_device *bond_dev) 4205 { 4206 struct bonding *bond = netdev_priv(bond_dev); 4207 struct list_head *iter; 4208 struct slave *slave; 4209 4210 if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN && !bond->rr_tx_counter) { 4211 bond->rr_tx_counter = alloc_percpu(u32); 4212 if (!bond->rr_tx_counter) 4213 return -ENOMEM; 4214 } 4215 4216 /* reset slave->backup and slave->inactive */ 4217 if (bond_has_slaves(bond)) { 4218 bond_for_each_slave(bond, slave, iter) { 4219 if (bond_uses_primary(bond) && 4220 slave != rcu_access_pointer(bond->curr_active_slave)) { 4221 bond_set_slave_inactive_flags(slave, 4222 BOND_SLAVE_NOTIFY_NOW); 4223 } else if (BOND_MODE(bond) != BOND_MODE_8023AD) { 4224 bond_set_slave_active_flags(slave, 4225 BOND_SLAVE_NOTIFY_NOW); 4226 } 4227 } 4228 } 4229 4230 if (bond_is_lb(bond)) { 4231 /* bond_alb_initialize must be called before the timer 4232 * is started. 4233 */ 4234 if (bond_alb_initialize(bond, (BOND_MODE(bond) == BOND_MODE_ALB))) 4235 return -ENOMEM; 4236 if (bond->params.tlb_dynamic_lb || BOND_MODE(bond) == BOND_MODE_ALB) 4237 queue_delayed_work(bond->wq, &bond->alb_work, 0); 4238 } 4239 4240 if (bond->params.miimon) /* link check interval, in milliseconds. */ 4241 queue_delayed_work(bond->wq, &bond->mii_work, 0); 4242 4243 if (bond->params.arp_interval) { /* arp interval, in milliseconds. 
*/ 4244 queue_delayed_work(bond->wq, &bond->arp_work, 0); 4245 bond->recv_probe = bond_rcv_validate; 4246 } 4247 4248 if (BOND_MODE(bond) == BOND_MODE_8023AD) { 4249 queue_delayed_work(bond->wq, &bond->ad_work, 0); 4250 /* register to receive LACPDUs */ 4251 bond->recv_probe = bond_3ad_lacpdu_recv; 4252 bond_3ad_initiate_agg_selection(bond, 1); 4253 4254 bond_for_each_slave(bond, slave, iter) 4255 dev_mc_add(slave->dev, lacpdu_mcast_addr); 4256 } 4257 4258 if (bond_mode_can_use_xmit_hash(bond)) 4259 bond_update_slave_arr(bond, NULL); 4260 4261 return 0; 4262 } 4263 4264 static int bond_close(struct net_device *bond_dev) 4265 { 4266 struct bonding *bond = netdev_priv(bond_dev); 4267 struct slave *slave; 4268 4269 bond_work_cancel_all(bond); 4270 bond->send_peer_notif = 0; 4271 if (bond_is_lb(bond)) 4272 bond_alb_deinitialize(bond); 4273 bond->recv_probe = NULL; 4274 4275 if (bond_uses_primary(bond)) { 4276 rcu_read_lock(); 4277 slave = rcu_dereference(bond->curr_active_slave); 4278 if (slave) 4279 bond_hw_addr_flush(bond_dev, slave->dev); 4280 rcu_read_unlock(); 4281 } else { 4282 struct list_head *iter; 4283 4284 bond_for_each_slave(bond, slave, iter) 4285 bond_hw_addr_flush(bond_dev, slave->dev); 4286 } 4287 4288 return 0; 4289 } 4290 4291 /* fold stats, assuming all rtnl_link_stats64 fields are u64, while 4292 * allowing for the fact that some drivers provide 32bit values only. 4293 */ 4294 static void bond_fold_stats(struct rtnl_link_stats64 *_res, 4295 const struct rtnl_link_stats64 *_new, 4296 const struct rtnl_link_stats64 *_old) 4297 { 4298 const u64 *new = (const u64 *)_new; 4299 const u64 *old = (const u64 *)_old; 4300 u64 *res = (u64 *)_res; 4301 int i; 4302 4303 for (i = 0; i < sizeof(*_res) / sizeof(u64); i++) { 4304 u64 nv = new[i]; 4305 u64 ov = old[i]; 4306 s64 delta = nv - ov; 4307 4308 /* detects if this particular field is 32bit only */ 4309 if (((nv | ov) >> 32) == 0) 4310 delta = (s64)(s32)((u32)nv - (u32)ov); 4311 4312 /* filter anomalies; some drivers reset their stats 4313 * at down/up events.
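 *
 * Editor's worked example (illustrative): if a 32bit-only counter wraps
 * from 0xffffffff to 0x00000001, both values have zero upper halves, so
 * ((nv | ov) >> 32) == 0 above and the delta is taken modulo 2^32:
 *
 *	delta = (s64)(s32)((u32)0x00000001 - (u32)0xffffffff) = 2
 *
 * whereas the raw 64bit subtraction would yield a large negative value
 * that the "delta > 0" filter below would wrongly discard.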
4314 */ 4315 if (delta > 0) 4316 res[i] += delta; 4317 } 4318 } 4319 4320 #ifdef CONFIG_LOCKDEP 4321 static int bond_get_lowest_level_rcu(struct net_device *dev) 4322 { 4323 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 4324 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 4325 int cur = 0, max = 0; 4326 4327 now = dev; 4328 iter = &dev->adj_list.lower; 4329 4330 while (1) { 4331 next = NULL; 4332 while (1) { 4333 ldev = netdev_next_lower_dev_rcu(now, &iter); 4334 if (!ldev) 4335 break; 4336 4337 next = ldev; 4338 niter = &ldev->adj_list.lower; 4339 dev_stack[cur] = now; 4340 iter_stack[cur++] = iter; 4341 if (max <= cur) 4342 max = cur; 4343 break; 4344 } 4345 4346 if (!next) { 4347 if (!cur) 4348 return max; 4349 next = dev_stack[--cur]; 4350 niter = iter_stack[cur]; 4351 } 4352 4353 now = next; 4354 iter = niter; 4355 } 4356 4357 return max; 4358 } 4359 #endif 4360 4361 static void bond_get_stats(struct net_device *bond_dev, 4362 struct rtnl_link_stats64 *stats) 4363 { 4364 struct bonding *bond = netdev_priv(bond_dev); 4365 struct rtnl_link_stats64 temp; 4366 struct list_head *iter; 4367 struct slave *slave; 4368 int nest_level = 0; 4369 4370 4371 rcu_read_lock(); 4372 #ifdef CONFIG_LOCKDEP 4373 nest_level = bond_get_lowest_level_rcu(bond_dev); 4374 #endif 4375 4376 spin_lock_nested(&bond->stats_lock, nest_level); 4377 memcpy(stats, &bond->bond_stats, sizeof(*stats)); 4378 4379 bond_for_each_slave_rcu(bond, slave, iter) { 4380 const struct rtnl_link_stats64 *new = 4381 dev_get_stats(slave->dev, &temp); 4382 4383 bond_fold_stats(stats, new, &slave->slave_stats); 4384 4385 /* save off the slave stats for the next run */ 4386 memcpy(&slave->slave_stats, new, sizeof(*new)); 4387 } 4388 4389 memcpy(&bond->bond_stats, stats, sizeof(*stats)); 4390 spin_unlock(&bond->stats_lock); 4391 rcu_read_unlock(); 4392 } 4393 4394 static int bond_eth_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd) 4395 { 4396 struct bonding *bond = netdev_priv(bond_dev); 4397 struct mii_ioctl_data *mii = NULL; 4398 const struct net_device_ops *ops; 4399 struct net_device *real_dev; 4400 struct hwtstamp_config cfg; 4401 struct ifreq ifrr; 4402 int res = 0; 4403 4404 netdev_dbg(bond_dev, "bond_eth_ioctl: cmd=%d\n", cmd); 4405 4406 switch (cmd) { 4407 case SIOCGMIIPHY: 4408 mii = if_mii(ifr); 4409 if (!mii) 4410 return -EINVAL; 4411 4412 mii->phy_id = 0; 4413 fallthrough; 4414 case SIOCGMIIREG: 4415 /* We do this again just in case we were called by SIOCGMIIREG 4416 * instead of SIOCGMIIPHY. 
4417 */ 4418 mii = if_mii(ifr); 4419 if (!mii) 4420 return -EINVAL; 4421 4422 if (mii->reg_num == 1) { 4423 mii->val_out = 0; 4424 if (netif_carrier_ok(bond->dev)) 4425 mii->val_out = BMSR_LSTATUS; 4426 } 4427 4428 break; 4429 case SIOCSHWTSTAMP: 4430 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) 4431 return -EFAULT; 4432 4433 if (!(cfg.flags & HWTSTAMP_FLAG_BONDED_PHC_INDEX)) 4434 return -EOPNOTSUPP; 4435 4436 fallthrough; 4437 case SIOCGHWTSTAMP: 4438 real_dev = bond_option_active_slave_get_rcu(bond); 4439 if (!real_dev) 4440 return -EOPNOTSUPP; 4441 4442 strscpy_pad(ifrr.ifr_name, real_dev->name, IFNAMSIZ); 4443 ifrr.ifr_ifru = ifr->ifr_ifru; 4444 4445 ops = real_dev->netdev_ops; 4446 if (netif_device_present(real_dev) && ops->ndo_eth_ioctl) { 4447 res = ops->ndo_eth_ioctl(real_dev, &ifrr, cmd); 4448 if (res) 4449 return res; 4450 4451 ifr->ifr_ifru = ifrr.ifr_ifru; 4452 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) 4453 return -EFAULT; 4454 4455 /* Set the BOND_PHC_INDEX flag to notify user space */ 4456 cfg.flags |= HWTSTAMP_FLAG_BONDED_PHC_INDEX; 4457 4458 return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? 4459 -EFAULT : 0; 4460 } 4461 fallthrough; 4462 default: 4463 res = -EOPNOTSUPP; 4464 } 4465 4466 return res; 4467 } 4468 4469 static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd) 4470 { 4471 struct bonding *bond = netdev_priv(bond_dev); 4472 struct net_device *slave_dev = NULL; 4473 struct ifbond k_binfo; 4474 struct ifbond __user *u_binfo = NULL; 4475 struct ifslave k_sinfo; 4476 struct ifslave __user *u_sinfo = NULL; 4477 struct bond_opt_value newval; 4478 struct net *net; 4479 int res = 0; 4480 4481 netdev_dbg(bond_dev, "bond_ioctl: cmd=%d\n", cmd); 4482 4483 switch (cmd) { 4484 case SIOCBONDINFOQUERY: 4485 u_binfo = (struct ifbond __user *)ifr->ifr_data; 4486 4487 if (copy_from_user(&k_binfo, u_binfo, sizeof(ifbond))) 4488 return -EFAULT; 4489 4490 bond_info_query(bond_dev, &k_binfo); 4491 if (copy_to_user(u_binfo, &k_binfo, sizeof(ifbond))) 4492 return -EFAULT; 4493 4494 return 0; 4495 case SIOCBONDSLAVEINFOQUERY: 4496 u_sinfo = (struct ifslave __user *)ifr->ifr_data; 4497 4498 if (copy_from_user(&k_sinfo, u_sinfo, sizeof(ifslave))) 4499 return -EFAULT; 4500 4501 res = bond_slave_info_query(bond_dev, &k_sinfo); 4502 if (res == 0 && 4503 copy_to_user(u_sinfo, &k_sinfo, sizeof(ifslave))) 4504 return -EFAULT; 4505 4506 return res; 4507 default: 4508 break; 4509 } 4510 4511 net = dev_net(bond_dev); 4512 4513 if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 4514 return -EPERM; 4515 4516 slave_dev = __dev_get_by_name(net, ifr->ifr_slave); 4517 4518 slave_dbg(bond_dev, slave_dev, "slave_dev=%p:\n", slave_dev); 4519 4520 if (!slave_dev) 4521 return -ENODEV; 4522 4523 switch (cmd) { 4524 case SIOCBONDENSLAVE: 4525 res = bond_enslave(bond_dev, slave_dev, NULL); 4526 break; 4527 case SIOCBONDRELEASE: 4528 res = bond_release(bond_dev, slave_dev); 4529 break; 4530 case SIOCBONDSETHWADDR: 4531 res = bond_set_dev_addr(bond_dev, slave_dev); 4532 break; 4533 case SIOCBONDCHANGEACTIVE: 4534 bond_opt_initstr(&newval, slave_dev->name); 4535 res = __bond_opt_set_notify(bond, BOND_OPT_ACTIVE_SLAVE, 4536 &newval); 4537 break; 4538 default: 4539 res = -EOPNOTSUPP; 4540 } 4541 4542 return res; 4543 } 4544 4545 static int bond_siocdevprivate(struct net_device *bond_dev, struct ifreq *ifr, 4546 void __user *data, int cmd) 4547 { 4548 struct ifreq ifrdata = { .ifr_data = data }; 4549 4550 switch (cmd) { 4551 case BOND_INFO_QUERY_OLD: 4552 return 
bond_do_ioctl(bond_dev, &ifrdata, SIOCBONDINFOQUERY); 4553 case BOND_SLAVE_INFO_QUERY_OLD: 4554 return bond_do_ioctl(bond_dev, &ifrdata, SIOCBONDSLAVEINFOQUERY); 4555 case BOND_ENSLAVE_OLD: 4556 return bond_do_ioctl(bond_dev, ifr, SIOCBONDENSLAVE); 4557 case BOND_RELEASE_OLD: 4558 return bond_do_ioctl(bond_dev, ifr, SIOCBONDRELEASE); 4559 case BOND_SETHWADDR_OLD: 4560 return bond_do_ioctl(bond_dev, ifr, SIOCBONDSETHWADDR); 4561 case BOND_CHANGE_ACTIVE_OLD: 4562 return bond_do_ioctl(bond_dev, ifr, SIOCBONDCHANGEACTIVE); 4563 } 4564 4565 return -EOPNOTSUPP; 4566 } 4567 4568 static void bond_change_rx_flags(struct net_device *bond_dev, int change) 4569 { 4570 struct bonding *bond = netdev_priv(bond_dev); 4571 4572 if (change & IFF_PROMISC) 4573 bond_set_promiscuity(bond, 4574 bond_dev->flags & IFF_PROMISC ? 1 : -1); 4575 4576 if (change & IFF_ALLMULTI) 4577 bond_set_allmulti(bond, 4578 bond_dev->flags & IFF_ALLMULTI ? 1 : -1); 4579 } 4580 4581 static void bond_set_rx_mode(struct net_device *bond_dev) 4582 { 4583 struct bonding *bond = netdev_priv(bond_dev); 4584 struct list_head *iter; 4585 struct slave *slave; 4586 4587 rcu_read_lock(); 4588 if (bond_uses_primary(bond)) { 4589 slave = rcu_dereference(bond->curr_active_slave); 4590 if (slave) { 4591 dev_uc_sync(slave->dev, bond_dev); 4592 dev_mc_sync(slave->dev, bond_dev); 4593 } 4594 } else { 4595 bond_for_each_slave_rcu(bond, slave, iter) { 4596 dev_uc_sync_multiple(slave->dev, bond_dev); 4597 dev_mc_sync_multiple(slave->dev, bond_dev); 4598 } 4599 } 4600 rcu_read_unlock(); 4601 } 4602 4603 static int bond_neigh_init(struct neighbour *n) 4604 { 4605 struct bonding *bond = netdev_priv(n->dev); 4606 const struct net_device_ops *slave_ops; 4607 struct neigh_parms parms; 4608 struct slave *slave; 4609 int ret = 0; 4610 4611 rcu_read_lock(); 4612 slave = bond_first_slave_rcu(bond); 4613 if (!slave) 4614 goto out; 4615 slave_ops = slave->dev->netdev_ops; 4616 if (!slave_ops->ndo_neigh_setup) 4617 goto out; 4618 4619 /* TODO: find another way [1] to implement this. 4620 * Passing a zeroed structure is fragile, 4621 * but at least we do not pass garbage. 4622 * 4623 * [1] One way would be that ndo_neigh_setup() never touch 4624 * struct neigh_parms, but propagate the new neigh_setup() 4625 * back to ___neigh_create() / neigh_parms_alloc() 4626 */ 4627 memset(&parms, 0, sizeof(parms)); 4628 ret = slave_ops->ndo_neigh_setup(slave->dev, &parms); 4629 4630 if (ret) 4631 goto out; 4632 4633 if (parms.neigh_setup) 4634 ret = parms.neigh_setup(n); 4635 out: 4636 rcu_read_unlock(); 4637 return ret; 4638 } 4639 4640 /* The bonding ndo_neigh_setup is called at init time before any 4641 * slave exists. So we must declare a proxy setup function which will 4642 * be used at run time to resolve the actual slave neigh param setup. 4643 * 4644 * It's also called by master devices (such as vlans) to set up their 4645 * underlying devices. In that case - do nothing, we're already set up from 4646 * our init.
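 *
 * Editor's note (illustrative): the resulting dispatch is two-step --
 * neigh_parms_alloc() calls bond_neigh_setup() below through
 * ndo_neigh_setup, which merely records bond_neigh_init in
 * parms->neigh_setup; ___neigh_create() later runs it per neighbour,
 * and bond_neigh_init forwards to the first slave's own
 * ndo_neigh_setup.  Schematically:
 *
 *	parms->neigh_setup = bond_neigh_init;	// at parms creation
 *	...
 *	parms->neigh_setup(n);			// at neighbour creation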
4647 */ 4648 static int bond_neigh_setup(struct net_device *dev, 4649 struct neigh_parms *parms) 4650 { 4651 /* modify only our neigh_parms */ 4652 if (parms->dev == dev) 4653 parms->neigh_setup = bond_neigh_init; 4654 4655 return 0; 4656 } 4657 4658 /* Change the MTU of all of a master's slaves to match the master */ 4659 static int bond_change_mtu(struct net_device *bond_dev, int new_mtu) 4660 { 4661 struct bonding *bond = netdev_priv(bond_dev); 4662 struct slave *slave, *rollback_slave; 4663 struct list_head *iter; 4664 int res = 0; 4665 4666 netdev_dbg(bond_dev, "bond=%p, new_mtu=%d\n", bond, new_mtu); 4667 4668 bond_for_each_slave(bond, slave, iter) { 4669 slave_dbg(bond_dev, slave->dev, "s %p c_m %p\n", 4670 slave, slave->dev->netdev_ops->ndo_change_mtu); 4671 4672 res = dev_set_mtu(slave->dev, new_mtu); 4673 4674 if (res) { 4675 /* If we failed to set the slave's mtu to the new value 4676 * we must abort the operation even in ACTIVE_BACKUP 4677 * mode, because if we allow the backup slaves to have 4678 * different mtu values than the active slave we'll 4679 * need to change their mtu when doing a failover. That 4680 * means changing their mtu from timer context, which 4681 * is probably not a good idea. 4682 */ 4683 slave_dbg(bond_dev, slave->dev, "err %d setting mtu to %d\n", 4684 res, new_mtu); 4685 goto unwind; 4686 } 4687 } 4688 4689 bond_dev->mtu = new_mtu; 4690 4691 return 0; 4692 4693 unwind: 4694 /* unwind from head to the slave that failed */ 4695 bond_for_each_slave(bond, rollback_slave, iter) { 4696 int tmp_res; 4697 4698 if (rollback_slave == slave) 4699 break; 4700 4701 tmp_res = dev_set_mtu(rollback_slave->dev, bond_dev->mtu); 4702 if (tmp_res) 4703 slave_dbg(bond_dev, rollback_slave->dev, "unwind err %d\n", 4704 tmp_res); 4705 } 4706 4707 return res; 4708 } 4709 4710 /* Change HW address 4711 * 4712 * Note that many devices must be down to change the HW address, and 4713 * downing the master releases all slaves. We can make bonds full of 4714 * bonding devices to test this, however. 4715 */ 4716 static int bond_set_mac_address(struct net_device *bond_dev, void *addr) 4717 { 4718 struct bonding *bond = netdev_priv(bond_dev); 4719 struct slave *slave, *rollback_slave; 4720 struct sockaddr_storage *ss = addr, tmp_ss; 4721 struct list_head *iter; 4722 int res = 0; 4723 4724 if (BOND_MODE(bond) == BOND_MODE_ALB) 4725 return bond_alb_set_mac_address(bond_dev, addr); 4726 4727 4728 netdev_dbg(bond_dev, "%s: bond=%p\n", __func__, bond); 4729 4730 /* If fail_over_mac is enabled, do nothing and return success. 4731 * Returning an error causes ifenslave to fail. 4732 */ 4733 if (bond->params.fail_over_mac && 4734 BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) 4735 return 0; 4736 4737 if (!is_valid_ether_addr(ss->__data)) 4738 return -EADDRNOTAVAIL; 4739 4740 bond_for_each_slave(bond, slave, iter) { 4741 slave_dbg(bond_dev, slave->dev, "%s: slave=%p\n", 4742 __func__, slave); 4743 res = dev_set_mac_address(slave->dev, addr, NULL); 4744 if (res) { 4745 /* TODO: consider downing the slave 4746 * and retry ? 4747 * User should expect communications 4748 * breakage anyway until ARP finish 4749 * updating, so... 
4750 */ 4751 slave_dbg(bond_dev, slave->dev, "%s: err %d\n", 4752 __func__, res); 4753 goto unwind; 4754 } 4755 } 4756 4757 /* success */ 4758 dev_addr_set(bond_dev, ss->__data); 4759 return 0; 4760 4761 unwind: 4762 memcpy(tmp_ss.__data, bond_dev->dev_addr, bond_dev->addr_len); 4763 tmp_ss.ss_family = bond_dev->type; 4764 4765 /* unwind from head to the slave that failed */ 4766 bond_for_each_slave(bond, rollback_slave, iter) { 4767 int tmp_res; 4768 4769 if (rollback_slave == slave) 4770 break; 4771 4772 tmp_res = dev_set_mac_address(rollback_slave->dev, 4773 (struct sockaddr *)&tmp_ss, NULL); 4774 if (tmp_res) { 4775 slave_dbg(bond_dev, rollback_slave->dev, "%s: unwind err %d\n", 4776 __func__, tmp_res); 4777 } 4778 } 4779 4780 return res; 4781 } 4782 4783 /** 4784 * bond_get_slave_by_id - get xmit slave with slave_id 4785 * @bond: bonding device that is transmitting 4786 * @slave_id: slave id up to slave_cnt-1 through which to transmit 4787 * 4788 * This function tries to get slave with slave_id but in case 4789 * it fails, it tries to find the first available slave for transmission. 4790 */ 4791 static struct slave *bond_get_slave_by_id(struct bonding *bond, 4792 int slave_id) 4793 { 4794 struct list_head *iter; 4795 struct slave *slave; 4796 int i = slave_id; 4797 4798 /* Here we start from the slave with slave_id */ 4799 bond_for_each_slave_rcu(bond, slave, iter) { 4800 if (--i < 0) { 4801 if (bond_slave_can_tx(slave)) 4802 return slave; 4803 } 4804 } 4805 4806 /* Here we start from the first slave up to slave_id */ 4807 i = slave_id; 4808 bond_for_each_slave_rcu(bond, slave, iter) { 4809 if (--i < 0) 4810 break; 4811 if (bond_slave_can_tx(slave)) 4812 return slave; 4813 } 4814 /* no slave that can tx has been found */ 4815 return NULL; 4816 } 4817 4818 /** 4819 * bond_rr_gen_slave_id - generate slave id based on packets_per_slave 4820 * @bond: bonding device to use 4821 * 4822 * Based on the value of the bonding device's packets_per_slave parameter 4823 * this function generates a slave id, which is usually used as the next 4824 * slave to transmit through. 4825 */ 4826 static u32 bond_rr_gen_slave_id(struct bonding *bond) 4827 { 4828 u32 slave_id; 4829 struct reciprocal_value reciprocal_packets_per_slave; 4830 int packets_per_slave = bond->params.packets_per_slave; 4831 4832 switch (packets_per_slave) { 4833 case 0: 4834 slave_id = get_random_u32(); 4835 break; 4836 case 1: 4837 slave_id = this_cpu_inc_return(*bond->rr_tx_counter); 4838 break; 4839 default: 4840 reciprocal_packets_per_slave = 4841 bond->params.reciprocal_packets_per_slave; 4842 slave_id = this_cpu_inc_return(*bond->rr_tx_counter); 4843 slave_id = reciprocal_divide(slave_id, 4844 reciprocal_packets_per_slave); 4845 break; 4846 } 4847 4848 return slave_id; 4849 } 4850 4851 static struct slave *bond_xmit_roundrobin_slave_get(struct bonding *bond, 4852 struct sk_buff *skb) 4853 { 4854 struct slave *slave; 4855 int slave_cnt; 4856 u32 slave_id; 4857 4858 /* Start with the curr_active_slave that joined the bond as the 4859 * default for sending IGMP traffic. For failover purposes one 4860 * needs to maintain some consistency for the interface that will 4861 * send the join/membership reports. The curr_active_slave found 4862 * will send all of this type of traffic. 
4863 */ 4864 if (skb->protocol == htons(ETH_P_IP)) { 4865 int noff = skb_network_offset(skb); 4866 struct iphdr *iph; 4867 4868 if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph)))) 4869 goto non_igmp; 4870 4871 iph = ip_hdr(skb); 4872 if (iph->protocol == IPPROTO_IGMP) { 4873 slave = rcu_dereference(bond->curr_active_slave); 4874 if (slave) 4875 return slave; 4876 return bond_get_slave_by_id(bond, 0); 4877 } 4878 } 4879 4880 non_igmp: 4881 slave_cnt = READ_ONCE(bond->slave_cnt); 4882 if (likely(slave_cnt)) { 4883 slave_id = bond_rr_gen_slave_id(bond) % slave_cnt; 4884 return bond_get_slave_by_id(bond, slave_id); 4885 } 4886 return NULL; 4887 } 4888 4889 static struct slave *bond_xdp_xmit_roundrobin_slave_get(struct bonding *bond, 4890 struct xdp_buff *xdp) 4891 { 4892 struct slave *slave; 4893 int slave_cnt; 4894 u32 slave_id; 4895 const struct ethhdr *eth; 4896 void *data = xdp->data; 4897 4898 if (data + sizeof(struct ethhdr) > xdp->data_end) 4899 goto non_igmp; 4900 4901 eth = (struct ethhdr *)data; 4902 data += sizeof(struct ethhdr); 4903 4904 /* See comment on IGMP in bond_xmit_roundrobin_slave_get() */ 4905 if (eth->h_proto == htons(ETH_P_IP)) { 4906 const struct iphdr *iph; 4907 4908 if (data + sizeof(struct iphdr) > xdp->data_end) 4909 goto non_igmp; 4910 4911 iph = (struct iphdr *)data; 4912 4913 if (iph->protocol == IPPROTO_IGMP) { 4914 slave = rcu_dereference(bond->curr_active_slave); 4915 if (slave) 4916 return slave; 4917 return bond_get_slave_by_id(bond, 0); 4918 } 4919 } 4920 4921 non_igmp: 4922 slave_cnt = READ_ONCE(bond->slave_cnt); 4923 if (likely(slave_cnt)) { 4924 slave_id = bond_rr_gen_slave_id(bond) % slave_cnt; 4925 return bond_get_slave_by_id(bond, slave_id); 4926 } 4927 return NULL; 4928 } 4929 4930 static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb, 4931 struct net_device *bond_dev) 4932 { 4933 struct bonding *bond = netdev_priv(bond_dev); 4934 struct slave *slave; 4935 4936 slave = bond_xmit_roundrobin_slave_get(bond, skb); 4937 if (likely(slave)) 4938 return bond_dev_queue_xmit(bond, skb, slave->dev); 4939 4940 return bond_tx_drop(bond_dev, skb); 4941 } 4942 4943 static struct slave *bond_xmit_activebackup_slave_get(struct bonding *bond) 4944 { 4945 return rcu_dereference(bond->curr_active_slave); 4946 } 4947 4948 /* In active-backup mode, we know that bond->curr_active_slave is always valid if 4949 * the bond has a usable interface. 4950 */ 4951 static netdev_tx_t bond_xmit_activebackup(struct sk_buff *skb, 4952 struct net_device *bond_dev) 4953 { 4954 struct bonding *bond = netdev_priv(bond_dev); 4955 struct slave *slave; 4956 4957 slave = bond_xmit_activebackup_slave_get(bond); 4958 if (slave) 4959 return bond_dev_queue_xmit(bond, skb, slave->dev); 4960 4961 return bond_tx_drop(bond_dev, skb); 4962 } 4963 4964 /* Use this to update slave_array when (a) it's not appropriate to update 4965 * slave_array right away (note that update_slave_array() may sleep) 4966 * and / or (b) RTNL is not held. 4967 */ 4968 void bond_slave_arr_work_rearm(struct bonding *bond, unsigned long delay) 4969 { 4970 queue_delayed_work(bond->wq, &bond->slave_arr_work, delay); 4971 } 4972 4973 /* Slave array work handler. 
Holds only RTNL */ 4974 static void bond_slave_arr_handler(struct work_struct *work) 4975 { 4976 struct bonding *bond = container_of(work, struct bonding, 4977 slave_arr_work.work); 4978 int ret; 4979 4980 if (!rtnl_trylock()) 4981 goto err; 4982 4983 ret = bond_update_slave_arr(bond, NULL); 4984 rtnl_unlock(); 4985 if (ret) { 4986 pr_warn_ratelimited("Failed to update slave array from WT\n"); 4987 goto err; 4988 } 4989 return; 4990 4991 err: 4992 bond_slave_arr_work_rearm(bond, 1); 4993 } 4994 4995 static void bond_skip_slave(struct bond_up_slave *slaves, 4996 struct slave *skipslave) 4997 { 4998 int idx; 4999 5000 /* Rare situation where caller has asked to skip a specific 5001 * slave but allocation failed (most likely!). BTW this is 5002 * only possible when the call is initiated from 5003 * __bond_release_one(). In this situation, overwrite the 5004 * skipslave entry in the array with the last entry from the 5005 * array to avoid a situation where the xmit path may choose 5006 * this to-be-skipped slave to send a packet out. 5007 */ 5008 for (idx = 0; slaves && idx < slaves->count; idx++) { 5009 if (skipslave == slaves->arr[idx]) { 5010 slaves->arr[idx] = 5011 slaves->arr[slaves->count - 1]; 5012 slaves->count--; 5013 break; 5014 } 5015 } 5016 } 5017 5018 static void bond_set_slave_arr(struct bonding *bond, 5019 struct bond_up_slave *usable_slaves, 5020 struct bond_up_slave *all_slaves) 5021 { 5022 struct bond_up_slave *usable, *all; 5023 5024 usable = rtnl_dereference(bond->usable_slaves); 5025 rcu_assign_pointer(bond->usable_slaves, usable_slaves); 5026 kfree_rcu(usable, rcu); 5027 5028 all = rtnl_dereference(bond->all_slaves); 5029 rcu_assign_pointer(bond->all_slaves, all_slaves); 5030 kfree_rcu(all, rcu); 5031 } 5032 5033 static void bond_reset_slave_arr(struct bonding *bond) 5034 { 5035 struct bond_up_slave *usable, *all; 5036 5037 usable = rtnl_dereference(bond->usable_slaves); 5038 if (usable) { 5039 RCU_INIT_POINTER(bond->usable_slaves, NULL); 5040 kfree_rcu(usable, rcu); 5041 } 5042 5043 all = rtnl_dereference(bond->all_slaves); 5044 if (all) { 5045 RCU_INIT_POINTER(bond->all_slaves, NULL); 5046 kfree_rcu(all, rcu); 5047 } 5048 } 5049 5050 /* Build the usable slaves array in control path for modes that use xmit-hash 5051 * to determine the slave interface - 5052 * (a) BOND_MODE_8023AD 5053 * (b) BOND_MODE_XOR 5054 * (c) (BOND_MODE_TLB || BOND_MODE_ALB) && tlb_dynamic_lb == 0 5055 * 5056 * The caller is expected to hold RTNL only and NO other lock! 5057 */ 5058 int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave) 5059 { 5060 struct bond_up_slave *usable_slaves = NULL, *all_slaves = NULL; 5061 struct slave *slave; 5062 struct list_head *iter; 5063 int agg_id = 0; 5064 int ret = 0; 5065 5066 might_sleep(); 5067 5068 usable_slaves = kzalloc(struct_size(usable_slaves, arr, 5069 bond->slave_cnt), GFP_KERNEL); 5070 all_slaves = kzalloc(struct_size(all_slaves, arr, 5071 bond->slave_cnt), GFP_KERNEL); 5072 if (!usable_slaves || !all_slaves) { 5073 ret = -ENOMEM; 5074 goto out; 5075 } 5076 if (BOND_MODE(bond) == BOND_MODE_8023AD) { 5077 struct ad_info ad_info; 5078 5079 spin_lock_bh(&bond->mode_lock); 5080 if (bond_3ad_get_active_agg_info(bond, &ad_info)) { 5081 spin_unlock_bh(&bond->mode_lock); 5082 pr_debug("bond_3ad_get_active_agg_info failed\n"); 5083 /* No active aggregator means it's not safe to use 5084 * the previous array.
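 *
 * Editor's note (illustrative): without an active aggregator there is
 * no valid agg_id to filter on, so keeping a stale usable_slaves array
 * could steer traffic to ports outside any aggregator; dropping both
 * arrays instead forces bond_3ad_xor_xmit() into its tx-drop fallback
 * until the next successful rebuild.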
5085 */ 5086 bond_reset_slave_arr(bond); 5087 goto out; 5088 } 5089 spin_unlock_bh(&bond->mode_lock); 5090 agg_id = ad_info.aggregator_id; 5091 } 5092 bond_for_each_slave(bond, slave, iter) { 5093 if (skipslave == slave) 5094 continue; 5095 5096 all_slaves->arr[all_slaves->count++] = slave; 5097 if (BOND_MODE(bond) == BOND_MODE_8023AD) { 5098 struct aggregator *agg; 5099 5100 agg = SLAVE_AD_INFO(slave)->port.aggregator; 5101 if (!agg || agg->aggregator_identifier != agg_id) 5102 continue; 5103 } 5104 if (!bond_slave_can_tx(slave)) 5105 continue; 5106 5107 slave_dbg(bond->dev, slave->dev, "Adding slave to tx hash array[%d]\n", 5108 usable_slaves->count); 5109 5110 usable_slaves->arr[usable_slaves->count++] = slave; 5111 } 5112 5113 bond_set_slave_arr(bond, usable_slaves, all_slaves); 5114 return ret; 5115 out: 5116 if (ret != 0 && skipslave) { 5117 bond_skip_slave(rtnl_dereference(bond->all_slaves), 5118 skipslave); 5119 bond_skip_slave(rtnl_dereference(bond->usable_slaves), 5120 skipslave); 5121 } 5122 kfree_rcu(all_slaves, rcu); 5123 kfree_rcu(usable_slaves, rcu); 5124 5125 return ret; 5126 } 5127 5128 static struct slave *bond_xmit_3ad_xor_slave_get(struct bonding *bond, 5129 struct sk_buff *skb, 5130 struct bond_up_slave *slaves) 5131 { 5132 struct slave *slave; 5133 unsigned int count; 5134 u32 hash; 5135 5136 hash = bond_xmit_hash(bond, skb); 5137 count = slaves ? READ_ONCE(slaves->count) : 0; 5138 if (unlikely(!count)) 5139 return NULL; 5140 5141 slave = slaves->arr[hash % count]; 5142 return slave; 5143 } 5144 5145 static struct slave *bond_xdp_xmit_3ad_xor_slave_get(struct bonding *bond, 5146 struct xdp_buff *xdp) 5147 { 5148 struct bond_up_slave *slaves; 5149 unsigned int count; 5150 u32 hash; 5151 5152 hash = bond_xmit_hash_xdp(bond, xdp); 5153 slaves = rcu_dereference(bond->usable_slaves); 5154 count = slaves ? READ_ONCE(slaves->count) : 0; 5155 if (unlikely(!count)) 5156 return NULL; 5157 5158 return slaves->arr[hash % count]; 5159 } 5160 5161 /* Use this Xmit function for 3AD as well as XOR modes. The current 5162 * usable slave array is formed in the control path. The xmit function 5163 * just calculates hash and sends the packet out. 5164 */ 5165 static netdev_tx_t bond_3ad_xor_xmit(struct sk_buff *skb, 5166 struct net_device *dev) 5167 { 5168 struct bonding *bond = netdev_priv(dev); 5169 struct bond_up_slave *slaves; 5170 struct slave *slave; 5171 5172 slaves = rcu_dereference(bond->usable_slaves); 5173 slave = bond_xmit_3ad_xor_slave_get(bond, skb, slaves); 5174 if (likely(slave)) 5175 return bond_dev_queue_xmit(bond, skb, slave->dev); 5176 5177 return bond_tx_drop(dev, skb); 5178 } 5179 5180 /* in broadcast mode, we send everything to all usable interfaces. 
*/ 5181 static netdev_tx_t bond_xmit_broadcast(struct sk_buff *skb, 5182 struct net_device *bond_dev) 5183 { 5184 struct bonding *bond = netdev_priv(bond_dev); 5185 struct slave *slave = NULL; 5186 struct list_head *iter; 5187 bool xmit_suc = false; 5188 bool skb_used = false; 5189 5190 bond_for_each_slave_rcu(bond, slave, iter) { 5191 struct sk_buff *skb2; 5192 5193 if (!(bond_slave_is_up(slave) && slave->link == BOND_LINK_UP)) 5194 continue; 5195 5196 if (bond_is_last_slave(bond, slave)) { 5197 skb2 = skb; 5198 skb_used = true; 5199 } else { 5200 skb2 = skb_clone(skb, GFP_ATOMIC); 5201 if (!skb2) { 5202 net_err_ratelimited("%s: Error: %s: skb_clone() failed\n", 5203 bond_dev->name, __func__); 5204 continue; 5205 } 5206 } 5207 5208 if (bond_dev_queue_xmit(bond, skb2, slave->dev) == NETDEV_TX_OK) 5209 xmit_suc = true; 5210 } 5211 5212 if (!skb_used) 5213 dev_kfree_skb_any(skb); 5214 5215 if (xmit_suc) 5216 return NETDEV_TX_OK; 5217 5218 dev_core_stats_tx_dropped_inc(bond_dev); 5219 return NET_XMIT_DROP; 5220 } 5221 5222 /*------------------------- Device initialization ---------------------------*/ 5223 5224 /* Lookup the slave that corresponds to a qid */ 5225 static inline int bond_slave_override(struct bonding *bond, 5226 struct sk_buff *skb) 5227 { 5228 struct slave *slave = NULL; 5229 struct list_head *iter; 5230 5231 if (!skb_rx_queue_recorded(skb)) 5232 return 1; 5233 5234 /* Find out if any slaves have the same mapping as this skb. */ 5235 bond_for_each_slave_rcu(bond, slave, iter) { 5236 if (slave->queue_id == skb_get_queue_mapping(skb)) { 5237 if (bond_slave_is_up(slave) && 5238 slave->link == BOND_LINK_UP) { 5239 bond_dev_queue_xmit(bond, skb, slave->dev); 5240 return 0; 5241 } 5242 /* If the slave isn't UP, use default transmit policy. */ 5243 break; 5244 } 5245 } 5246 5247 return 1; 5248 } 5249 5250 5251 static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb, 5252 struct net_device *sb_dev) 5253 { 5254 /* This helper function exists to help dev_pick_tx get the correct 5255 * destination queue. Using a helper function skips a call to 5256 * skb_tx_hash and will put the skbs in the queue we expect on their 5257 * way down to the bonding driver. 5258 */ 5259 u16 txq = skb_rx_queue_recorded(skb) ? 
skb_get_rx_queue(skb) : 0; 5260 5261 /* Save the original txq to restore before passing to the driver */ 5262 qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb_get_queue_mapping(skb); 5263 5264 if (unlikely(txq >= dev->real_num_tx_queues)) { 5265 do { 5266 txq -= dev->real_num_tx_queues; 5267 } while (txq >= dev->real_num_tx_queues); 5268 } 5269 return txq; 5270 } 5271 5272 static struct net_device *bond_xmit_get_slave(struct net_device *master_dev, 5273 struct sk_buff *skb, 5274 bool all_slaves) 5275 { 5276 struct bonding *bond = netdev_priv(master_dev); 5277 struct bond_up_slave *slaves; 5278 struct slave *slave = NULL; 5279 5280 switch (BOND_MODE(bond)) { 5281 case BOND_MODE_ROUNDROBIN: 5282 slave = bond_xmit_roundrobin_slave_get(bond, skb); 5283 break; 5284 case BOND_MODE_ACTIVEBACKUP: 5285 slave = bond_xmit_activebackup_slave_get(bond); 5286 break; 5287 case BOND_MODE_8023AD: 5288 case BOND_MODE_XOR: 5289 if (all_slaves) 5290 slaves = rcu_dereference(bond->all_slaves); 5291 else 5292 slaves = rcu_dereference(bond->usable_slaves); 5293 slave = bond_xmit_3ad_xor_slave_get(bond, skb, slaves); 5294 break; 5295 case BOND_MODE_BROADCAST: 5296 break; 5297 case BOND_MODE_ALB: 5298 slave = bond_xmit_alb_slave_get(bond, skb); 5299 break; 5300 case BOND_MODE_TLB: 5301 slave = bond_xmit_tlb_slave_get(bond, skb); 5302 break; 5303 default: 5304 /* Should never happen, mode already checked */ 5305 WARN_ONCE(true, "Unknown bonding mode"); 5306 break; 5307 } 5308 5309 if (slave) 5310 return slave->dev; 5311 return NULL; 5312 } 5313 5314 static void bond_sk_to_flow(struct sock *sk, struct flow_keys *flow) 5315 { 5316 switch (sk->sk_family) { 5317 #if IS_ENABLED(CONFIG_IPV6) 5318 case AF_INET6: 5319 if (ipv6_only_sock(sk) || 5320 ipv6_addr_type(&sk->sk_v6_daddr) != IPV6_ADDR_MAPPED) { 5321 flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; 5322 flow->addrs.v6addrs.src = inet6_sk(sk)->saddr; 5323 flow->addrs.v6addrs.dst = sk->sk_v6_daddr; 5324 break; 5325 } 5326 fallthrough; 5327 #endif 5328 default: /* AF_INET */ 5329 flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; 5330 flow->addrs.v4addrs.src = inet_sk(sk)->inet_rcv_saddr; 5331 flow->addrs.v4addrs.dst = inet_sk(sk)->inet_daddr; 5332 break; 5333 } 5334 5335 flow->ports.src = inet_sk(sk)->inet_sport; 5336 flow->ports.dst = inet_sk(sk)->inet_dport; 5337 } 5338 5339 /** 5340 * bond_sk_hash_l34 - generate a hash value based on the socket's L3 and L4 fields 5341 * @sk: socket to use for headers 5342 * 5343 * This function will extract the necessary field from the socket and use 5344 * them to generate a hash based on the LAYER34 xmit_policy. 5345 * Assumes that sk is a TCP or UDP socket. 5346 */ 5347 static u32 bond_sk_hash_l34(struct sock *sk) 5348 { 5349 struct flow_keys flow; 5350 u32 hash; 5351 5352 bond_sk_to_flow(sk, &flow); 5353 5354 /* L4 */ 5355 memcpy(&hash, &flow.ports.ports, sizeof(hash)); 5356 /* L3 */ 5357 return bond_ip_hash(hash, &flow, BOND_XMIT_POLICY_LAYER34); 5358 } 5359 5360 static struct net_device *__bond_sk_get_lower_dev(struct bonding *bond, 5361 struct sock *sk) 5362 { 5363 struct bond_up_slave *slaves; 5364 struct slave *slave; 5365 unsigned int count; 5366 u32 hash; 5367 5368 slaves = rcu_dereference(bond->usable_slaves); 5369 count = slaves ? 
READ_ONCE(slaves->count) : 0; 5370 if (unlikely(!count)) 5371 return NULL; 5372 5373 hash = bond_sk_hash_l34(sk); 5374 slave = slaves->arr[hash % count]; 5375 5376 return slave->dev; 5377 } 5378 5379 static struct net_device *bond_sk_get_lower_dev(struct net_device *dev, 5380 struct sock *sk) 5381 { 5382 struct bonding *bond = netdev_priv(dev); 5383 struct net_device *lower = NULL; 5384 5385 rcu_read_lock(); 5386 if (bond_sk_check(bond)) 5387 lower = __bond_sk_get_lower_dev(bond, sk); 5388 rcu_read_unlock(); 5389 5390 return lower; 5391 } 5392 5393 #if IS_ENABLED(CONFIG_TLS_DEVICE) 5394 static netdev_tx_t bond_tls_device_xmit(struct bonding *bond, struct sk_buff *skb, 5395 struct net_device *dev) 5396 { 5397 struct net_device *tls_netdev = rcu_dereference(tls_get_ctx(skb->sk)->netdev); 5398 5399 /* tls_netdev might become NULL, even if tls_is_sk_tx_device_offloaded 5400 * was true, if tls_device_down is running in parallel, but it's OK, 5401 * because bond_get_slave_by_dev has a NULL check. 5402 */ 5403 if (likely(bond_get_slave_by_dev(bond, tls_netdev))) 5404 return bond_dev_queue_xmit(bond, skb, tls_netdev); 5405 return bond_tx_drop(dev, skb); 5406 } 5407 #endif 5408 5409 static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev) 5410 { 5411 struct bonding *bond = netdev_priv(dev); 5412 5413 if (bond_should_override_tx_queue(bond) && 5414 !bond_slave_override(bond, skb)) 5415 return NETDEV_TX_OK; 5416 5417 #if IS_ENABLED(CONFIG_TLS_DEVICE) 5418 if (skb->sk && tls_is_sk_tx_device_offloaded(skb->sk)) 5419 return bond_tls_device_xmit(bond, skb, dev); 5420 #endif 5421 5422 switch (BOND_MODE(bond)) { 5423 case BOND_MODE_ROUNDROBIN: 5424 return bond_xmit_roundrobin(skb, dev); 5425 case BOND_MODE_ACTIVEBACKUP: 5426 return bond_xmit_activebackup(skb, dev); 5427 case BOND_MODE_8023AD: 5428 case BOND_MODE_XOR: 5429 return bond_3ad_xor_xmit(skb, dev); 5430 case BOND_MODE_BROADCAST: 5431 return bond_xmit_broadcast(skb, dev); 5432 case BOND_MODE_ALB: 5433 return bond_alb_xmit(skb, dev); 5434 case BOND_MODE_TLB: 5435 return bond_tlb_xmit(skb, dev); 5436 default: 5437 /* Should never happen, mode already checked */ 5438 netdev_err(dev, "Unknown bonding mode %d\n", BOND_MODE(bond)); 5439 WARN_ON_ONCE(1); 5440 return bond_tx_drop(dev, skb); 5441 } 5442 } 5443 5444 static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev) 5445 { 5446 struct bonding *bond = netdev_priv(dev); 5447 netdev_tx_t ret = NETDEV_TX_OK; 5448 5449 /* If we risk deadlock from transmitting this in the 5450 * netpoll path, tell netpoll to queue the frame for later tx 5451 */ 5452 if (unlikely(is_netpoll_tx_blocked(dev))) 5453 return NETDEV_TX_BUSY; 5454 5455 rcu_read_lock(); 5456 if (bond_has_slaves(bond)) 5457 ret = __bond_start_xmit(skb, dev); 5458 else 5459 ret = bond_tx_drop(dev, skb); 5460 rcu_read_unlock(); 5461 5462 return ret; 5463 } 5464 5465 static struct net_device * 5466 bond_xdp_get_xmit_slave(struct net_device *bond_dev, struct xdp_buff *xdp) 5467 { 5468 struct bonding *bond = netdev_priv(bond_dev); 5469 struct slave *slave; 5470 5471 /* Caller needs to hold rcu_read_lock() */ 5472 5473 switch (BOND_MODE(bond)) { 5474 case BOND_MODE_ROUNDROBIN: 5475 slave = bond_xdp_xmit_roundrobin_slave_get(bond, xdp); 5476 break; 5477 5478 case BOND_MODE_ACTIVEBACKUP: 5479 slave = bond_xmit_activebackup_slave_get(bond); 5480 break; 5481 5482 case BOND_MODE_8023AD: 5483 case BOND_MODE_XOR: 5484 slave = bond_xdp_xmit_3ad_xor_slave_get(bond, xdp); 5485 break; 5486 5487 default: 5488 /* Should 
never happen. Mode guarded by bond_xdp_check() */ 5489 netdev_err(bond_dev, "Unknown bonding mode %d for xdp xmit\n", BOND_MODE(bond)); 5490 WARN_ON_ONCE(1); 5491 return NULL; 5492 } 5493 5494 if (slave) 5495 return slave->dev; 5496 5497 return NULL; 5498 } 5499 5500 static int bond_xdp_xmit(struct net_device *bond_dev, 5501 int n, struct xdp_frame **frames, u32 flags) 5502 { 5503 int nxmit, err = -ENXIO; 5504 5505 rcu_read_lock(); 5506 5507 for (nxmit = 0; nxmit < n; nxmit++) { 5508 struct xdp_frame *frame = frames[nxmit]; 5509 struct xdp_frame *frames1[] = {frame}; 5510 struct net_device *slave_dev; 5511 struct xdp_buff xdp; 5512 5513 xdp_convert_frame_to_buff(frame, &xdp); 5514 5515 slave_dev = bond_xdp_get_xmit_slave(bond_dev, &xdp); 5516 if (!slave_dev) { 5517 err = -ENXIO; 5518 break; 5519 } 5520 5521 err = slave_dev->netdev_ops->ndo_xdp_xmit(slave_dev, 1, frames1, flags); 5522 if (err < 1) 5523 break; 5524 } 5525 5526 rcu_read_unlock(); 5527 5528 /* If error happened on the first frame then we can pass the error up, otherwise 5529 * report the number of frames that were xmitted. 5530 */ 5531 if (err < 0) 5532 return (nxmit == 0 ? err : nxmit); 5533 5534 return nxmit; 5535 } 5536 5537 static int bond_xdp_set(struct net_device *dev, struct bpf_prog *prog, 5538 struct netlink_ext_ack *extack) 5539 { 5540 struct bonding *bond = netdev_priv(dev); 5541 struct list_head *iter; 5542 struct slave *slave, *rollback_slave; 5543 struct bpf_prog *old_prog; 5544 struct netdev_bpf xdp = { 5545 .command = XDP_SETUP_PROG, 5546 .flags = 0, 5547 .prog = prog, 5548 .extack = extack, 5549 }; 5550 int err; 5551 5552 ASSERT_RTNL(); 5553 5554 if (!bond_xdp_check(bond)) 5555 return -EOPNOTSUPP; 5556 5557 old_prog = bond->xdp_prog; 5558 bond->xdp_prog = prog; 5559 5560 bond_for_each_slave(bond, slave, iter) { 5561 struct net_device *slave_dev = slave->dev; 5562 5563 if (!slave_dev->netdev_ops->ndo_bpf || 5564 !slave_dev->netdev_ops->ndo_xdp_xmit) { 5565 SLAVE_NL_ERR(dev, slave_dev, extack, 5566 "Slave device does not support XDP"); 5567 err = -EOPNOTSUPP; 5568 goto err; 5569 } 5570 5571 if (dev_xdp_prog_count(slave_dev) > 0) { 5572 SLAVE_NL_ERR(dev, slave_dev, extack, 5573 "Slave has XDP program loaded, please unload before enslaving"); 5574 err = -EOPNOTSUPP; 5575 goto err; 5576 } 5577 5578 err = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp); 5579 if (err < 0) { 5580 /* ndo_bpf() sets extack error message */ 5581 slave_err(dev, slave_dev, "Error %d calling ndo_bpf\n", err); 5582 goto err; 5583 } 5584 if (prog) 5585 bpf_prog_inc(prog); 5586 } 5587 5588 if (prog) { 5589 static_branch_inc(&bpf_master_redirect_enabled_key); 5590 } else if (old_prog) { 5591 bpf_prog_put(old_prog); 5592 static_branch_dec(&bpf_master_redirect_enabled_key); 5593 } 5594 5595 return 0; 5596 5597 err: 5598 /* unwind the program changes */ 5599 bond->xdp_prog = old_prog; 5600 xdp.prog = old_prog; 5601 xdp.extack = NULL; /* do not overwrite original error */ 5602 5603 bond_for_each_slave(bond, rollback_slave, iter) { 5604 struct net_device *slave_dev = rollback_slave->dev; 5605 int err_unwind; 5606 5607 if (slave == rollback_slave) 5608 break; 5609 5610 err_unwind = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp); 5611 if (err_unwind < 0) 5612 slave_err(dev, slave_dev, 5613 "Error %d when unwinding XDP program change\n", err_unwind); 5614 else if (xdp.prog) 5615 bpf_prog_inc(xdp.prog); 5616 } 5617 return err; 5618 } 5619 5620 static int bond_xdp(struct net_device *dev, struct netdev_bpf *xdp) 5621 { 5622 switch (xdp->command) { 5623 
	case XDP_SETUP_PROG:
		return bond_xdp_set(dev, xdp->prog, xdp->extack);
	default:
		return -EINVAL;
	}
}

static u32 bond_mode_bcast_speed(struct slave *slave, u32 speed)
{
	if (speed == 0 || speed == SPEED_UNKNOWN)
		speed = slave->speed;
	else
		speed = min(speed, slave->speed);

	return speed;
}

static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
					   struct ethtool_link_ksettings *cmd)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct list_head *iter;
	struct slave *slave;
	u32 speed = 0;

	cmd->base.duplex = DUPLEX_UNKNOWN;
	cmd->base.port = PORT_OTHER;

	/* Since bond_slave_can_tx returns false for all inactive or down slaves, we
	 * do not need to check mode.  Though link speed might not represent
	 * the true receive or transmit bandwidth (not all modes are symmetric)
	 * this is an accurate maximum.
	 */
	bond_for_each_slave(bond, slave, iter) {
		if (bond_slave_can_tx(slave)) {
			if (slave->speed != SPEED_UNKNOWN) {
				if (BOND_MODE(bond) == BOND_MODE_BROADCAST)
					speed = bond_mode_bcast_speed(slave,
								      speed);
				else
					speed += slave->speed;
			}
			if (cmd->base.duplex == DUPLEX_UNKNOWN &&
			    slave->duplex != DUPLEX_UNKNOWN)
				cmd->base.duplex = slave->duplex;
		}
	}
	cmd->base.speed = speed ? : SPEED_UNKNOWN;

	return 0;
}

static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
				     struct ethtool_drvinfo *drvinfo)
{
	strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d",
		 BOND_ABI_VERSION);
}

static int bond_ethtool_get_ts_info(struct net_device *bond_dev,
				    struct ethtool_ts_info *info)
{
	struct bonding *bond = netdev_priv(bond_dev);
	const struct ethtool_ops *ops;
	struct net_device *real_dev;
	struct phy_device *phydev;
	int ret = 0;

	rcu_read_lock();
	real_dev = bond_option_active_slave_get_rcu(bond);
	dev_hold(real_dev);
	rcu_read_unlock();

	if (real_dev) {
		ops = real_dev->ethtool_ops;
		phydev = real_dev->phydev;

		if (phy_has_tsinfo(phydev)) {
			ret = phy_ts_info(phydev, info);
			goto out;
		} else if (ops->get_ts_info) {
			ret = ops->get_ts_info(real_dev, info);
			goto out;
		}
	}

	info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE;
	info->phc_index = -1;

out:
	dev_put(real_dev);
	return ret;
}

static const struct ethtool_ops bond_ethtool_ops = {
	.get_drvinfo		= bond_ethtool_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_link_ksettings	= bond_ethtool_get_link_ksettings,
	.get_ts_info		= bond_ethtool_get_ts_info,
};

static const struct net_device_ops bond_netdev_ops = {
	.ndo_init		= bond_init,
	.ndo_uninit		= bond_uninit,
	.ndo_open		= bond_open,
	.ndo_stop		= bond_close,
	.ndo_start_xmit		= bond_start_xmit,
	.ndo_select_queue	= bond_select_queue,
	.ndo_get_stats64	= bond_get_stats,
	.ndo_eth_ioctl		= bond_eth_ioctl,
	.ndo_siocbond		= bond_do_ioctl,
	.ndo_siocdevprivate	= bond_siocdevprivate,
	.ndo_change_rx_flags	= bond_change_rx_flags,
	.ndo_set_rx_mode	= bond_set_rx_mode,
	.ndo_change_mtu		= bond_change_mtu,
	.ndo_set_mac_address	= bond_set_mac_address,
	.ndo_neigh_setup	= bond_neigh_setup,
	.ndo_vlan_rx_add_vid	= bond_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= bond_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_netpoll_setup	= bond_netpoll_setup,
	.ndo_netpoll_cleanup	= bond_netpoll_cleanup,
	.ndo_poll_controller	= bond_poll_controller,
#endif
	.ndo_add_slave		= bond_enslave,
	.ndo_del_slave		= bond_release,
	.ndo_fix_features	= bond_fix_features,
	.ndo_features_check	= passthru_features_check,
	.ndo_get_xmit_slave	= bond_xmit_get_slave,
	.ndo_sk_get_lower_dev	= bond_sk_get_lower_dev,
	.ndo_bpf		= bond_xdp,
	.ndo_xdp_xmit		= bond_xdp_xmit,
	.ndo_xdp_get_xmit_slave	= bond_xdp_get_xmit_slave,
};

static const struct device_type bond_type = {
	.name = "bond",
};

static void bond_destructor(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);

	if (bond->wq)
		destroy_workqueue(bond->wq);

	if (bond->rr_tx_counter)
		free_percpu(bond->rr_tx_counter);
}

void bond_setup(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);

	spin_lock_init(&bond->mode_lock);
	bond->params = bonding_defaults;

	/* Initialize pointers */
	bond->dev = bond_dev;

	/* Initialize the device entry points */
	ether_setup(bond_dev);
	bond_dev->max_mtu = ETH_MAX_MTU;
	bond_dev->netdev_ops = &bond_netdev_ops;
	bond_dev->ethtool_ops = &bond_ethtool_ops;

	bond_dev->needs_free_netdev = true;
	bond_dev->priv_destructor = bond_destructor;

	SET_NETDEV_DEVTYPE(bond_dev, &bond_type);

	/* Initialize the device options */
	bond_dev->flags |= IFF_MASTER;
	bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT | IFF_NO_QUEUE;
	bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);

#ifdef CONFIG_XFRM_OFFLOAD
	/* set up xfrm device ops (only supported in active-backup right now) */
	bond_dev->xfrmdev_ops = &bond_xfrmdev_ops;
	INIT_LIST_HEAD(&bond->ipsec_list);
	spin_lock_init(&bond->ipsec_lock);
#endif /* CONFIG_XFRM_OFFLOAD */

	/* don't acquire bond device's netif_tx_lock when transmitting */
	bond_dev->features |= NETIF_F_LLTX;

	/* By default, we declare the bond to be fully
	 * VLAN hardware accelerated capable. Special
	 * care is taken in the various xmit functions
	 * when there are slaves that are not hw accel
	 * capable
	 */

	/* Don't allow bond devices to change network namespaces. */
	bond_dev->features |= NETIF_F_NETNS_LOCAL;

	bond_dev->hw_features = BOND_VLAN_FEATURES |
				NETIF_F_HW_VLAN_CTAG_RX |
				NETIF_F_HW_VLAN_CTAG_FILTER;

	bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
	bond_dev->features |= bond_dev->hw_features;
	bond_dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
#ifdef CONFIG_XFRM_OFFLOAD
	bond_dev->hw_features |= BOND_XFRM_FEATURES;
	/* Only enable XFRM features if this is an active-backup config */
	if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
		bond_dev->features |= BOND_XFRM_FEATURES;
#endif /* CONFIG_XFRM_OFFLOAD */
}

/* Destroy a bonding device.
 * Must be under rtnl_lock when this function is called.
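 * All slaves are released here; the workqueue and per-cpu counters are
 * freed later, in bond_destructor().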
 */
static void bond_uninit(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct bond_up_slave *usable, *all;
	struct list_head *iter;
	struct slave *slave;

	bond_netpoll_cleanup(bond_dev);

	/* Release the bonded slaves */
	bond_for_each_slave(bond, slave, iter)
		__bond_release_one(bond_dev, slave->dev, true, true);
	netdev_info(bond_dev, "Released all slaves\n");

	usable = rtnl_dereference(bond->usable_slaves);
	if (usable) {
		RCU_INIT_POINTER(bond->usable_slaves, NULL);
		kfree_rcu(usable, rcu);
	}

	all = rtnl_dereference(bond->all_slaves);
	if (all) {
		RCU_INIT_POINTER(bond->all_slaves, NULL);
		kfree_rcu(all, rcu);
	}

	list_del(&bond->bond_list);

	bond_debug_unregister(bond);
}

/*------------------------- Module initialization ---------------------------*/

static int bond_check_params(struct bond_params *params)
{
	int arp_validate_value, fail_over_mac_value, primary_reselect_value, i;
	struct bond_opt_value newval;
	const struct bond_opt_value *valptr;
	int arp_all_targets_value = 0;
	u16 ad_actor_sys_prio = 0;
	u16 ad_user_port_key = 0;
	__be32 arp_target[BOND_MAX_ARP_TARGETS] = { 0 };
	int arp_ip_count;
	int bond_mode = BOND_MODE_ROUNDROBIN;
	int xmit_hashtype = BOND_XMIT_POLICY_LAYER2;
	int lacp_fast = 0;
	int tlb_dynamic_lb;

	/* Convert string parameters. */
	if (mode) {
		bond_opt_initstr(&newval, mode);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_MODE), &newval);
		if (!valptr) {
			pr_err("Error: Invalid bonding mode \"%s\"\n", mode);
			return -EINVAL;
		}
		bond_mode = valptr->value;
	}

	if (xmit_hash_policy) {
		if (bond_mode == BOND_MODE_ROUNDROBIN ||
		    bond_mode == BOND_MODE_ACTIVEBACKUP ||
		    bond_mode == BOND_MODE_BROADCAST) {
			pr_info("xmit_hash_policy param is irrelevant in mode %s\n",
				bond_mode_name(bond_mode));
		} else {
			bond_opt_initstr(&newval, xmit_hash_policy);
			valptr = bond_opt_parse(bond_opt_get(BOND_OPT_XMIT_HASH),
						&newval);
			if (!valptr) {
				pr_err("Error: Invalid xmit_hash_policy \"%s\"\n",
				       xmit_hash_policy);
				return -EINVAL;
			}
			xmit_hashtype = valptr->value;
		}
	}

	if (lacp_rate) {
		if (bond_mode != BOND_MODE_8023AD) {
			pr_info("lacp_rate param is irrelevant in mode %s\n",
				bond_mode_name(bond_mode));
		} else {
			bond_opt_initstr(&newval, lacp_rate);
			valptr = bond_opt_parse(bond_opt_get(BOND_OPT_LACP_RATE),
						&newval);
			if (!valptr) {
				pr_err("Error: Invalid lacp rate \"%s\"\n",
				       lacp_rate);
				return -EINVAL;
			}
			lacp_fast = valptr->value;
		}
	}

	if (ad_select) {
		bond_opt_initstr(&newval, ad_select);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_SELECT),
					&newval);
		if (!valptr) {
			pr_err("Error: Invalid ad_select \"%s\"\n", ad_select);
			return -EINVAL;
		}
		params->ad_select = valptr->value;
		if (bond_mode != BOND_MODE_8023AD)
			pr_warn("ad_select param only affects 802.3ad mode\n");
	} else {
		params->ad_select = BOND_AD_STABLE;
	}

	if (max_bonds < 0) {
		pr_warn("Warning: max_bonds (%d) not in range %d-%d, so it was reset to BOND_DEFAULT_MAX_BONDS (%d)\n",
			max_bonds, 0, INT_MAX, BOND_DEFAULT_MAX_BONDS);
		max_bonds = BOND_DEFAULT_MAX_BONDS;
	}

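	/* The remaining integer parameters are clamped to sane values with a
	 * warning rather than rejected outright.
	 */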
	if (miimon < 0) {
		pr_warn("Warning: miimon module parameter (%d), not in range 0-%d, so it was reset to 0\n",
			miimon, INT_MAX);
		miimon = 0;
	}

	if (updelay < 0) {
		pr_warn("Warning: updelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
			updelay, INT_MAX);
		updelay = 0;
	}

	if (downdelay < 0) {
		pr_warn("Warning: downdelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
			downdelay, INT_MAX);
		downdelay = 0;
	}

	if ((use_carrier != 0) && (use_carrier != 1)) {
		pr_warn("Warning: use_carrier module parameter (%d), not of valid value (0/1), so it was set to 1\n",
			use_carrier);
		use_carrier = 1;
	}

	if (num_peer_notif < 0 || num_peer_notif > 255) {
		pr_warn("Warning: num_grat_arp/num_unsol_na (%d) not in range 0-255 so it was reset to 1\n",
			num_peer_notif);
		num_peer_notif = 1;
	}

	/* reset values for 802.3ad/TLB/ALB */
	if (!bond_mode_uses_arp(bond_mode)) {
		if (!miimon) {
			pr_warn("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
			pr_warn("Forcing miimon to 100msec\n");
			miimon = BOND_DEFAULT_MIIMON;
		}
	}

	if (tx_queues < 1 || tx_queues > 255) {
		pr_warn("Warning: tx_queues (%d) should be between 1 and 255, resetting to %d\n",
			tx_queues, BOND_DEFAULT_TX_QUEUES);
		tx_queues = BOND_DEFAULT_TX_QUEUES;
	}

	if ((all_slaves_active != 0) && (all_slaves_active != 1)) {
		pr_warn("Warning: all_slaves_active module parameter (%d), not of valid value (0/1), so it was set to 0\n",
			all_slaves_active);
		all_slaves_active = 0;
	}

	if (resend_igmp < 0 || resend_igmp > 255) {
		pr_warn("Warning: resend_igmp (%d) should be between 0 and 255, resetting to %d\n",
			resend_igmp, BOND_DEFAULT_RESEND_IGMP);
		resend_igmp = BOND_DEFAULT_RESEND_IGMP;
	}

	bond_opt_initval(&newval, packets_per_slave);
	if (!bond_opt_parse(bond_opt_get(BOND_OPT_PACKETS_PER_SLAVE), &newval)) {
		pr_warn("Warning: packets_per_slave (%d) should be between 0 and %u resetting to 1\n",
			packets_per_slave, USHRT_MAX);
		packets_per_slave = 1;
	}

	if (bond_mode == BOND_MODE_ALB) {
		pr_notice("In ALB mode you might experience client disconnections upon reconnection of a link if the bonding module updelay parameter (%d msec) is incompatible with the forwarding delay time of the switch\n",
			  updelay);
	}

	if (!miimon) {
		if (updelay || downdelay) {
			/* just warn the user the up/down delay will have
			 * no effect since miimon is zero...
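			 * (the values are still recorded in bond->params)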
			 */
			pr_warn("Warning: miimon module parameter not set and updelay (%d) or downdelay (%d) module parameter is set; updelay and downdelay have no effect unless miimon is set\n",
				updelay, downdelay);
		}
	} else {
		/* don't allow arp monitoring */
		if (arp_interval) {
			pr_warn("Warning: miimon (%d) and arp_interval (%d) can't be used simultaneously, disabling ARP monitoring\n",
				miimon, arp_interval);
			arp_interval = 0;
		}

		if ((updelay % miimon) != 0) {
			pr_warn("Warning: updelay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
				updelay, miimon, (updelay / miimon) * miimon);
		}

		updelay /= miimon;

		if ((downdelay % miimon) != 0) {
			pr_warn("Warning: downdelay (%d) is not a multiple of miimon (%d), downdelay rounded to %d ms\n",
				downdelay, miimon,
				(downdelay / miimon) * miimon);
		}

		downdelay /= miimon;
	}

	if (arp_interval < 0) {
		pr_warn("Warning: arp_interval module parameter (%d), not in range 0-%d, so it was reset to 0\n",
			arp_interval, INT_MAX);
		arp_interval = 0;
	}

	for (arp_ip_count = 0, i = 0;
	     (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[i]; i++) {
		__be32 ip;

		/* not a complete check, but good enough to catch mistakes */
		if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) ||
		    !bond_is_ip_target_ok(ip)) {
			pr_warn("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
				arp_ip_target[i]);
			arp_interval = 0;
		} else {
			if (bond_get_targets_ip(arp_target, ip) == -1)
				arp_target[arp_ip_count++] = ip;
			else
				pr_warn("Warning: duplicate address %pI4 in arp_ip_target, skipping\n",
					&ip);
		}
	}

	if (arp_interval && !arp_ip_count) {
		/* don't allow arping if no arp_ip_target given...
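		 * (there would be nothing to probe)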
		 */
		pr_warn("Warning: arp_interval module parameter (%d) specified without providing an arp_ip_target parameter, arp_interval was reset to 0\n",
			arp_interval);
		arp_interval = 0;
	}

	if (arp_validate) {
		if (!arp_interval) {
			pr_err("arp_validate requires arp_interval\n");
			return -EINVAL;
		}

		bond_opt_initstr(&newval, arp_validate);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_VALIDATE),
					&newval);
		if (!valptr) {
			pr_err("Error: invalid arp_validate \"%s\"\n",
			       arp_validate);
			return -EINVAL;
		}
		arp_validate_value = valptr->value;
	} else {
		arp_validate_value = 0;
	}

	if (arp_all_targets) {
		bond_opt_initstr(&newval, arp_all_targets);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_ALL_TARGETS),
					&newval);
		if (!valptr) {
			pr_err("Error: invalid arp_all_targets \"%s\"\n",
			       arp_all_targets);
			arp_all_targets_value = 0;
		} else {
			arp_all_targets_value = valptr->value;
		}
	}

	if (miimon) {
		pr_info("MII link monitoring set to %d ms\n", miimon);
	} else if (arp_interval) {
		valptr = bond_opt_get_val(BOND_OPT_ARP_VALIDATE,
					  arp_validate_value);
		pr_info("ARP monitoring set to %d ms, validate %s, with %d target(s):",
			arp_interval, valptr->string, arp_ip_count);

		for (i = 0; i < arp_ip_count; i++)
			pr_cont(" %s", arp_ip_target[i]);

		pr_cont("\n");

	} else if (max_bonds) {
		/* miimon and arp_interval not set, we need one so things
		 * work as expected, see bonding.txt for details
		 */
		pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details\n");
	}

	if (primary && !bond_mode_uses_primary(bond_mode)) {
		/* currently, using a primary only makes sense
		 * in active backup, TLB or ALB modes
		 */
		pr_warn("Warning: %s primary device specified but has no effect in %s mode\n",
			primary, bond_mode_name(bond_mode));
		primary = NULL;
	}

	if (primary && primary_reselect) {
		bond_opt_initstr(&newval, primary_reselect);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_PRIMARY_RESELECT),
					&newval);
		if (!valptr) {
			pr_err("Error: Invalid primary_reselect \"%s\"\n",
			       primary_reselect);
			return -EINVAL;
		}
		primary_reselect_value = valptr->value;
	} else {
		primary_reselect_value = BOND_PRI_RESELECT_ALWAYS;
	}

	if (fail_over_mac) {
		bond_opt_initstr(&newval, fail_over_mac);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_FAIL_OVER_MAC),
					&newval);
		if (!valptr) {
			pr_err("Error: invalid fail_over_mac \"%s\"\n",
			       fail_over_mac);
			return -EINVAL;
		}
		fail_over_mac_value = valptr->value;
		if (bond_mode != BOND_MODE_ACTIVEBACKUP)
			pr_warn("Warning: fail_over_mac only affects active-backup mode\n");
	} else {
		fail_over_mac_value = BOND_FOM_NONE;
	}

	bond_opt_initstr(&newval, "default");
	valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_ACTOR_SYS_PRIO),
				&newval);
	if (!valptr) {
		pr_err("Error: No ad_actor_sys_prio default value\n");
		return -EINVAL;
	}
	ad_actor_sys_prio = valptr->value;

	valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_USER_PORT_KEY),
				&newval);
	if (!valptr) {
		pr_err("Error: No ad_user_port_key default value\n");
		return -EINVAL;
	}
	ad_user_port_key = valptr->value;

	bond_opt_initstr(&newval, "default");
	valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB), &newval);
	if (!valptr) {
		pr_err("Error: No tlb_dynamic_lb default value\n");
		return -EINVAL;
	}
	tlb_dynamic_lb = valptr->value;

	if (lp_interval == 0) {
		pr_warn("Warning: lp_interval must be between 1 and %d, so it was reset to %d\n",
			INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL);
		lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
	}

	/* fill params struct with the proper values */
	params->mode = bond_mode;
	params->xmit_policy = xmit_hashtype;
	params->miimon = miimon;
	params->num_peer_notif = num_peer_notif;
	params->arp_interval = arp_interval;
	params->arp_validate = arp_validate_value;
	params->arp_all_targets = arp_all_targets_value;
	params->missed_max = 2;
	params->updelay = updelay;
	params->downdelay = downdelay;
	params->peer_notif_delay = 0;
	params->use_carrier = use_carrier;
	params->lacp_active = 1;
	params->lacp_fast = lacp_fast;
	params->primary[0] = 0;
	params->primary_reselect = primary_reselect_value;
	params->fail_over_mac = fail_over_mac_value;
	params->tx_queues = tx_queues;
	params->all_slaves_active = all_slaves_active;
	params->resend_igmp = resend_igmp;
	params->min_links = min_links;
	params->lp_interval = lp_interval;
	params->packets_per_slave = packets_per_slave;
	params->tlb_dynamic_lb = tlb_dynamic_lb;
	params->ad_actor_sys_prio = ad_actor_sys_prio;
	eth_zero_addr(params->ad_actor_system);
	params->ad_user_port_key = ad_user_port_key;
	if (packets_per_slave > 0) {
		params->reciprocal_packets_per_slave =
			reciprocal_value(packets_per_slave);
	} else {
		/* reciprocal_packets_per_slave is unused if
		 * packets_per_slave is 0 or 1, just initialize it
		 */
		params->reciprocal_packets_per_slave =
			(struct reciprocal_value) { 0 };
	}

	if (primary)
		strscpy_pad(params->primary, primary, sizeof(params->primary));

	memcpy(params->arp_targets, arp_target, sizeof(arp_target));
#if IS_ENABLED(CONFIG_IPV6)
	memset(params->ns_targets, 0, sizeof(struct in6_addr) * BOND_MAX_NS_TARGETS);
#endif

	return 0;
}

/* Called from registration process */
static int bond_init(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);

	netdev_dbg(bond_dev, "Begin bond_init\n");

	bond->wq = alloc_ordered_workqueue(bond_dev->name, WQ_MEM_RECLAIM);
	if (!bond->wq)
		return -ENOMEM;

	spin_lock_init(&bond->stats_lock);
	netdev_lockdep_set_classes(bond_dev);

	list_add_tail(&bond->bond_list, &bn->dev_list);

	bond_prepare_sysfs_group(bond);

	bond_debug_register(bond);

	/* Ensure valid dev_addr */
	if (is_zero_ether_addr(bond_dev->dev_addr) &&
	    bond_dev->addr_assign_type == NET_ADDR_PERM)
		eth_hw_addr_random(bond_dev);

	return 0;
}

unsigned int bond_get_num_tx_queues(void)
{
	return tx_queues;
}

/* Create a new bond based on the specified name and bonding parameters.
 * If name is NULL, obtain a suitable "bond%d" name for us.
 * Caller must NOT hold rtnl_lock; we need to release it here before we
 * set up our sysfs entries.
 */
int bond_create(struct net *net, const char *name)
{
	struct net_device *bond_dev;
	struct bonding *bond;
	int res = -ENOMEM;

	rtnl_lock();

	bond_dev = alloc_netdev_mq(sizeof(struct bonding),
name : "bond%d", NET_NAME_UNKNOWN, 6310 bond_setup, tx_queues); 6311 if (!bond_dev) 6312 goto out; 6313 6314 bond = netdev_priv(bond_dev); 6315 dev_net_set(bond_dev, net); 6316 bond_dev->rtnl_link_ops = &bond_link_ops; 6317 6318 res = register_netdevice(bond_dev); 6319 if (res < 0) { 6320 free_netdev(bond_dev); 6321 goto out; 6322 } 6323 6324 netif_carrier_off(bond_dev); 6325 6326 bond_work_init_all(bond); 6327 6328 out: 6329 rtnl_unlock(); 6330 return res; 6331 } 6332 6333 static int __net_init bond_net_init(struct net *net) 6334 { 6335 struct bond_net *bn = net_generic(net, bond_net_id); 6336 6337 bn->net = net; 6338 INIT_LIST_HEAD(&bn->dev_list); 6339 6340 bond_create_proc_dir(bn); 6341 bond_create_sysfs(bn); 6342 6343 return 0; 6344 } 6345 6346 static void __net_exit bond_net_exit_batch(struct list_head *net_list) 6347 { 6348 struct bond_net *bn; 6349 struct net *net; 6350 LIST_HEAD(list); 6351 6352 list_for_each_entry(net, net_list, exit_list) { 6353 bn = net_generic(net, bond_net_id); 6354 bond_destroy_sysfs(bn); 6355 } 6356 6357 /* Kill off any bonds created after unregistering bond rtnl ops */ 6358 rtnl_lock(); 6359 list_for_each_entry(net, net_list, exit_list) { 6360 struct bonding *bond, *tmp_bond; 6361 6362 bn = net_generic(net, bond_net_id); 6363 list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list) 6364 unregister_netdevice_queue(bond->dev, &list); 6365 } 6366 unregister_netdevice_many(&list); 6367 rtnl_unlock(); 6368 6369 list_for_each_entry(net, net_list, exit_list) { 6370 bn = net_generic(net, bond_net_id); 6371 bond_destroy_proc_dir(bn); 6372 } 6373 } 6374 6375 static struct pernet_operations bond_net_ops = { 6376 .init = bond_net_init, 6377 .exit_batch = bond_net_exit_batch, 6378 .id = &bond_net_id, 6379 .size = sizeof(struct bond_net), 6380 }; 6381 6382 static int __init bonding_init(void) 6383 { 6384 int i; 6385 int res; 6386 6387 res = bond_check_params(&bonding_defaults); 6388 if (res) 6389 goto out; 6390 6391 res = register_pernet_subsys(&bond_net_ops); 6392 if (res) 6393 goto out; 6394 6395 res = bond_netlink_init(); 6396 if (res) 6397 goto err_link; 6398 6399 bond_create_debugfs(); 6400 6401 for (i = 0; i < max_bonds; i++) { 6402 res = bond_create(&init_net, NULL); 6403 if (res) 6404 goto err; 6405 } 6406 6407 skb_flow_dissector_init(&flow_keys_bonding, 6408 flow_keys_bonding_keys, 6409 ARRAY_SIZE(flow_keys_bonding_keys)); 6410 6411 register_netdevice_notifier(&bond_netdev_notifier); 6412 out: 6413 return res; 6414 err: 6415 bond_destroy_debugfs(); 6416 bond_netlink_fini(); 6417 err_link: 6418 unregister_pernet_subsys(&bond_net_ops); 6419 goto out; 6420 6421 } 6422 6423 static void __exit bonding_exit(void) 6424 { 6425 unregister_netdevice_notifier(&bond_netdev_notifier); 6426 6427 bond_destroy_debugfs(); 6428 6429 bond_netlink_fini(); 6430 unregister_pernet_subsys(&bond_net_ops); 6431 6432 #ifdef CONFIG_NET_POLL_CONTROLLER 6433 /* Make sure we don't have an imbalance on our netpoll blocking */ 6434 WARN_ON(atomic_read(&netpoll_block_tx)); 6435 #endif 6436 } 6437 6438 module_init(bonding_init); 6439 module_exit(bonding_exit); 6440 MODULE_LICENSE("GPL"); 6441 MODULE_DESCRIPTION(DRV_DESCRIPTION); 6442 MODULE_AUTHOR("Thomas Davis, tadavis@lbl.gov and many others"); 6443