/*
 * originally based on the dummy device.
 *
 * Copyright 1999, Thomas Davis, tadavis@lbl.gov.
 * Licensed under the GPL. Based on dummy.c, and eql.c devices.
 *
 * bonding.c: an Ethernet Bonding driver
 *
 * This is useful to talk to Cisco EtherChannel compatible equipment:
 *	Cisco 5500
 *	Sun Trunking (Solaris)
 *	Alteon AceDirector Trunks
 *	Linux Bonding
 *	and probably many L2 switches ...
 *
 * How it works:
 *    ifconfig bond0 ipaddress netmask up
 *	will set up a network device, with an ip address. No mac address
 *	will be assigned at this time. The hw mac address will come from
 *	the first slave bonded to the channel. All slaves will then use
 *	this hw mac address.
 *
 *    ifconfig bond0 down
 *	will release all slaves, marking them as down.
 *
 *    ifenslave bond0 eth0
 *	will attach eth0 to bond0 as a slave. eth0 hw mac address will either
 *	a: be used as initial mac address
 *	b: if a hw mac address already is there, eth0's hw mac address
 *	   will then be set from bond0.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/filter.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <net/ip.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/socket.h>
#include <linux/ctype.h>
#include <linux/inet.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <asm/dma.h>
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/smp.h>
#include <linux/if_ether.h>
#include <net/arp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_bonding.h>
#include <linux/phy.h>
#include <linux/jiffies.h>
#include <linux/preempt.h>
#include <net/route.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/pkt_sched.h>
#include <linux/rculist.h>
#include <net/flow_dissector.h>
#include <net/xfrm.h>
#include <net/bonding.h>
#include <net/bond_3ad.h>
#include <net/bond_alb.h>
#if IS_ENABLED(CONFIG_TLS_DEVICE)
#include <net/tls.h>
#endif
#include <net/ip6_route.h>

#include "bonding_priv.h"

/*---------------------------- Module parameters ----------------------------*/

/* monitor all links that often (in milliseconds).
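 * For example, miimon=100 samples every slave link ten times per second.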
 * <=0 disables monitoring */

static int max_bonds = BOND_DEFAULT_MAX_BONDS;
static int tx_queues = BOND_DEFAULT_TX_QUEUES;
static int num_peer_notif = 1;
static int miimon;
static int updelay;
static int downdelay;
static int use_carrier = 1;
static char *mode;
static char *primary;
static char *primary_reselect;
static char *lacp_rate;
static int min_links;
static char *ad_select;
static char *xmit_hash_policy;
static int arp_interval;
static char *arp_ip_target[BOND_MAX_ARP_TARGETS];
static char *arp_validate;
static char *arp_all_targets;
static char *fail_over_mac;
static int all_slaves_active;
static struct bond_params bonding_defaults;
static int resend_igmp = BOND_DEFAULT_RESEND_IGMP;
static int packets_per_slave = 1;
static int lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;

module_param(max_bonds, int, 0);
MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
module_param(tx_queues, int, 0);
MODULE_PARM_DESC(tx_queues, "Max number of transmit queues (default = 16)");
module_param_named(num_grat_arp, num_peer_notif, int, 0644);
MODULE_PARM_DESC(num_grat_arp, "Number of peer notifications to send on "
			       "failover event (alias of num_unsol_na)");
module_param_named(num_unsol_na, num_peer_notif, int, 0644);
MODULE_PARM_DESC(num_unsol_na, "Number of peer notifications to send on "
			       "failover event (alias of num_grat_arp)");
module_param(miimon, int, 0);
MODULE_PARM_DESC(miimon, "Link check interval in milliseconds");
module_param(updelay, int, 0);
MODULE_PARM_DESC(updelay, "Delay before considering link up, in milliseconds");
module_param(downdelay, int, 0);
MODULE_PARM_DESC(downdelay, "Delay before considering link down, "
			    "in milliseconds");
module_param(use_carrier, int, 0);
MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; "
			      "0 for off, 1 for on (default)");
module_param(mode, charp, 0);
MODULE_PARM_DESC(mode, "Mode of operation; 0 for balance-rr, "
		       "1 for active-backup, 2 for balance-xor, "
		       "3 for broadcast, 4 for 802.3ad, 5 for balance-tlb, "
		       "6 for balance-alb");
module_param(primary, charp, 0);
MODULE_PARM_DESC(primary, "Primary network device to use");
module_param(primary_reselect, charp, 0);
MODULE_PARM_DESC(primary_reselect, "Reselect primary slave "
				   "once it comes up; "
				   "0 for always (default), "
				   "1 for only if speed of primary is "
				   "better, "
				   "2 for only on active slave "
				   "failure");
module_param(lacp_rate, charp, 0);
MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; "
			    "0 for slow, 1 for fast");
module_param(ad_select, charp, 0);
MODULE_PARM_DESC(ad_select, "802.3ad aggregation selection logic; "
			    "0 for stable (default), 1 for bandwidth, "
			    "2 for count");
module_param(min_links, int, 0);
MODULE_PARM_DESC(min_links, "Minimum number of available links before turning on carrier");

module_param(xmit_hash_policy, charp, 0);
MODULE_PARM_DESC(xmit_hash_policy, "balance-alb, balance-tlb, balance-xor, 802.3ad hashing method; "
				   "0 for layer 2 (default), 1 for layer 3+4, "
				   "2 for layer 2+3, 3 for encap layer 2+3, "
				   "4 for encap layer 3+4, 5 for vlan+srcmac");
module_param(arp_interval, int, 0);
MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds");
module_param_array(arp_ip_target, charp, NULL, 0);
MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form");
module_param(arp_validate, charp, 0);
MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes; "
			       "0 for none (default), 1 for active, "
			       "2 for backup, 3 for all");
module_param(arp_all_targets, charp, 0);
MODULE_PARM_DESC(arp_all_targets, "fail on any/all arp targets timeout; 0 for any (default), 1 for all");
module_param(fail_over_mac, charp, 0);
MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to "
				"the same MAC; 0 for none (default), "
				"1 for active, 2 for follow");
module_param(all_slaves_active, int, 0);
MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface "
				    "by setting active flag for all slaves; "
				    "0 for never (default), 1 for always.");
module_param(resend_igmp, int, 0);
MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on "
			      "link failure");
module_param(packets_per_slave, int, 0);
MODULE_PARM_DESC(packets_per_slave, "Packets to send per slave in balance-rr "
				    "mode; 0 for a random slave, 1 packet per "
				    "slave (default), >1 packets per slave.");
module_param(lp_interval, uint, 0);
MODULE_PARM_DESC(lp_interval, "The number of seconds between instances where "
			      "the bonding driver sends learning packets to "
			      "each slave's peer switch. The default is 1.");

/*----------------------------- Global variables ----------------------------*/

#ifdef CONFIG_NET_POLL_CONTROLLER
atomic_t netpoll_block_tx = ATOMIC_INIT(0);
#endif

unsigned int bond_net_id __read_mostly;

static const struct flow_dissector_key flow_keys_bonding_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v4addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v6addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_TIPC,
		.offset = offsetof(struct flow_keys, addrs.tipckey),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_PORTS,
		.offset = offsetof(struct flow_keys, ports),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_ICMP,
		.offset = offsetof(struct flow_keys, icmp),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_VLAN,
		.offset = offsetof(struct flow_keys, vlan),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL,
		.offset = offsetof(struct flow_keys, tags),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
		.offset = offsetof(struct flow_keys, keyid),
	},
};

static struct flow_dissector flow_keys_bonding __read_mostly;

/*-------------------------- Forward declarations ---------------------------*/

static int bond_init(struct net_device *bond_dev);
static void bond_uninit(struct net_device *bond_dev);
static void bond_get_stats(struct net_device *bond_dev,
			   struct rtnl_link_stats64 *stats);
static void bond_slave_arr_handler(struct work_struct *work);
static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
				  int mod);
static void bond_netdev_notify_work(struct work_struct *work);

/*---------------------------- General routines -----------------------------*/

const char *bond_mode_name(int mode)
{
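	/* Human-readable names, indexed by the BOND_MODE_* constants */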
	static const char *names[] = {
		[BOND_MODE_ROUNDROBIN] = "load balancing (round-robin)",
		[BOND_MODE_ACTIVEBACKUP] = "fault-tolerance (active-backup)",
		[BOND_MODE_XOR] = "load balancing (xor)",
		[BOND_MODE_BROADCAST] = "fault-tolerance (broadcast)",
		[BOND_MODE_8023AD] = "IEEE 802.3ad Dynamic link aggregation",
		[BOND_MODE_TLB] = "transmit load balancing",
		[BOND_MODE_ALB] = "adaptive load balancing",
	};

	if (mode < BOND_MODE_ROUNDROBIN || mode > BOND_MODE_ALB)
		return "unknown";

	return names[mode];
}

/**
 * bond_dev_queue_xmit - Prepare skb for xmit.
 *
 * @bond: bond device that got this skb for tx.
 * @skb: hw accel VLAN tagged skb to transmit
 * @slave_dev: slave that is supposed to xmit this skbuff
 */
netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
				struct net_device *slave_dev)
{
	skb->dev = slave_dev;

	BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
		     sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
	skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);

	if (unlikely(netpoll_tx_running(bond->dev)))
		return bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);

	return dev_queue_xmit(skb);
}

static bool bond_sk_check(struct bonding *bond)
{
	switch (BOND_MODE(bond)) {
	case BOND_MODE_8023AD:
	case BOND_MODE_XOR:
		if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34)
			return true;
		fallthrough;
	default:
		return false;
	}
}

static bool bond_xdp_check(struct bonding *bond)
{
	switch (BOND_MODE(bond)) {
	case BOND_MODE_ROUNDROBIN:
	case BOND_MODE_ACTIVEBACKUP:
		return true;
	case BOND_MODE_8023AD:
	case BOND_MODE_XOR:
		/* vlan+srcmac is not supported with XDP as in most cases the 802.1q
		 * payload is not in the packet due to hardware offload.
		 */
		if (bond->params.xmit_policy != BOND_XMIT_POLICY_VLAN_SRCMAC)
			return true;
		fallthrough;
	default:
		return false;
	}
}

/*---------------------------------- VLAN -----------------------------------*/

/* In the following 2 functions, bond_vlan_rx_add_vid and bond_vlan_rx_kill_vid,
 * we don't protect the slave list iteration with a lock because:
 * a. This operation is performed in IOCTL context,
 * b. The operation is protected by the RTNL semaphore in the 8021q code,
 * c. Holding a lock with BH disabled while directly calling a base driver
 *    entry point is generally a BAD idea.
 *
 * The design of synchronization/protection for this operation in the 8021q
 * module is good for one or more VLAN devices over a single physical device
 * and cannot be extended for a teaming solution like bonding, so there is a
 * potential race condition here where a net device from the vlan group might
 * be referenced (either by a base driver or the 8021q code) while it is being
 * removed from the system. However, it turns out we're not making matters
 * worse, and if it works for regular VLAN usage it will work here too.
 */

/**
 * bond_vlan_rx_add_vid - Propagates adding an id to slaves
 * @bond_dev: bonding net device that got called
 * @proto: network protocol ID
 * @vid: vlan id being added
 */
static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
				__be16 proto, u16 vid)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave, *rollback_slave;
	struct list_head *iter;
	int res;

	bond_for_each_slave(bond, slave, iter) {
		res = vlan_vid_add(slave->dev, proto, vid);
		if (res)
			goto unwind;
	}

	return 0;

unwind:
	/* unwind to the slave that failed */
	bond_for_each_slave(bond, rollback_slave, iter) {
		if (rollback_slave == slave)
			break;

		vlan_vid_del(rollback_slave->dev, proto, vid);
	}

	return res;
}

/**
 * bond_vlan_rx_kill_vid - Propagates deleting an id to slaves
 * @bond_dev: bonding net device that got called
 * @proto: network protocol ID
 * @vid: vlan id being removed
 */
static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
				 __be16 proto, u16 vid)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct list_head *iter;
	struct slave *slave;

	bond_for_each_slave(bond, slave, iter)
		vlan_vid_del(slave->dev, proto, vid);

	if (bond_is_lb(bond))
		bond_alb_clear_vlan(bond, vid);

	return 0;
}

/*---------------------------------- XFRM -----------------------------------*/

#ifdef CONFIG_XFRM_OFFLOAD
/**
 * bond_ipsec_add_sa - program device with a security association
 * @xs: pointer to transformer state struct
 **/
static int bond_ipsec_add_sa(struct xfrm_state *xs)
{
	struct net_device *bond_dev = xs->xso.dev;
	struct bond_ipsec *ipsec;
	struct bonding *bond;
	struct slave *slave;
	int err;

	if (!bond_dev)
		return -EINVAL;

	rcu_read_lock();
	bond = netdev_priv(bond_dev);
	slave = rcu_dereference(bond->curr_active_slave);
	if (!slave) {
		rcu_read_unlock();
		return -ENODEV;
	}

	if (!slave->dev->xfrmdev_ops ||
	    !slave->dev->xfrmdev_ops->xdo_dev_state_add ||
	    netif_is_bond_master(slave->dev)) {
		slave_warn(bond_dev, slave->dev, "Slave does not support ipsec offload\n");
		rcu_read_unlock();
		return -EINVAL;
	}

	ipsec = kmalloc(sizeof(*ipsec), GFP_ATOMIC);
	if (!ipsec) {
		rcu_read_unlock();
		return -ENOMEM;
	}
	xs->xso.real_dev = slave->dev;

	err = slave->dev->xfrmdev_ops->xdo_dev_state_add(xs);
	if (!err) {
		ipsec->xs = xs;
		INIT_LIST_HEAD(&ipsec->list);
		spin_lock_bh(&bond->ipsec_lock);
		list_add(&ipsec->list, &bond->ipsec_list);
		spin_unlock_bh(&bond->ipsec_lock);
	} else {
		kfree(ipsec);
	}
	rcu_read_unlock();
	return err;
}

static void bond_ipsec_add_sa_all(struct bonding *bond)
{
	struct net_device *bond_dev = bond->dev;
	struct bond_ipsec *ipsec;
	struct slave *slave;

	rcu_read_lock();
	slave = rcu_dereference(bond->curr_active_slave);
	if (!slave)
		goto out;

	if (!slave->dev->xfrmdev_ops ||
	    !slave->dev->xfrmdev_ops->xdo_dev_state_add ||
	    netif_is_bond_master(slave->dev)) {
		spin_lock_bh(&bond->ipsec_lock);
		if (!list_empty(&bond->ipsec_list))
			slave_warn(bond_dev, slave->dev,
				   "%s: no slave xdo_dev_state_add\n",
				   __func__);
		spin_unlock_bh(&bond->ipsec_lock);
		goto out;
	}

	spin_lock_bh(&bond->ipsec_lock);
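	/* Program every cached SA on the new active slave. Entries the
	 * slave driver rejects keep real_dev == NULL, so
	 * bond_ipsec_offload_ok() will steer those packets back to the
	 * software xfrm path.
	 */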
	list_for_each_entry(ipsec, &bond->ipsec_list, list) {
		ipsec->xs->xso.real_dev = slave->dev;
		if (slave->dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs)) {
			slave_warn(bond_dev, slave->dev, "%s: failed to add SA\n", __func__);
			ipsec->xs->xso.real_dev = NULL;
		}
	}
	spin_unlock_bh(&bond->ipsec_lock);
out:
	rcu_read_unlock();
}

/**
 * bond_ipsec_del_sa - clear out this specific SA
 * @xs: pointer to transformer state struct
 **/
static void bond_ipsec_del_sa(struct xfrm_state *xs)
{
	struct net_device *bond_dev = xs->xso.dev;
	struct bond_ipsec *ipsec;
	struct bonding *bond;
	struct slave *slave;

	if (!bond_dev)
		return;

	rcu_read_lock();
	bond = netdev_priv(bond_dev);
	slave = rcu_dereference(bond->curr_active_slave);

	if (!slave)
		goto out;

	if (!xs->xso.real_dev)
		goto out;

	WARN_ON(xs->xso.real_dev != slave->dev);

	if (!slave->dev->xfrmdev_ops ||
	    !slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
	    netif_is_bond_master(slave->dev)) {
		slave_warn(bond_dev, slave->dev, "%s: no slave xdo_dev_state_delete\n", __func__);
		goto out;
	}

	slave->dev->xfrmdev_ops->xdo_dev_state_delete(xs);
out:
	spin_lock_bh(&bond->ipsec_lock);
	list_for_each_entry(ipsec, &bond->ipsec_list, list) {
		if (ipsec->xs == xs) {
			list_del(&ipsec->list);
			kfree(ipsec);
			break;
		}
	}
	spin_unlock_bh(&bond->ipsec_lock);
	rcu_read_unlock();
}

static void bond_ipsec_del_sa_all(struct bonding *bond)
{
	struct net_device *bond_dev = bond->dev;
	struct bond_ipsec *ipsec;
	struct slave *slave;

	rcu_read_lock();
	slave = rcu_dereference(bond->curr_active_slave);
	if (!slave) {
		rcu_read_unlock();
		return;
	}

	spin_lock_bh(&bond->ipsec_lock);
	list_for_each_entry(ipsec, &bond->ipsec_list, list) {
		if (!ipsec->xs->xso.real_dev)
			continue;

		if (!slave->dev->xfrmdev_ops ||
		    !slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
		    netif_is_bond_master(slave->dev)) {
			slave_warn(bond_dev, slave->dev,
				   "%s: no slave xdo_dev_state_delete\n",
				   __func__);
		} else {
			slave->dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs);
		}
		ipsec->xs->xso.real_dev = NULL;
	}
	spin_unlock_bh(&bond->ipsec_lock);
	rcu_read_unlock();
}

/**
 * bond_ipsec_offload_ok - can this packet use the xfrm hw offload
 * @skb: current data packet
 * @xs: pointer to transformer state struct
 **/
static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
	struct net_device *bond_dev = xs->xso.dev;
	struct net_device *real_dev;
	struct slave *curr_active;
	struct bonding *bond;
	int err;

	bond = netdev_priv(bond_dev);
	rcu_read_lock();
	curr_active = rcu_dereference(bond->curr_active_slave);
	real_dev = curr_active->dev;

	if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
		err = false;
		goto out;
	}

	if (!xs->xso.real_dev) {
		err = false;
		goto out;
	}

	if (!real_dev->xfrmdev_ops ||
	    !real_dev->xfrmdev_ops->xdo_dev_offload_ok ||
	    netif_is_bond_master(real_dev)) {
		err = false;
		goto out;
	}

	err = real_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs);
out:
	rcu_read_unlock();
	return err;
}

static const struct xfrmdev_ops bond_xfrmdev_ops = {
	.xdo_dev_state_add = bond_ipsec_add_sa,
	.xdo_dev_state_delete = bond_ipsec_del_sa,
	.xdo_dev_offload_ok = bond_ipsec_offload_ok,
};
#endif /* CONFIG_XFRM_OFFLOAD */

/*------------------------------- Link status -------------------------------*/

/* Set the carrier state for the master according to the state of its
 * slaves. If any slaves are up, the master is up. In 802.3ad mode,
 * do special 802.3ad magic.
 *
 * Returns zero if carrier state does not change, nonzero if it does.
 */
int bond_set_carrier(struct bonding *bond)
{
	struct list_head *iter;
	struct slave *slave;

	if (!bond_has_slaves(bond))
		goto down;

	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		return bond_3ad_set_carrier(bond);

	bond_for_each_slave(bond, slave, iter) {
		if (slave->link == BOND_LINK_UP) {
			if (!netif_carrier_ok(bond->dev)) {
				netif_carrier_on(bond->dev);
				return 1;
			}
			return 0;
		}
	}

down:
	if (netif_carrier_ok(bond->dev)) {
		netif_carrier_off(bond->dev);
		return 1;
	}
	return 0;
}

/* Get link speed and duplex from the slave's base driver
 * using ethtool. If for some reason the call fails or the
 * values are invalid, set speed and duplex to -1,
 * and return. Return 1 if speed or duplex settings are
 * UNKNOWN; 0 otherwise.
 */
static int bond_update_speed_duplex(struct slave *slave)
{
	struct net_device *slave_dev = slave->dev;
	struct ethtool_link_ksettings ecmd;
	int res;

	slave->speed = SPEED_UNKNOWN;
	slave->duplex = DUPLEX_UNKNOWN;

	res = __ethtool_get_link_ksettings(slave_dev, &ecmd);
	if (res < 0)
		return 1;
	if (ecmd.base.speed == 0 || ecmd.base.speed == ((__u32)-1))
		return 1;
	switch (ecmd.base.duplex) {
	case DUPLEX_FULL:
	case DUPLEX_HALF:
		break;
	default:
		return 1;
	}

	slave->speed = ecmd.base.speed;
	slave->duplex = ecmd.base.duplex;

	return 0;
}

const char *bond_slave_link_status(s8 link)
{
	switch (link) {
	case BOND_LINK_UP:
		return "up";
	case BOND_LINK_FAIL:
		return "going down";
	case BOND_LINK_DOWN:
		return "down";
	case BOND_LINK_BACK:
		return "going back";
	default:
		return "unknown";
	}
}

/* if <dev> supports MII link status reporting, check its link status.
 *
 * We either do MII/ETHTOOL ioctls, or check netif_carrier_ok(),
 * depending upon the setting of the use_carrier parameter.
 *
 * Return either BMSR_LSTATUS, meaning that the link is up (or we
 * can't tell and just pretend it is), or 0, meaning that the link is
 * down.
 *
 * If reporting is non-zero, instead of faking link up, return -1 if
 * both ETHTOOL and MII ioctls fail (meaning the device does not
 * support them). If use_carrier is set, return whatever it says.
 * It'd be nice if there was a good way to tell if a driver supports
 * netif_carrier, but there really isn't.
 */
static int bond_check_dev_link(struct bonding *bond,
			       struct net_device *slave_dev, int reporting)
{
	const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
	int (*ioctl)(struct net_device *, struct ifreq *, int);
	struct ifreq ifr;
	struct mii_ioctl_data *mii;

	if (!reporting && !netif_running(slave_dev))
		return 0;

	if (bond->params.use_carrier)
		return netif_carrier_ok(slave_dev) ? BMSR_LSTATUS : 0;

	/* Try to get link status using Ethtool first.
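	 * Drivers that implement ethtool_ops->get_link are handled here;
	 * the MII ioctl path below is a fallback for those that do not.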
	 */
	if (slave_dev->ethtool_ops->get_link)
		return slave_dev->ethtool_ops->get_link(slave_dev) ?
			BMSR_LSTATUS : 0;

	/* Ethtool can't be used, fall back to MII ioctls. */
	ioctl = slave_ops->ndo_eth_ioctl;
	if (ioctl) {
		/* TODO: set pointer to correct ioctl on a per team member
		 *       bases to make this more efficient. that is, once
		 *       we determine the correct ioctl, we will always
		 *       call it and not the others for that team
		 *       member.
		 */

		/* We cannot assume that SIOCGMIIPHY will also read a
		 * register; not all network drivers (e.g., e100)
		 * support that.
		 */

		/* Yes, the mii is overlaid on the ifreq.ifr_ifru */
		strscpy_pad(ifr.ifr_name, slave_dev->name, IFNAMSIZ);
		mii = if_mii(&ifr);
		if (ioctl(slave_dev, &ifr, SIOCGMIIPHY) == 0) {
			mii->reg_num = MII_BMSR;
			if (ioctl(slave_dev, &ifr, SIOCGMIIREG) == 0)
				return mii->val_out & BMSR_LSTATUS;
		}
	}

	/* If reporting, report that either there's no ndo_eth_ioctl,
	 * or both SIOCGMIIREG and get_link failed (meaning that we
	 * cannot report link status). If not reporting, pretend
	 * we're ok.
	 */
	return reporting ? -1 : BMSR_LSTATUS;
}

/*----------------------------- Multicast list ------------------------------*/

/* Push the promiscuity flag down to appropriate slaves */
static int bond_set_promiscuity(struct bonding *bond, int inc)
{
	struct list_head *iter;
	int err = 0;

	if (bond_uses_primary(bond)) {
		struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);

		if (curr_active)
			err = dev_set_promiscuity(curr_active->dev, inc);
	} else {
		struct slave *slave;

		bond_for_each_slave(bond, slave, iter) {
			err = dev_set_promiscuity(slave->dev, inc);
			if (err)
				return err;
		}
	}
	return err;
}

/* Push the allmulti flag down to all slaves */
static int bond_set_allmulti(struct bonding *bond, int inc)
{
	struct list_head *iter;
	int err = 0;

	if (bond_uses_primary(bond)) {
		struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);

		if (curr_active)
			err = dev_set_allmulti(curr_active->dev, inc);
	} else {
		struct slave *slave;

		bond_for_each_slave(bond, slave, iter) {
			err = dev_set_allmulti(slave->dev, inc);
			if (err)
				return err;
		}
	}
	return err;
}

/* Retrieve the list of registered multicast addresses for the bonding
 * device and retransmit an IGMP JOIN request to the current active
 * slave.
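 * After a failover this prompts the switch to relearn the bond's
 * multicast group memberships on the newly active port.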
 */
static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
{
	struct bonding *bond = container_of(work, struct bonding,
					    mcast_work.work);

	if (!rtnl_trylock()) {
		queue_delayed_work(bond->wq, &bond->mcast_work, 1);
		return;
	}
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, bond->dev);

	if (bond->igmp_retrans > 1) {
		bond->igmp_retrans--;
		queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
	}
	rtnl_unlock();
}

/* Flush bond's hardware addresses from slave */
static void bond_hw_addr_flush(struct net_device *bond_dev,
			       struct net_device *slave_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);

	dev_uc_unsync(slave_dev, bond_dev);
	dev_mc_unsync(slave_dev, bond_dev);

	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		dev_mc_del(slave_dev, lacpdu_mcast_addr);
}

/*--------------------------- Active slave change ---------------------------*/

/* Update the hardware address list and promisc/allmulti for the new and
 * old active slaves (if any). Modes that are not using primary keep all
 * slaves up-to-date at all times; only the modes that use primary need to
 * call this function to swap these settings during a failover.
 */
static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
			      struct slave *old_active)
{
	if (old_active) {
		if (bond->dev->flags & IFF_PROMISC)
			dev_set_promiscuity(old_active->dev, -1);

		if (bond->dev->flags & IFF_ALLMULTI)
			dev_set_allmulti(old_active->dev, -1);

		if (bond->dev->flags & IFF_UP)
			bond_hw_addr_flush(bond->dev, old_active->dev);
	}

	if (new_active) {
		/* FIXME: Signal errors upstream. */
		if (bond->dev->flags & IFF_PROMISC)
			dev_set_promiscuity(new_active->dev, 1);

		if (bond->dev->flags & IFF_ALLMULTI)
			dev_set_allmulti(new_active->dev, 1);

		if (bond->dev->flags & IFF_UP) {
			netif_addr_lock_bh(bond->dev);
			dev_uc_sync(new_active->dev, bond->dev);
			dev_mc_sync(new_active->dev, bond->dev);
			netif_addr_unlock_bh(bond->dev);
		}
	}
}

/**
 * bond_set_dev_addr - clone slave's address to bond
 * @bond_dev: bond net device
 * @slave_dev: slave net device
 *
 * Should be called with RTNL held.
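 *
 * Return: 0 on success, or the error from dev_pre_changeaddr_notify() if
 * a notifier vetoed the new address.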
 */
static int bond_set_dev_addr(struct net_device *bond_dev,
			     struct net_device *slave_dev)
{
	int err;

	slave_dbg(bond_dev, slave_dev, "bond_dev=%p slave_dev=%p slave_dev->addr_len=%d\n",
		  bond_dev, slave_dev, slave_dev->addr_len);
	err = dev_pre_changeaddr_notify(bond_dev, slave_dev->dev_addr, NULL);
	if (err)
		return err;

	__dev_addr_set(bond_dev, slave_dev->dev_addr, slave_dev->addr_len);
	bond_dev->addr_assign_type = NET_ADDR_STOLEN;
	call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
	return 0;
}

static struct slave *bond_get_old_active(struct bonding *bond,
					 struct slave *new_active)
{
	struct slave *slave;
	struct list_head *iter;

	bond_for_each_slave(bond, slave, iter) {
		if (slave == new_active)
			continue;

		if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr))
			return slave;
	}

	return NULL;
}

/* bond_do_fail_over_mac
 *
 * Perform special MAC address swapping for fail_over_mac settings
 *
 * Called with RTNL
 */
static void bond_do_fail_over_mac(struct bonding *bond,
				  struct slave *new_active,
				  struct slave *old_active)
{
	u8 tmp_mac[MAX_ADDR_LEN];
	struct sockaddr_storage ss;
	int rv;

	switch (bond->params.fail_over_mac) {
	case BOND_FOM_ACTIVE:
		if (new_active) {
			rv = bond_set_dev_addr(bond->dev, new_active->dev);
			if (rv)
				slave_err(bond->dev, new_active->dev, "Error %d setting bond MAC from slave\n",
					  -rv);
		}
		break;
	case BOND_FOM_FOLLOW:
		/* if new_active && old_active, swap them
		 * if just old_active, do nothing (going to no active slave)
		 * if just new_active, set new_active to bond's MAC
		 */
		if (!new_active)
			return;

		if (!old_active)
			old_active = bond_get_old_active(bond, new_active);

		if (old_active) {
			bond_hw_addr_copy(tmp_mac, new_active->dev->dev_addr,
					  new_active->dev->addr_len);
			bond_hw_addr_copy(ss.__data,
					  old_active->dev->dev_addr,
					  old_active->dev->addr_len);
			ss.ss_family = new_active->dev->type;
		} else {
			bond_hw_addr_copy(ss.__data, bond->dev->dev_addr,
					  bond->dev->addr_len);
			ss.ss_family = bond->dev->type;
		}

		rv = dev_set_mac_address(new_active->dev,
					 (struct sockaddr *)&ss, NULL);
		if (rv) {
			slave_err(bond->dev, new_active->dev, "Error %d setting MAC of new active slave\n",
				  -rv);
			goto out;
		}

		if (!old_active)
			goto out;

		bond_hw_addr_copy(ss.__data, tmp_mac,
				  new_active->dev->addr_len);
		ss.ss_family = old_active->dev->type;

		rv = dev_set_mac_address(old_active->dev,
					 (struct sockaddr *)&ss, NULL);
		if (rv)
			slave_err(bond->dev, old_active->dev, "Error %d setting MAC of old active slave\n",
				  -rv);
out:
		break;
	default:
		netdev_err(bond->dev, "bond_do_fail_over_mac impossible: bad policy %d\n",
			   bond->params.fail_over_mac);
		break;
	}
}

/**
 * bond_choose_primary_or_current - select the primary or high priority slave
 * @bond: our bonding struct
 *
 * - Check if there is a primary link. If the primary link was set and is up,
 *   go on and do link reselection.
 *
 * - If primary link is not set or down, find the highest priority link.
 *   If the highest priority link is not current slave, set it as primary
 *   link and do link reselection.
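 *
 * Return: the slave that should become the active one, or NULL if no
 * usable slave is found.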
 */
static struct slave *bond_choose_primary_or_current(struct bonding *bond)
{
	struct slave *prim = rtnl_dereference(bond->primary_slave);
	struct slave *curr = rtnl_dereference(bond->curr_active_slave);
	struct slave *slave, *hprio = NULL;
	struct list_head *iter;

	if (!prim || prim->link != BOND_LINK_UP) {
		bond_for_each_slave(bond, slave, iter) {
			if (slave->link == BOND_LINK_UP) {
				hprio = hprio ?: slave;
				if (slave->prio > hprio->prio)
					hprio = slave;
			}
		}

		if (hprio && hprio != curr) {
			prim = hprio;
			goto link_reselect;
		}

		if (!curr || curr->link != BOND_LINK_UP)
			return NULL;
		return curr;
	}

	if (bond->force_primary) {
		bond->force_primary = false;
		return prim;
	}

link_reselect:
	if (!curr || curr->link != BOND_LINK_UP)
		return prim;

	/* At this point, prim and curr are both up */
	switch (bond->params.primary_reselect) {
	case BOND_PRI_RESELECT_ALWAYS:
		return prim;
	case BOND_PRI_RESELECT_BETTER:
		if (prim->speed < curr->speed)
			return curr;
		if (prim->speed == curr->speed && prim->duplex <= curr->duplex)
			return curr;
		return prim;
	case BOND_PRI_RESELECT_FAILURE:
		return curr;
	default:
		netdev_err(bond->dev, "impossible primary_reselect %d\n",
			   bond->params.primary_reselect);
		return curr;
	}
}

/**
 * bond_find_best_slave - select the best available slave to be the active one
 * @bond: our bonding struct
 */
static struct slave *bond_find_best_slave(struct bonding *bond)
{
	struct slave *slave, *bestslave = NULL;
	struct list_head *iter;
	int mintime = bond->params.updelay;

	slave = bond_choose_primary_or_current(bond);
	if (slave)
		return slave;

	bond_for_each_slave(bond, slave, iter) {
		if (slave->link == BOND_LINK_UP)
			return slave;
		if (slave->link == BOND_LINK_BACK && bond_slave_is_up(slave) &&
		    slave->delay < mintime) {
			mintime = slave->delay;
			bestslave = slave;
		}
	}

	return bestslave;
}

static bool bond_should_notify_peers(struct bonding *bond)
{
	struct slave *slave;

	rcu_read_lock();
	slave = rcu_dereference(bond->curr_active_slave);
	rcu_read_unlock();

	if (!slave || !bond->send_peer_notif ||
	    bond->send_peer_notif %
	    max(1, bond->params.peer_notif_delay) != 0 ||
	    !netif_carrier_ok(bond->dev) ||
	    test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
		return false;

	netdev_dbg(bond->dev, "bond_should_notify_peers: slave %s\n",
		   slave ? slave->dev->name : "NULL");

	return true;
}

/**
 * bond_change_active_slave - change the active slave into the specified one
 * @bond: our bonding struct
 * @new_active: the new slave to make the active one
 *
 * Set the new slave to the bond's settings and unset them on the old
 * curr_active_slave.
 * Settings include flags, mc-list, promiscuity, allmulti, etc.
 *
 * If @new's link state is %BOND_LINK_BACK we'll set it to %BOND_LINK_UP,
 * because it is apparently the best available slave we have, even though its
 * updelay hasn't timed out yet.
 *
 * Caller must hold RTNL.
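 *
 * When IPsec hardware offload is in use (CONFIG_XFRM_OFFLOAD), the cached
 * SAs are also deleted from the old active slave and re-programmed on the
 * new one via bond_ipsec_del_sa_all() and bond_ipsec_add_sa_all().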
 */
void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
{
	struct slave *old_active;

	ASSERT_RTNL();

	old_active = rtnl_dereference(bond->curr_active_slave);

	if (old_active == new_active)
		return;

#ifdef CONFIG_XFRM_OFFLOAD
	bond_ipsec_del_sa_all(bond);
#endif /* CONFIG_XFRM_OFFLOAD */

	if (new_active) {
		new_active->last_link_up = jiffies;

		if (new_active->link == BOND_LINK_BACK) {
			if (bond_uses_primary(bond)) {
				slave_info(bond->dev, new_active->dev, "making interface the new active one %d ms earlier\n",
					   (bond->params.updelay - new_active->delay) * bond->params.miimon);
			}

			new_active->delay = 0;
			bond_set_slave_link_state(new_active, BOND_LINK_UP,
						  BOND_SLAVE_NOTIFY_NOW);

			if (BOND_MODE(bond) == BOND_MODE_8023AD)
				bond_3ad_handle_link_change(new_active, BOND_LINK_UP);

			if (bond_is_lb(bond))
				bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
		} else {
			if (bond_uses_primary(bond))
				slave_info(bond->dev, new_active->dev, "making interface the new active one\n");
		}
	}

	if (bond_uses_primary(bond))
		bond_hw_addr_swap(bond, new_active, old_active);

	if (bond_is_lb(bond)) {
		bond_alb_handle_active_change(bond, new_active);
		if (old_active)
			bond_set_slave_inactive_flags(old_active,
						      BOND_SLAVE_NOTIFY_NOW);
		if (new_active)
			bond_set_slave_active_flags(new_active,
						    BOND_SLAVE_NOTIFY_NOW);
	} else {
		rcu_assign_pointer(bond->curr_active_slave, new_active);
	}

	if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
		if (old_active)
			bond_set_slave_inactive_flags(old_active,
						      BOND_SLAVE_NOTIFY_NOW);

		if (new_active) {
			bool should_notify_peers = false;

			bond_set_slave_active_flags(new_active,
						    BOND_SLAVE_NOTIFY_NOW);

			if (bond->params.fail_over_mac)
				bond_do_fail_over_mac(bond, new_active,
						      old_active);

			if (netif_running(bond->dev)) {
				bond->send_peer_notif =
					bond->params.num_peer_notif *
					max(1, bond->params.peer_notif_delay);
				should_notify_peers =
					bond_should_notify_peers(bond);
			}

			call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev);
			if (should_notify_peers) {
				bond->send_peer_notif--;
				call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
							 bond->dev);
			}
		}
	}

#ifdef CONFIG_XFRM_OFFLOAD
	bond_ipsec_add_sa_all(bond);
#endif /* CONFIG_XFRM_OFFLOAD */

	/* resend IGMP joins since active slave has changed or
	 * all were sent on curr_active_slave.
	 * resend only if bond is brought up with the affected
	 * bonding modes and the retransmission is enabled
	 */
	if (netif_running(bond->dev) && (bond->params.resend_igmp > 0) &&
	    ((bond_uses_primary(bond) && new_active) ||
	     BOND_MODE(bond) == BOND_MODE_ROUNDROBIN)) {
		bond->igmp_retrans = bond->params.resend_igmp;
		queue_delayed_work(bond->wq, &bond->mcast_work, 1);
	}
}

/**
 * bond_select_active_slave - select a new active slave, if needed
 * @bond: our bonding struct
 *
 * This function should be called when one of the following occurs:
 * - The old curr_active_slave has been released or lost its link.
 * - The primary_slave has got its link back.
 * - A slave has got its link back and there's no old curr_active_slave.
 *
 * Caller must hold RTNL.
 */
void bond_select_active_slave(struct bonding *bond)
{
	struct slave *best_slave;
	int rv;

	ASSERT_RTNL();

	best_slave = bond_find_best_slave(bond);
	if (best_slave != rtnl_dereference(bond->curr_active_slave)) {
		bond_change_active_slave(bond, best_slave);
		rv = bond_set_carrier(bond);
		if (!rv)
			return;

		if (netif_carrier_ok(bond->dev))
			netdev_info(bond->dev, "active interface up!\n");
		else
			netdev_info(bond->dev, "now running without any active interface!\n");
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static inline int slave_enable_netpoll(struct slave *slave)
{
	struct netpoll *np;
	int err = 0;

	np = kzalloc(sizeof(*np), GFP_KERNEL);
	err = -ENOMEM;
	if (!np)
		goto out;

	err = __netpoll_setup(np, slave->dev);
	if (err) {
		kfree(np);
		goto out;
	}
	slave->np = np;
out:
	return err;
}
static inline void slave_disable_netpoll(struct slave *slave)
{
	struct netpoll *np = slave->np;

	if (!np)
		return;

	slave->np = NULL;

	__netpoll_free(np);
}

static void bond_poll_controller(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave = NULL;
	struct list_head *iter;
	struct ad_info ad_info;

	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		if (bond_3ad_get_active_agg_info(bond, &ad_info))
			return;

	bond_for_each_slave_rcu(bond, slave, iter) {
		if (!bond_slave_is_up(slave))
			continue;

		if (BOND_MODE(bond) == BOND_MODE_8023AD) {
			struct aggregator *agg =
				SLAVE_AD_INFO(slave)->port.aggregator;

			if (agg &&
			    agg->aggregator_identifier != ad_info.aggregator_id)
				continue;
		}

		netpoll_poll_dev(slave->dev);
	}
}

static void bond_netpoll_cleanup(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct list_head *iter;
	struct slave *slave;

	bond_for_each_slave(bond, slave, iter)
		if (bond_slave_is_up(slave))
			slave_disable_netpoll(slave);
}

static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
{
	struct bonding *bond = netdev_priv(dev);
	struct list_head *iter;
	struct slave *slave;
	int err = 0;

	bond_for_each_slave(bond, slave, iter) {
		err = slave_enable_netpoll(slave);
		if (err) {
			bond_netpoll_cleanup(dev);
			break;
		}
	}
	return err;
}
#else
static inline int slave_enable_netpoll(struct slave *slave)
{
	return 0;
}
static inline void slave_disable_netpoll(struct slave *slave)
{
}
static void bond_netpoll_cleanup(struct net_device *bond_dev)
{
}
#endif

/*---------------------------------- IOCTL ----------------------------------*/

static netdev_features_t bond_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	struct bonding *bond = netdev_priv(dev);
	struct list_head *iter;
	netdev_features_t mask;
	struct slave *slave;

	mask = features;

	features &= ~NETIF_F_ONE_FOR_ALL;
	features |= NETIF_F_ALL_FOR_ALL;

	bond_for_each_slave(bond, slave, iter) {
		features = netdev_increment_features(features,
						     slave->dev->features,
						     mask);
	}
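	/* Re-add the TSO bits the mask permits: slaves without TSO are
	 * handled by software GSO, so the bond can still advertise it.
	 */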
	features = netdev_add_tso_features(features, mask);

	return features;
}

#define BOND_VLAN_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
				 NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \
				 NETIF_F_HIGHDMA | NETIF_F_LRO)

#define BOND_ENC_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
				 NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE)

#define BOND_MPLS_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
				 NETIF_F_GSO_SOFTWARE)

static void bond_compute_features(struct bonding *bond)
{
	unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
					IFF_XMIT_DST_RELEASE_PERM;
	netdev_features_t vlan_features = BOND_VLAN_FEATURES;
	netdev_features_t enc_features = BOND_ENC_FEATURES;
#ifdef CONFIG_XFRM_OFFLOAD
	netdev_features_t xfrm_features = BOND_XFRM_FEATURES;
#endif /* CONFIG_XFRM_OFFLOAD */
	netdev_features_t mpls_features = BOND_MPLS_FEATURES;
	struct net_device *bond_dev = bond->dev;
	struct list_head *iter;
	struct slave *slave;
	unsigned short max_hard_header_len = ETH_HLEN;
	unsigned int tso_max_size = TSO_MAX_SIZE;
	u16 tso_max_segs = TSO_MAX_SEGS;

	if (!bond_has_slaves(bond))
		goto done;
	vlan_features &= NETIF_F_ALL_FOR_ALL;
	mpls_features &= NETIF_F_ALL_FOR_ALL;

	bond_for_each_slave(bond, slave, iter) {
		vlan_features = netdev_increment_features(vlan_features,
							  slave->dev->vlan_features,
							  BOND_VLAN_FEATURES);

		enc_features = netdev_increment_features(enc_features,
							 slave->dev->hw_enc_features,
							 BOND_ENC_FEATURES);

#ifdef CONFIG_XFRM_OFFLOAD
		xfrm_features = netdev_increment_features(xfrm_features,
							  slave->dev->hw_enc_features,
							  BOND_XFRM_FEATURES);
#endif /* CONFIG_XFRM_OFFLOAD */

		mpls_features = netdev_increment_features(mpls_features,
							  slave->dev->mpls_features,
							  BOND_MPLS_FEATURES);

		dst_release_flag &= slave->dev->priv_flags;
		if (slave->dev->hard_header_len > max_hard_header_len)
			max_hard_header_len = slave->dev->hard_header_len;

		tso_max_size = min(tso_max_size, slave->dev->tso_max_size);
		tso_max_segs = min(tso_max_segs, slave->dev->tso_max_segs);
	}
	bond_dev->hard_header_len = max_hard_header_len;

done:
	bond_dev->vlan_features = vlan_features;
	bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
				    NETIF_F_HW_VLAN_CTAG_TX |
				    NETIF_F_HW_VLAN_STAG_TX;
#ifdef CONFIG_XFRM_OFFLOAD
	bond_dev->hw_enc_features |= xfrm_features;
#endif /* CONFIG_XFRM_OFFLOAD */
	bond_dev->mpls_features = mpls_features;
	netif_set_tso_max_segs(bond_dev, tso_max_segs);
	netif_set_tso_max_size(bond_dev, tso_max_size);

	bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
	if ((bond_dev->priv_flags & IFF_XMIT_DST_RELEASE_PERM) &&
	    dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
		bond_dev->priv_flags |= IFF_XMIT_DST_RELEASE;

	netdev_change_features(bond_dev);
}

static void bond_setup_by_slave(struct net_device *bond_dev,
				struct net_device *slave_dev)
{
	bond_dev->header_ops	    = slave_dev->header_ops;

	bond_dev->type		    = slave_dev->type;
	bond_dev->hard_header_len   = slave_dev->hard_header_len;
	bond_dev->needed_headroom   = slave_dev->needed_headroom;
	bond_dev->addr_len	    = slave_dev->addr_len;

	memcpy(bond_dev->broadcast, slave_dev->broadcast,
	       slave_dev->addr_len);
}

/* On bonding slaves
 * other than the currently active slave, suppress duplicates except
 * for alb non-mcast/bcast.
 */
static bool bond_should_deliver_exact_match(struct sk_buff *skb,
					    struct slave *slave,
					    struct bonding *bond)
{
	if (bond_is_slave_inactive(slave)) {
		if (BOND_MODE(bond) == BOND_MODE_ALB &&
		    skb->pkt_type != PACKET_BROADCAST &&
		    skb->pkt_type != PACKET_MULTICAST)
			return false;
		return true;
	}
	return false;
}

static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct slave *slave;
	struct bonding *bond;
	int (*recv_probe)(const struct sk_buff *, struct bonding *,
			  struct slave *);
	int ret = RX_HANDLER_ANOTHER;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return RX_HANDLER_CONSUMED;

	*pskb = skb;

	slave = bond_slave_get_rcu(skb->dev);
	bond = slave->bond;

	recv_probe = READ_ONCE(bond->recv_probe);
	if (recv_probe) {
		ret = recv_probe(skb, bond, slave);
		if (ret == RX_HANDLER_CONSUMED) {
			consume_skb(skb);
			return ret;
		}
	}

	/* For packets that bond_should_deliver_exact_match() determines
	 * should be suppressed, we make an exception for link-local packets.
	 * This is necessary for e.g. LLDP daemons to be able to monitor
	 * inactive slave links without being forced to bind to them
	 * explicitly.
	 *
	 * At the same time, packets that are passed to the bonding master
	 * (including link-local ones) can have their originating interface
	 * determined via the PACKET_ORIGDEV socket option.
	 */
	if (bond_should_deliver_exact_match(skb, slave, bond)) {
		if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
			return RX_HANDLER_PASS;
		return RX_HANDLER_EXACT;
	}

	skb->dev = bond->dev;

	if (BOND_MODE(bond) == BOND_MODE_ALB &&
	    netif_is_bridge_port(bond->dev) &&
	    skb->pkt_type == PACKET_HOST) {

		if (unlikely(skb_cow_head(skb,
					  skb->data - skb_mac_header(skb)))) {
			kfree_skb(skb);
			return RX_HANDLER_CONSUMED;
		}
		bond_hw_addr_copy(eth_hdr(skb)->h_dest, bond->dev->dev_addr,
				  bond->dev->addr_len);
	}

	return ret;
}

static enum netdev_lag_tx_type bond_lag_tx_type(struct bonding *bond)
{
	switch (BOND_MODE(bond)) {
	case BOND_MODE_ROUNDROBIN:
		return NETDEV_LAG_TX_TYPE_ROUNDROBIN;
	case BOND_MODE_ACTIVEBACKUP:
		return NETDEV_LAG_TX_TYPE_ACTIVEBACKUP;
	case BOND_MODE_BROADCAST:
		return NETDEV_LAG_TX_TYPE_BROADCAST;
	case BOND_MODE_XOR:
	case BOND_MODE_8023AD:
		return NETDEV_LAG_TX_TYPE_HASH;
	default:
		return NETDEV_LAG_TX_TYPE_UNKNOWN;
	}
}

static enum netdev_lag_hash bond_lag_hash_type(struct bonding *bond,
					       enum netdev_lag_tx_type type)
{
	if (type != NETDEV_LAG_TX_TYPE_HASH)
		return NETDEV_LAG_HASH_NONE;

	switch (bond->params.xmit_policy) {
	case BOND_XMIT_POLICY_LAYER2:
		return NETDEV_LAG_HASH_L2;
	case BOND_XMIT_POLICY_LAYER34:
		return NETDEV_LAG_HASH_L34;
	case BOND_XMIT_POLICY_LAYER23:
		return NETDEV_LAG_HASH_L23;
	case BOND_XMIT_POLICY_ENCAP23:
		return NETDEV_LAG_HASH_E23;
	case BOND_XMIT_POLICY_ENCAP34:
		return NETDEV_LAG_HASH_E34;
	case BOND_XMIT_POLICY_VLAN_SRCMAC:
		return NETDEV_LAG_HASH_VLAN_SRCMAC;
	default:
		return NETDEV_LAG_HASH_UNKNOWN;
	}
}

static int bond_master_upper_dev_link(struct bonding *bond, struct slave *slave,
				      struct netlink_ext_ack *extack)
{
	struct netdev_lag_upper_info lag_upper_info;
	enum netdev_lag_tx_type type;
	int err;

	type = bond_lag_tx_type(bond);
	lag_upper_info.tx_type = type;
	lag_upper_info.hash_type = bond_lag_hash_type(bond, type);

	err = netdev_master_upper_dev_link(slave->dev, bond->dev, slave,
					   &lag_upper_info, extack);
	if (err)
		return err;

	slave->dev->flags |= IFF_SLAVE;
	return 0;
}

static void bond_upper_dev_unlink(struct bonding *bond, struct slave *slave)
{
	netdev_upper_dev_unlink(slave->dev, bond->dev);
	slave->dev->flags &= ~IFF_SLAVE;
}

static void slave_kobj_release(struct kobject *kobj)
{
	struct slave *slave = to_slave(kobj);
	struct bonding *bond = bond_get_bond_by_slave(slave);

	cancel_delayed_work_sync(&slave->notify_work);
	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		kfree(SLAVE_AD_INFO(slave));

	kfree(slave);
}

static struct kobj_type slave_ktype = {
	.release = slave_kobj_release,
#ifdef CONFIG_SYSFS
	.sysfs_ops = &slave_sysfs_ops,
#endif
};

static int bond_kobj_init(struct slave *slave)
{
	int err;

	err = kobject_init_and_add(&slave->kobj, &slave_ktype,
				   &(slave->dev->dev.kobj), "bonding_slave");
	if (err)
		kobject_put(&slave->kobj);

	return err;
}

static struct slave *bond_alloc_slave(struct bonding *bond,
				      struct net_device *slave_dev)
{
	struct slave *slave = NULL;

	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	if (!slave)
		return NULL;

	slave->bond = bond;
	slave->dev = slave_dev;
	INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);

	if (bond_kobj_init(slave))
		return NULL;

	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
		SLAVE_AD_INFO(slave) = kzalloc(sizeof(struct ad_slave_info),
					       GFP_KERNEL);
		if (!SLAVE_AD_INFO(slave)) {
			kobject_put(&slave->kobj);
			return NULL;
		}
	}

	return slave;
}

static void bond_fill_ifbond(struct bonding *bond, struct ifbond *info)
{
	info->bond_mode = BOND_MODE(bond);
	info->miimon = bond->params.miimon;
	info->num_slaves = bond->slave_cnt;
}

static void bond_fill_ifslave(struct slave *slave, struct ifslave *info)
{
	strcpy(info->slave_name, slave->dev->name);
	info->link = slave->link;
	info->state = bond_slave_state(slave);
	info->link_failure_count = slave->link_failure_count;
}

static void bond_netdev_notify_work(struct work_struct *_work)
{
	struct slave *slave = container_of(_work, struct slave,
					   notify_work.work);

	if (rtnl_trylock()) {
		struct netdev_bonding_info binfo;

		bond_fill_ifslave(slave, &binfo.slave);
		bond_fill_ifbond(slave->bond, &binfo.master);
		netdev_bonding_info_change(slave->dev, &binfo);
		rtnl_unlock();
	} else {
		queue_delayed_work(slave->bond->wq, &slave->notify_work, 1);
	}
}

void bond_queue_slave_event(struct slave *slave)
{
	queue_delayed_work(slave->bond->wq, &slave->notify_work, 0);
}

void bond_lower_state_changed(struct slave *slave)
{
	struct netdev_lag_lower_state_info info;

	info.link_up = slave->link == BOND_LINK_UP ||
		       slave->link == BOND_LINK_FAIL;
	info.tx_enabled = bond_is_active_slave(slave);
	netdev_lower_state_changed(slave->dev, &info);
}

#define BOND_NL_ERR(bond_dev, extack, errmsg) do {		\
	if (extack)						\
		NL_SET_ERR_MSG(extack, errmsg);			\
	else							\
		netdev_err(bond_dev, "Error: %s\n", errmsg);	\
} while (0)

#define SLAVE_NL_ERR(bond_dev, slave_dev, extack, errmsg) do {		\
	if (extack)							\
		NL_SET_ERR_MSG(extack, errmsg);				\
	else								\
		slave_err(bond_dev, slave_dev, "Error: %s\n", errmsg);	\
} while (0)

/* enslave device <slave> to bond device <master> */
int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
		 struct netlink_ext_ack *extack)
{
	struct bonding *bond = netdev_priv(bond_dev);
	const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
	struct slave *new_slave = NULL, *prev_slave;
	struct sockaddr_storage ss;
	int link_reporting;
	int res = 0, i;

	if (slave_dev->flags & IFF_MASTER &&
	    !netif_is_bond_master(slave_dev)) {
		BOND_NL_ERR(bond_dev, extack,
			    "Device type (master device) cannot be enslaved");
		return -EPERM;
	}

	if (!bond->params.use_carrier &&
	    slave_dev->ethtool_ops->get_link == NULL &&
	    slave_ops->ndo_eth_ioctl == NULL) {
		slave_warn(bond_dev, slave_dev, "no link monitoring support\n");
	}

	/* already in-use? */
	if (netdev_is_rx_handler_busy(slave_dev)) {
		SLAVE_NL_ERR(bond_dev, slave_dev, extack,
			     "Device is in use and cannot be enslaved");
		return -EBUSY;
	}

	if (bond_dev == slave_dev) {
		BOND_NL_ERR(bond_dev, extack, "Cannot enslave bond to itself.");
		return -EPERM;
	}

	/* vlan challenged mutual exclusion */
	/* no need to lock since we're protected by rtnl_lock */
	if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) {
		slave_dbg(bond_dev, slave_dev, "is NETIF_F_VLAN_CHALLENGED\n");
		if (vlan_uses_dev(bond_dev)) {
			SLAVE_NL_ERR(bond_dev, slave_dev, extack,
				     "Can not enslave VLAN challenged device to VLAN enabled bond");
			return -EPERM;
		} else {
			slave_warn(bond_dev, slave_dev, "enslaved VLAN challenged slave. Adding VLANs will be blocked as long as it is part of bond.\n");
		}
	} else {
		slave_dbg(bond_dev, slave_dev, "is !NETIF_F_VLAN_CHALLENGED\n");
	}

	if (slave_dev->features & NETIF_F_HW_ESP)
		slave_dbg(bond_dev, slave_dev, "is esp-hw-offload capable\n");

	/* Old ifenslave binaries are no longer supported. These can
	 * be identified with moderate accuracy by the state of the slave:
	 * the current ifenslave will set the interface down prior to
	 * enslaving it; the old ifenslave will not.
	 */
	if (slave_dev->flags & IFF_UP) {
		SLAVE_NL_ERR(bond_dev, slave_dev, extack,
			     "Device can not be enslaved while up");
		return -EPERM;
	}

	/* set bonding device ether type by slave - bonding netdevices are
	 * created with ether_setup, so when the slave type is not ARPHRD_ETHER
	 * there is a need to override some of the type dependent attribs/funcs.
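	 * (e.g. an ARPHRD_INFINIBAND slave brings its own header_ops,
	 * hard_header_len and addr_len - see bond_setup_by_slave() above)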
1844 * 1845 * bond ether type mutual exclusion - don't allow slaves of dissimilar 1846 * ether type (eg ARPHRD_ETHER and ARPHRD_INFINIBAND) share the same bond 1847 */ 1848 if (!bond_has_slaves(bond)) { 1849 if (bond_dev->type != slave_dev->type) { 1850 slave_dbg(bond_dev, slave_dev, "change device type from %d to %d\n", 1851 bond_dev->type, slave_dev->type); 1852 1853 res = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, 1854 bond_dev); 1855 res = notifier_to_errno(res); 1856 if (res) { 1857 slave_err(bond_dev, slave_dev, "refused to change device type\n"); 1858 return -EBUSY; 1859 } 1860 1861 /* Flush unicast and multicast addresses */ 1862 dev_uc_flush(bond_dev); 1863 dev_mc_flush(bond_dev); 1864 1865 if (slave_dev->type != ARPHRD_ETHER) 1866 bond_setup_by_slave(bond_dev, slave_dev); 1867 else { 1868 ether_setup(bond_dev); 1869 bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING; 1870 } 1871 1872 call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, 1873 bond_dev); 1874 } 1875 } else if (bond_dev->type != slave_dev->type) { 1876 SLAVE_NL_ERR(bond_dev, slave_dev, extack, 1877 "Device type is different from other slaves"); 1878 return -EINVAL; 1879 } 1880 1881 if (slave_dev->type == ARPHRD_INFINIBAND && 1882 BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { 1883 SLAVE_NL_ERR(bond_dev, slave_dev, extack, 1884 "Only active-backup mode is supported for infiniband slaves"); 1885 res = -EOPNOTSUPP; 1886 goto err_undo_flags; 1887 } 1888 1889 if (!slave_ops->ndo_set_mac_address || 1890 slave_dev->type == ARPHRD_INFINIBAND) { 1891 slave_warn(bond_dev, slave_dev, "The slave device specified does not support setting the MAC address\n"); 1892 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP && 1893 bond->params.fail_over_mac != BOND_FOM_ACTIVE) { 1894 if (!bond_has_slaves(bond)) { 1895 bond->params.fail_over_mac = BOND_FOM_ACTIVE; 1896 slave_warn(bond_dev, slave_dev, "Setting fail_over_mac to active for active-backup mode\n"); 1897 } else { 1898 SLAVE_NL_ERR(bond_dev, slave_dev, extack, 1899 "Slave device does not support setting the MAC address, but fail_over_mac is not set to active"); 1900 res = -EOPNOTSUPP; 1901 goto err_undo_flags; 1902 } 1903 } 1904 } 1905 1906 call_netdevice_notifiers(NETDEV_JOIN, slave_dev); 1907 1908 /* If this is the first slave, then we need to set the master's hardware 1909 * address to be the same as the slave's. 1910 */ 1911 if (!bond_has_slaves(bond) && 1912 bond->dev->addr_assign_type == NET_ADDR_RANDOM) { 1913 res = bond_set_dev_addr(bond->dev, slave_dev); 1914 if (res) 1915 goto err_undo_flags; 1916 } 1917 1918 new_slave = bond_alloc_slave(bond, slave_dev); 1919 if (!new_slave) { 1920 res = -ENOMEM; 1921 goto err_undo_flags; 1922 } 1923 1924 /* Set the new_slave's queue_id to be zero. Queue ID mapping 1925 * is set via sysfs or module option if desired. 
1926 */ 1927 new_slave->queue_id = 0; 1928 1929 /* Save slave's original mtu and then set it to match the bond */ 1930 new_slave->original_mtu = slave_dev->mtu; 1931 res = dev_set_mtu(slave_dev, bond->dev->mtu); 1932 if (res) { 1933 slave_err(bond_dev, slave_dev, "Error %d calling dev_set_mtu\n", res); 1934 goto err_free; 1935 } 1936 1937 /* Save slave's original ("permanent") mac address for modes 1938 * that need it, and for restoring it upon release, and then 1939 * set it to the master's address 1940 */ 1941 bond_hw_addr_copy(new_slave->perm_hwaddr, slave_dev->dev_addr, 1942 slave_dev->addr_len); 1943 1944 if (!bond->params.fail_over_mac || 1945 BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { 1946 /* Set slave to master's mac address. The application already 1947 * set the master's mac address to that of the first slave 1948 */ 1949 memcpy(ss.__data, bond_dev->dev_addr, bond_dev->addr_len); 1950 ss.ss_family = slave_dev->type; 1951 res = dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, 1952 extack); 1953 if (res) { 1954 slave_err(bond_dev, slave_dev, "Error %d calling set_mac_address\n", res); 1955 goto err_restore_mtu; 1956 } 1957 } 1958 1959 /* set no_addrconf flag before open to prevent IPv6 addrconf */ 1960 slave_dev->priv_flags |= IFF_NO_ADDRCONF; 1961 1962 /* open the slave since the application closed it */ 1963 res = dev_open(slave_dev, extack); 1964 if (res) { 1965 slave_err(bond_dev, slave_dev, "Opening slave failed\n"); 1966 goto err_restore_mac; 1967 } 1968 1969 slave_dev->priv_flags |= IFF_BONDING; 1970 /* initialize slave stats */ 1971 dev_get_stats(new_slave->dev, &new_slave->slave_stats); 1972 1973 if (bond_is_lb(bond)) { 1974 /* bond_alb_init_slave() must be called before all other stages since 1975 * it might fail and we do not want to have to undo everything 1976 */ 1977 res = bond_alb_init_slave(bond, new_slave); 1978 if (res) 1979 goto err_close; 1980 } 1981 1982 res = vlan_vids_add_by_dev(slave_dev, bond_dev); 1983 if (res) { 1984 slave_err(bond_dev, slave_dev, "Couldn't add bond vlan ids\n"); 1985 goto err_close; 1986 } 1987 1988 prev_slave = bond_last_slave(bond); 1989 1990 new_slave->delay = 0; 1991 new_slave->link_failure_count = 0; 1992 1993 if (bond_update_speed_duplex(new_slave) && 1994 bond_needs_speed_duplex(bond)) 1995 new_slave->link = BOND_LINK_DOWN; 1996 1997 new_slave->last_rx = jiffies - 1998 (msecs_to_jiffies(bond->params.arp_interval) + 1); 1999 for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) 2000 new_slave->target_last_arp_rx[i] = new_slave->last_rx; 2001 2002 new_slave->last_tx = new_slave->last_rx; 2003 2004 if (bond->params.miimon && !bond->params.use_carrier) { 2005 link_reporting = bond_check_dev_link(bond, slave_dev, 1); 2006 2007 if ((link_reporting == -1) && !bond->params.arp_interval) { 2008 /* miimon is set but a bonded network driver 2009 * does not support ETHTOOL/MII and 2010 * arp_interval is not set. Note: if 2011 * use_carrier is enabled, we will never go 2012 * here (because netif_carrier is always 2013 * supported); thus, we don't need to change 2014 * the messages for netif_carrier. 2015 */ 2016 slave_warn(bond_dev, slave_dev, "MII and ETHTOOL support not available for slave, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! 
			slave_warn(bond_dev, slave_dev, "MII and ETHTOOL support not available for slave, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details\n");
		} else if (link_reporting == -1) {
			/* unable to get link status using mii/ethtool */
			slave_warn(bond_dev, slave_dev, "can't get link status from slave; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface\n");
		}
	}

	/* check for initial state */
	new_slave->link = BOND_LINK_NOCHANGE;
	if (bond->params.miimon) {
		if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
			if (bond->params.updelay) {
				bond_set_slave_link_state(new_slave,
							  BOND_LINK_BACK,
							  BOND_SLAVE_NOTIFY_NOW);
				new_slave->delay = bond->params.updelay;
			} else {
				bond_set_slave_link_state(new_slave,
							  BOND_LINK_UP,
							  BOND_SLAVE_NOTIFY_NOW);
			}
		} else {
			bond_set_slave_link_state(new_slave, BOND_LINK_DOWN,
						  BOND_SLAVE_NOTIFY_NOW);
		}
	} else if (bond->params.arp_interval) {
		bond_set_slave_link_state(new_slave,
					  (netif_carrier_ok(slave_dev) ?
					   BOND_LINK_UP : BOND_LINK_DOWN),
					  BOND_SLAVE_NOTIFY_NOW);
	} else {
		bond_set_slave_link_state(new_slave, BOND_LINK_UP,
					  BOND_SLAVE_NOTIFY_NOW);
	}

	if (new_slave->link != BOND_LINK_DOWN)
		new_slave->last_link_up = jiffies;
	slave_dbg(bond_dev, slave_dev, "Initial state of slave is BOND_LINK_%s\n",
		  new_slave->link == BOND_LINK_DOWN ? "DOWN" :
		  (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));

	if (bond_uses_primary(bond) && bond->params.primary[0]) {
		/* if there is a primary slave, remember it */
		if (strcmp(bond->params.primary, new_slave->dev->name) == 0) {
			rcu_assign_pointer(bond->primary_slave, new_slave);
			bond->force_primary = true;
		}
	}

	switch (BOND_MODE(bond)) {
	case BOND_MODE_ACTIVEBACKUP:
		bond_set_slave_inactive_flags(new_slave,
					      BOND_SLAVE_NOTIFY_NOW);
		break;
	case BOND_MODE_8023AD:
		/* in 802.3ad mode, the internal mechanism
		 * will activate the slaves in the selected
		 * aggregator
		 */
		bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
		/* if this is the first slave */
		if (!prev_slave) {
			SLAVE_AD_INFO(new_slave)->id = 1;
			/* Initialize AD with the number of times that the AD
			 * timer is called in 1 second; can be called only
			 * after the mac address of the bond is set
			 */
			bond_3ad_initialize(bond);
		} else {
			SLAVE_AD_INFO(new_slave)->id =
				SLAVE_AD_INFO(prev_slave)->id + 1;
		}

		bond_3ad_bind_slave(new_slave);
		break;
	case BOND_MODE_TLB:
	case BOND_MODE_ALB:
		bond_set_active_slave(new_slave);
		bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
		break;
	default:
		slave_dbg(bond_dev, slave_dev, "This slave is always active in trunk mode\n");

		/* always active in trunk mode */
		bond_set_active_slave(new_slave);

		/* In trunking mode there is little meaning to curr_active_slave
		 * anyway (it holds no special properties of the bond device),
		 * so we can change it without calling change_active_interface()
		 */
		if (!rcu_access_pointer(bond->curr_active_slave) &&
		    new_slave->link == BOND_LINK_UP)
			rcu_assign_pointer(bond->curr_active_slave, new_slave);

		break;
	} /* switch(bond_mode) */

#ifdef CONFIG_NET_POLL_CONTROLLER
	if (bond->dev->npinfo) {
		if (slave_enable_netpoll(new_slave)) {
			slave_info(bond_dev, slave_dev,
"master_dev is using netpoll, but new slave device does not support netpoll\n"); 2116 res = -EBUSY; 2117 goto err_detach; 2118 } 2119 } 2120 #endif 2121 2122 if (!(bond_dev->features & NETIF_F_LRO)) 2123 dev_disable_lro(slave_dev); 2124 2125 res = netdev_rx_handler_register(slave_dev, bond_handle_frame, 2126 new_slave); 2127 if (res) { 2128 slave_dbg(bond_dev, slave_dev, "Error %d calling netdev_rx_handler_register\n", res); 2129 goto err_detach; 2130 } 2131 2132 res = bond_master_upper_dev_link(bond, new_slave, extack); 2133 if (res) { 2134 slave_dbg(bond_dev, slave_dev, "Error %d calling bond_master_upper_dev_link\n", res); 2135 goto err_unregister; 2136 } 2137 2138 bond_lower_state_changed(new_slave); 2139 2140 res = bond_sysfs_slave_add(new_slave); 2141 if (res) { 2142 slave_dbg(bond_dev, slave_dev, "Error %d calling bond_sysfs_slave_add\n", res); 2143 goto err_upper_unlink; 2144 } 2145 2146 /* If the mode uses primary, then the following is handled by 2147 * bond_change_active_slave(). 2148 */ 2149 if (!bond_uses_primary(bond)) { 2150 /* set promiscuity level to new slave */ 2151 if (bond_dev->flags & IFF_PROMISC) { 2152 res = dev_set_promiscuity(slave_dev, 1); 2153 if (res) 2154 goto err_sysfs_del; 2155 } 2156 2157 /* set allmulti level to new slave */ 2158 if (bond_dev->flags & IFF_ALLMULTI) { 2159 res = dev_set_allmulti(slave_dev, 1); 2160 if (res) { 2161 if (bond_dev->flags & IFF_PROMISC) 2162 dev_set_promiscuity(slave_dev, -1); 2163 goto err_sysfs_del; 2164 } 2165 } 2166 2167 if (bond_dev->flags & IFF_UP) { 2168 netif_addr_lock_bh(bond_dev); 2169 dev_mc_sync_multiple(slave_dev, bond_dev); 2170 dev_uc_sync_multiple(slave_dev, bond_dev); 2171 netif_addr_unlock_bh(bond_dev); 2172 2173 if (BOND_MODE(bond) == BOND_MODE_8023AD) 2174 dev_mc_add(slave_dev, lacpdu_mcast_addr); 2175 } 2176 } 2177 2178 bond->slave_cnt++; 2179 bond_compute_features(bond); 2180 bond_set_carrier(bond); 2181 2182 if (bond_uses_primary(bond)) { 2183 block_netpoll_tx(); 2184 bond_select_active_slave(bond); 2185 unblock_netpoll_tx(); 2186 } 2187 2188 if (bond_mode_can_use_xmit_hash(bond)) 2189 bond_update_slave_arr(bond, NULL); 2190 2191 2192 if (!slave_dev->netdev_ops->ndo_bpf || 2193 !slave_dev->netdev_ops->ndo_xdp_xmit) { 2194 if (bond->xdp_prog) { 2195 SLAVE_NL_ERR(bond_dev, slave_dev, extack, 2196 "Slave does not support XDP"); 2197 res = -EOPNOTSUPP; 2198 goto err_sysfs_del; 2199 } 2200 } else if (bond->xdp_prog) { 2201 struct netdev_bpf xdp = { 2202 .command = XDP_SETUP_PROG, 2203 .flags = 0, 2204 .prog = bond->xdp_prog, 2205 .extack = extack, 2206 }; 2207 2208 if (dev_xdp_prog_count(slave_dev) > 0) { 2209 SLAVE_NL_ERR(bond_dev, slave_dev, extack, 2210 "Slave has XDP program loaded, please unload before enslaving"); 2211 res = -EOPNOTSUPP; 2212 goto err_sysfs_del; 2213 } 2214 2215 res = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp); 2216 if (res < 0) { 2217 /* ndo_bpf() sets extack error message */ 2218 slave_dbg(bond_dev, slave_dev, "Error %d calling ndo_bpf\n", res); 2219 goto err_sysfs_del; 2220 } 2221 if (bond->xdp_prog) 2222 bpf_prog_inc(bond->xdp_prog); 2223 } 2224 2225 slave_info(bond_dev, slave_dev, "Enslaving as %s interface with %s link\n", 2226 bond_is_active_slave(new_slave) ? "an active" : "a backup", 2227 new_slave->link != BOND_LINK_DOWN ? 
"an up" : "a down"); 2228 2229 /* enslave is successful */ 2230 bond_queue_slave_event(new_slave); 2231 return 0; 2232 2233 /* Undo stages on error */ 2234 err_sysfs_del: 2235 bond_sysfs_slave_del(new_slave); 2236 2237 err_upper_unlink: 2238 bond_upper_dev_unlink(bond, new_slave); 2239 2240 err_unregister: 2241 netdev_rx_handler_unregister(slave_dev); 2242 2243 err_detach: 2244 vlan_vids_del_by_dev(slave_dev, bond_dev); 2245 if (rcu_access_pointer(bond->primary_slave) == new_slave) 2246 RCU_INIT_POINTER(bond->primary_slave, NULL); 2247 if (rcu_access_pointer(bond->curr_active_slave) == new_slave) { 2248 block_netpoll_tx(); 2249 bond_change_active_slave(bond, NULL); 2250 bond_select_active_slave(bond); 2251 unblock_netpoll_tx(); 2252 } 2253 /* either primary_slave or curr_active_slave might've changed */ 2254 synchronize_rcu(); 2255 slave_disable_netpoll(new_slave); 2256 2257 err_close: 2258 if (!netif_is_bond_master(slave_dev)) 2259 slave_dev->priv_flags &= ~IFF_BONDING; 2260 dev_close(slave_dev); 2261 2262 err_restore_mac: 2263 slave_dev->priv_flags &= ~IFF_NO_ADDRCONF; 2264 if (!bond->params.fail_over_mac || 2265 BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { 2266 /* XXX TODO - fom follow mode needs to change master's 2267 * MAC if this slave's MAC is in use by the bond, or at 2268 * least print a warning. 2269 */ 2270 bond_hw_addr_copy(ss.__data, new_slave->perm_hwaddr, 2271 new_slave->dev->addr_len); 2272 ss.ss_family = slave_dev->type; 2273 dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, NULL); 2274 } 2275 2276 err_restore_mtu: 2277 dev_set_mtu(slave_dev, new_slave->original_mtu); 2278 2279 err_free: 2280 kobject_put(&new_slave->kobj); 2281 2282 err_undo_flags: 2283 /* Enslave of first slave has failed and we need to fix master's mac */ 2284 if (!bond_has_slaves(bond)) { 2285 if (ether_addr_equal_64bits(bond_dev->dev_addr, 2286 slave_dev->dev_addr)) 2287 eth_hw_addr_random(bond_dev); 2288 if (bond_dev->type != ARPHRD_ETHER) { 2289 dev_close(bond_dev); 2290 ether_setup(bond_dev); 2291 bond_dev->flags |= IFF_MASTER; 2292 bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING; 2293 } 2294 } 2295 2296 return res; 2297 } 2298 2299 /* Try to release the slave device <slave> from the bond device <master> 2300 * It is legal to access curr_active_slave without a lock because all the function 2301 * is RTNL-locked. If "all" is true it means that the function is being called 2302 * while destroying a bond interface and all slaves are being released. 2303 * 2304 * The rules for slave state should be: 2305 * for Active/Backup: 2306 * Active stays on all backups go down 2307 * for Bonded connections: 2308 * The first up interface should be left on and all others downed. 
2309 */ 2310 static int __bond_release_one(struct net_device *bond_dev, 2311 struct net_device *slave_dev, 2312 bool all, bool unregister) 2313 { 2314 struct bonding *bond = netdev_priv(bond_dev); 2315 struct slave *slave, *oldcurrent; 2316 struct sockaddr_storage ss; 2317 int old_flags = bond_dev->flags; 2318 netdev_features_t old_features = bond_dev->features; 2319 2320 /* slave is not a slave or master is not master of this slave */ 2321 if (!(slave_dev->flags & IFF_SLAVE) || 2322 !netdev_has_upper_dev(slave_dev, bond_dev)) { 2323 slave_dbg(bond_dev, slave_dev, "cannot release slave\n"); 2324 return -EINVAL; 2325 } 2326 2327 block_netpoll_tx(); 2328 2329 slave = bond_get_slave_by_dev(bond, slave_dev); 2330 if (!slave) { 2331 /* not a slave of this bond */ 2332 slave_info(bond_dev, slave_dev, "interface not enslaved\n"); 2333 unblock_netpoll_tx(); 2334 return -EINVAL; 2335 } 2336 2337 bond_set_slave_inactive_flags(slave, BOND_SLAVE_NOTIFY_NOW); 2338 2339 bond_sysfs_slave_del(slave); 2340 2341 /* recompute stats just before removing the slave */ 2342 bond_get_stats(bond->dev, &bond->bond_stats); 2343 2344 if (bond->xdp_prog) { 2345 struct netdev_bpf xdp = { 2346 .command = XDP_SETUP_PROG, 2347 .flags = 0, 2348 .prog = NULL, 2349 .extack = NULL, 2350 }; 2351 if (slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp)) 2352 slave_warn(bond_dev, slave_dev, "failed to unload XDP program\n"); 2353 } 2354 2355 /* unregister rx_handler early so bond_handle_frame wouldn't be called 2356 * for this slave anymore. 2357 */ 2358 netdev_rx_handler_unregister(slave_dev); 2359 2360 if (BOND_MODE(bond) == BOND_MODE_8023AD) 2361 bond_3ad_unbind_slave(slave); 2362 2363 bond_upper_dev_unlink(bond, slave); 2364 2365 if (bond_mode_can_use_xmit_hash(bond)) 2366 bond_update_slave_arr(bond, slave); 2367 2368 slave_info(bond_dev, slave_dev, "Releasing %s interface\n", 2369 bond_is_active_slave(slave) ? "active" : "backup"); 2370 2371 oldcurrent = rcu_access_pointer(bond->curr_active_slave); 2372 2373 RCU_INIT_POINTER(bond->current_arp_slave, NULL); 2374 2375 if (!all && (!bond->params.fail_over_mac || 2376 BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)) { 2377 if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) && 2378 bond_has_slaves(bond)) 2379 slave_warn(bond_dev, slave_dev, "the permanent HWaddr of slave - %pM - is still in use by bond - set the HWaddr of slave to a different address to avoid conflicts\n", 2380 slave->perm_hwaddr); 2381 } 2382 2383 if (rtnl_dereference(bond->primary_slave) == slave) 2384 RCU_INIT_POINTER(bond->primary_slave, NULL); 2385 2386 if (oldcurrent == slave) 2387 bond_change_active_slave(bond, NULL); 2388 2389 if (bond_is_lb(bond)) { 2390 /* Must be called only after the slave has been 2391 * detached from the list and the curr_active_slave 2392 * has been cleared (if our_slave == old_current), 2393 * but before a new active slave is selected. 2394 */ 2395 bond_alb_deinit_slave(bond, slave); 2396 } 2397 2398 if (all) { 2399 RCU_INIT_POINTER(bond->curr_active_slave, NULL); 2400 } else if (oldcurrent == slave) { 2401 /* Note that we hold RTNL over this sequence, so there 2402 * is no concern that another slave add/remove event 2403 * will interfere. 
2404 */ 2405 bond_select_active_slave(bond); 2406 } 2407 2408 bond_set_carrier(bond); 2409 if (!bond_has_slaves(bond)) 2410 eth_hw_addr_random(bond_dev); 2411 2412 unblock_netpoll_tx(); 2413 synchronize_rcu(); 2414 bond->slave_cnt--; 2415 2416 if (!bond_has_slaves(bond)) { 2417 call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev); 2418 call_netdevice_notifiers(NETDEV_RELEASE, bond->dev); 2419 } 2420 2421 bond_compute_features(bond); 2422 if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) && 2423 (old_features & NETIF_F_VLAN_CHALLENGED)) 2424 slave_info(bond_dev, slave_dev, "last VLAN challenged slave left bond - VLAN blocking is removed\n"); 2425 2426 vlan_vids_del_by_dev(slave_dev, bond_dev); 2427 2428 /* If the mode uses primary, then this case was handled above by 2429 * bond_change_active_slave(..., NULL) 2430 */ 2431 if (!bond_uses_primary(bond)) { 2432 /* unset promiscuity level from slave 2433 * NOTE: The NETDEV_CHANGEADDR call above may change the value 2434 * of the IFF_PROMISC flag in the bond_dev, but we need the 2435 * value of that flag before that change, as that was the value 2436 * when this slave was attached, so we cache at the start of the 2437 * function and use it here. Same goes for ALLMULTI below 2438 */ 2439 if (old_flags & IFF_PROMISC) 2440 dev_set_promiscuity(slave_dev, -1); 2441 2442 /* unset allmulti level from slave */ 2443 if (old_flags & IFF_ALLMULTI) 2444 dev_set_allmulti(slave_dev, -1); 2445 2446 if (old_flags & IFF_UP) 2447 bond_hw_addr_flush(bond_dev, slave_dev); 2448 } 2449 2450 slave_disable_netpoll(slave); 2451 2452 /* close slave before restoring its mac address */ 2453 dev_close(slave_dev); 2454 2455 slave_dev->priv_flags &= ~IFF_NO_ADDRCONF; 2456 2457 if (bond->params.fail_over_mac != BOND_FOM_ACTIVE || 2458 BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { 2459 /* restore original ("permanent") mac address */ 2460 bond_hw_addr_copy(ss.__data, slave->perm_hwaddr, 2461 slave->dev->addr_len); 2462 ss.ss_family = slave_dev->type; 2463 dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, NULL); 2464 } 2465 2466 if (unregister) 2467 __dev_set_mtu(slave_dev, slave->original_mtu); 2468 else 2469 dev_set_mtu(slave_dev, slave->original_mtu); 2470 2471 if (!netif_is_bond_master(slave_dev)) 2472 slave_dev->priv_flags &= ~IFF_BONDING; 2473 2474 kobject_put(&slave->kobj); 2475 2476 return 0; 2477 } 2478 2479 /* A wrapper used because of ndo_del_link */ 2480 int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) 2481 { 2482 return __bond_release_one(bond_dev, slave_dev, false, false); 2483 } 2484 2485 /* First release a slave and then destroy the bond if no more slaves are left. 2486 * Must be under rtnl_lock when this function is called. 
2487 */ 2488 static int bond_release_and_destroy(struct net_device *bond_dev, 2489 struct net_device *slave_dev) 2490 { 2491 struct bonding *bond = netdev_priv(bond_dev); 2492 int ret; 2493 2494 ret = __bond_release_one(bond_dev, slave_dev, false, true); 2495 if (ret == 0 && !bond_has_slaves(bond) && 2496 bond_dev->reg_state != NETREG_UNREGISTERING) { 2497 bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; 2498 netdev_info(bond_dev, "Destroying bond\n"); 2499 bond_remove_proc_entry(bond); 2500 unregister_netdevice(bond_dev); 2501 } 2502 return ret; 2503 } 2504 2505 static void bond_info_query(struct net_device *bond_dev, struct ifbond *info) 2506 { 2507 struct bonding *bond = netdev_priv(bond_dev); 2508 2509 bond_fill_ifbond(bond, info); 2510 } 2511 2512 static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *info) 2513 { 2514 struct bonding *bond = netdev_priv(bond_dev); 2515 struct list_head *iter; 2516 int i = 0, res = -ENODEV; 2517 struct slave *slave; 2518 2519 bond_for_each_slave(bond, slave, iter) { 2520 if (i++ == (int)info->slave_id) { 2521 res = 0; 2522 bond_fill_ifslave(slave, info); 2523 break; 2524 } 2525 } 2526 2527 return res; 2528 } 2529 2530 /*-------------------------------- Monitoring -------------------------------*/ 2531 2532 /* called with rcu_read_lock() */ 2533 static int bond_miimon_inspect(struct bonding *bond) 2534 { 2535 bool ignore_updelay = false; 2536 int link_state, commit = 0; 2537 struct list_head *iter; 2538 struct slave *slave; 2539 2540 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) { 2541 ignore_updelay = !rcu_dereference(bond->curr_active_slave); 2542 } else { 2543 struct bond_up_slave *usable_slaves; 2544 2545 usable_slaves = rcu_dereference(bond->usable_slaves); 2546 2547 if (usable_slaves && usable_slaves->count == 0) 2548 ignore_updelay = true; 2549 } 2550 2551 bond_for_each_slave_rcu(bond, slave, iter) { 2552 bond_propose_link_state(slave, BOND_LINK_NOCHANGE); 2553 2554 link_state = bond_check_dev_link(bond, slave->dev, 0); 2555 2556 switch (slave->link) { 2557 case BOND_LINK_UP: 2558 if (link_state) 2559 continue; 2560 2561 bond_propose_link_state(slave, BOND_LINK_FAIL); 2562 commit++; 2563 slave->delay = bond->params.downdelay; 2564 if (slave->delay) { 2565 slave_info(bond->dev, slave->dev, "link status down for %sinterface, disabling it in %d ms\n", 2566 (BOND_MODE(bond) == 2567 BOND_MODE_ACTIVEBACKUP) ? 2568 (bond_is_active_slave(slave) ? 2569 "active " : "backup ") : "", 2570 bond->params.downdelay * bond->params.miimon); 2571 } 2572 fallthrough; 2573 case BOND_LINK_FAIL: 2574 if (link_state) { 2575 /* recovered before downdelay expired */ 2576 bond_propose_link_state(slave, BOND_LINK_UP); 2577 slave->last_link_up = jiffies; 2578 slave_info(bond->dev, slave->dev, "link status up again after %d ms\n", 2579 (bond->params.downdelay - slave->delay) * 2580 bond->params.miimon); 2581 commit++; 2582 continue; 2583 } 2584 2585 if (slave->delay <= 0) { 2586 bond_propose_link_state(slave, BOND_LINK_DOWN); 2587 commit++; 2588 continue; 2589 } 2590 2591 slave->delay--; 2592 break; 2593 2594 case BOND_LINK_DOWN: 2595 if (!link_state) 2596 continue; 2597 2598 bond_propose_link_state(slave, BOND_LINK_BACK); 2599 commit++; 2600 slave->delay = bond->params.updelay; 2601 2602 if (slave->delay) { 2603 slave_info(bond->dev, slave->dev, "link status up, enabling it in %d ms\n", 2604 ignore_updelay ? 
					   0 :
					   bond->params.updelay *
					   bond->params.miimon);
			}
			fallthrough;
		case BOND_LINK_BACK:
			if (!link_state) {
				bond_propose_link_state(slave, BOND_LINK_DOWN);
				slave_info(bond->dev, slave->dev, "link status down again after %d ms\n",
					   (bond->params.updelay - slave->delay) *
					   bond->params.miimon);
				commit++;
				continue;
			}

			if (ignore_updelay)
				slave->delay = 0;

			if (slave->delay <= 0) {
				bond_propose_link_state(slave, BOND_LINK_UP);
				commit++;
				ignore_updelay = false;
				continue;
			}

			slave->delay--;
			break;
		}
	}

	return commit;
}

static void bond_miimon_link_change(struct bonding *bond,
				    struct slave *slave,
				    char link)
{
	switch (BOND_MODE(bond)) {
	case BOND_MODE_8023AD:
		bond_3ad_handle_link_change(slave, link);
		break;
	case BOND_MODE_TLB:
	case BOND_MODE_ALB:
		bond_alb_handle_link_change(bond, slave, link);
		break;
	case BOND_MODE_XOR:
		bond_update_slave_arr(bond, NULL);
		break;
	}
}

static void bond_miimon_commit(struct bonding *bond)
{
	struct slave *slave, *primary;
	bool do_failover = false;
	struct list_head *iter;

	bond_for_each_slave(bond, slave, iter) {
		switch (slave->link_new_state) {
		case BOND_LINK_NOCHANGE:
			/* For 802.3ad mode, check current slave speed and
			 * duplex again in case its port was disabled after
			 * invalid speed/duplex reporting but recovered before
			 * link monitoring could make a decision on the actual
			 * link status
			 */
			if (BOND_MODE(bond) == BOND_MODE_8023AD &&
			    slave->link == BOND_LINK_UP)
				bond_3ad_adapter_speed_duplex_changed(slave);
			continue;

		case BOND_LINK_UP:
			if (bond_update_speed_duplex(slave) &&
			    bond_needs_speed_duplex(bond)) {
				slave->link = BOND_LINK_DOWN;
				if (net_ratelimit())
					slave_warn(bond->dev, slave->dev,
						   "failed to get link speed/duplex\n");
				continue;
			}
			bond_set_slave_link_state(slave, BOND_LINK_UP,
						  BOND_SLAVE_NOTIFY_NOW);
			slave->last_link_up = jiffies;

			primary = rtnl_dereference(bond->primary_slave);
			if (BOND_MODE(bond) == BOND_MODE_8023AD) {
				/* prevent it from being the active one */
				bond_set_backup_slave(slave);
			} else if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
				/* make it immediately active */
				bond_set_active_slave(slave);
			}

			slave_info(bond->dev, slave->dev, "link status definitely up, %u Mbps %s duplex\n",
				   slave->speed == SPEED_UNKNOWN ? 0 : slave->speed,
				   slave->duplex ?
"full" : "half"); 2700 2701 bond_miimon_link_change(bond, slave, BOND_LINK_UP); 2702 2703 if (!rcu_access_pointer(bond->curr_active_slave) || slave == primary || 2704 slave->prio > rcu_dereference(bond->curr_active_slave)->prio) 2705 do_failover = true; 2706 2707 continue; 2708 2709 case BOND_LINK_DOWN: 2710 if (slave->link_failure_count < UINT_MAX) 2711 slave->link_failure_count++; 2712 2713 bond_set_slave_link_state(slave, BOND_LINK_DOWN, 2714 BOND_SLAVE_NOTIFY_NOW); 2715 2716 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP || 2717 BOND_MODE(bond) == BOND_MODE_8023AD) 2718 bond_set_slave_inactive_flags(slave, 2719 BOND_SLAVE_NOTIFY_NOW); 2720 2721 slave_info(bond->dev, slave->dev, "link status definitely down, disabling slave\n"); 2722 2723 bond_miimon_link_change(bond, slave, BOND_LINK_DOWN); 2724 2725 if (slave == rcu_access_pointer(bond->curr_active_slave)) 2726 do_failover = true; 2727 2728 continue; 2729 2730 default: 2731 slave_err(bond->dev, slave->dev, "invalid new link %d on slave\n", 2732 slave->link_new_state); 2733 bond_propose_link_state(slave, BOND_LINK_NOCHANGE); 2734 2735 continue; 2736 } 2737 } 2738 2739 if (do_failover) { 2740 block_netpoll_tx(); 2741 bond_select_active_slave(bond); 2742 unblock_netpoll_tx(); 2743 } 2744 2745 bond_set_carrier(bond); 2746 } 2747 2748 /* bond_mii_monitor 2749 * 2750 * Really a wrapper that splits the mii monitor into two phases: an 2751 * inspection, then (if inspection indicates something needs to be done) 2752 * an acquisition of appropriate locks followed by a commit phase to 2753 * implement whatever link state changes are indicated. 2754 */ 2755 static void bond_mii_monitor(struct work_struct *work) 2756 { 2757 struct bonding *bond = container_of(work, struct bonding, 2758 mii_work.work); 2759 bool should_notify_peers = false; 2760 bool commit; 2761 unsigned long delay; 2762 struct slave *slave; 2763 struct list_head *iter; 2764 2765 delay = msecs_to_jiffies(bond->params.miimon); 2766 2767 if (!bond_has_slaves(bond)) 2768 goto re_arm; 2769 2770 rcu_read_lock(); 2771 should_notify_peers = bond_should_notify_peers(bond); 2772 commit = !!bond_miimon_inspect(bond); 2773 if (bond->send_peer_notif) { 2774 rcu_read_unlock(); 2775 if (rtnl_trylock()) { 2776 bond->send_peer_notif--; 2777 rtnl_unlock(); 2778 } 2779 } else { 2780 rcu_read_unlock(); 2781 } 2782 2783 if (commit) { 2784 /* Race avoidance with bond_close cancel of workqueue */ 2785 if (!rtnl_trylock()) { 2786 delay = 1; 2787 should_notify_peers = false; 2788 goto re_arm; 2789 } 2790 2791 bond_for_each_slave(bond, slave, iter) { 2792 bond_commit_link_state(slave, BOND_SLAVE_NOTIFY_LATER); 2793 } 2794 bond_miimon_commit(bond); 2795 2796 rtnl_unlock(); /* might sleep, hold no other locks */ 2797 } 2798 2799 re_arm: 2800 if (bond->params.miimon) 2801 queue_delayed_work(bond->wq, &bond->mii_work, delay); 2802 2803 if (should_notify_peers) { 2804 if (!rtnl_trylock()) 2805 return; 2806 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev); 2807 rtnl_unlock(); 2808 } 2809 } 2810 2811 static int bond_upper_dev_walk(struct net_device *upper, 2812 struct netdev_nested_priv *priv) 2813 { 2814 __be32 ip = *(__be32 *)priv->data; 2815 2816 return ip == bond_confirm_addr(upper, 0, ip); 2817 } 2818 2819 static bool bond_has_this_ip(struct bonding *bond, __be32 ip) 2820 { 2821 struct netdev_nested_priv priv = { 2822 .data = (void *)&ip, 2823 }; 2824 bool ret = false; 2825 2826 if (ip == bond_confirm_addr(bond->dev, 0, ip)) 2827 return true; 2828 2829 rcu_read_lock(); 2830 if 
static int bond_upper_dev_walk(struct net_device *upper,
			       struct netdev_nested_priv *priv)
{
	__be32 ip = *(__be32 *)priv->data;

	return ip == bond_confirm_addr(upper, 0, ip);
}

static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
{
	struct netdev_nested_priv priv = {
		.data = (void *)&ip,
	};
	bool ret = false;

	if (ip == bond_confirm_addr(bond->dev, 0, ip))
		return true;

	rcu_read_lock();
	if (netdev_walk_all_upper_dev_rcu(bond->dev, bond_upper_dev_walk, &priv))
		ret = true;
	rcu_read_unlock();

	return ret;
}

static bool bond_handle_vlan(struct slave *slave, struct bond_vlan_tag *tags,
			     struct sk_buff *skb)
{
	struct net_device *bond_dev = slave->bond->dev;
	struct net_device *slave_dev = slave->dev;
	struct bond_vlan_tag *outer_tag = tags;

	if (!tags || tags->vlan_proto == VLAN_N_VID)
		return true;

	tags++;

	/* Go through all the tags backwards and add them to the packet */
	while (tags->vlan_proto != VLAN_N_VID) {
		if (!tags->vlan_id) {
			tags++;
			continue;
		}

		slave_dbg(bond_dev, slave_dev, "inner tag: proto %X vid %X\n",
			  ntohs(outer_tag->vlan_proto), tags->vlan_id);
		skb = vlan_insert_tag_set_proto(skb, tags->vlan_proto,
						tags->vlan_id);
		if (!skb) {
			net_err_ratelimited("failed to insert inner VLAN tag\n");
			return false;
		}

		tags++;
	}
	/* Set the outer tag */
	if (outer_tag->vlan_id) {
		slave_dbg(bond_dev, slave_dev, "outer tag: proto %X vid %X\n",
			  ntohs(outer_tag->vlan_proto), outer_tag->vlan_id);
		__vlan_hwaccel_put_tag(skb, outer_tag->vlan_proto,
				       outer_tag->vlan_id);
	}

	return true;
}

/* We go to the (large) trouble of VLAN tagging ARP frames because
 * switches in VLAN mode (especially if ports are configured as
 * "native" to a VLAN) might not pass non-tagged frames.
 */
static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip,
			  __be32 src_ip, struct bond_vlan_tag *tags)
{
	struct net_device *bond_dev = slave->bond->dev;
	struct net_device *slave_dev = slave->dev;
	struct sk_buff *skb;

	slave_dbg(bond_dev, slave_dev, "arp %d on slave: dst %pI4 src %pI4\n",
		  arp_op, &dest_ip, &src_ip);

	skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
			 NULL, slave_dev->dev_addr, NULL);

	if (!skb) {
		net_err_ratelimited("ARP packet allocation failed\n");
		return;
	}

	if (bond_handle_vlan(slave, tags, skb)) {
		slave_update_last_tx(slave);
		arp_xmit(skb);
	}
}
2913 */ 2914 struct bond_vlan_tag *bond_verify_device_path(struct net_device *start_dev, 2915 struct net_device *end_dev, 2916 int level) 2917 { 2918 struct bond_vlan_tag *tags; 2919 struct net_device *upper; 2920 struct list_head *iter; 2921 2922 if (start_dev == end_dev) { 2923 tags = kcalloc(level + 1, sizeof(*tags), GFP_ATOMIC); 2924 if (!tags) 2925 return ERR_PTR(-ENOMEM); 2926 tags[level].vlan_proto = VLAN_N_VID; 2927 return tags; 2928 } 2929 2930 netdev_for_each_upper_dev_rcu(start_dev, upper, iter) { 2931 tags = bond_verify_device_path(upper, end_dev, level + 1); 2932 if (IS_ERR_OR_NULL(tags)) { 2933 if (IS_ERR(tags)) 2934 return tags; 2935 continue; 2936 } 2937 if (is_vlan_dev(upper)) { 2938 tags[level].vlan_proto = vlan_dev_vlan_proto(upper); 2939 tags[level].vlan_id = vlan_dev_vlan_id(upper); 2940 } 2941 2942 return tags; 2943 } 2944 2945 return NULL; 2946 } 2947 2948 static void bond_arp_send_all(struct bonding *bond, struct slave *slave) 2949 { 2950 struct rtable *rt; 2951 struct bond_vlan_tag *tags; 2952 __be32 *targets = bond->params.arp_targets, addr; 2953 int i; 2954 2955 for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) { 2956 slave_dbg(bond->dev, slave->dev, "%s: target %pI4\n", 2957 __func__, &targets[i]); 2958 tags = NULL; 2959 2960 /* Find out through which dev should the packet go */ 2961 rt = ip_route_output(dev_net(bond->dev), targets[i], 0, 2962 RTO_ONLINK, 0); 2963 if (IS_ERR(rt)) { 2964 /* there's no route to target - try to send arp 2965 * probe to generate any traffic (arp_validate=0) 2966 */ 2967 if (bond->params.arp_validate) 2968 pr_warn_once("%s: no route to arp_ip_target %pI4 and arp_validate is set\n", 2969 bond->dev->name, 2970 &targets[i]); 2971 bond_arp_send(slave, ARPOP_REQUEST, targets[i], 2972 0, tags); 2973 continue; 2974 } 2975 2976 /* bond device itself */ 2977 if (rt->dst.dev == bond->dev) 2978 goto found; 2979 2980 rcu_read_lock(); 2981 tags = bond_verify_device_path(bond->dev, rt->dst.dev, 0); 2982 rcu_read_unlock(); 2983 2984 if (!IS_ERR_OR_NULL(tags)) 2985 goto found; 2986 2987 /* Not our device - skip */ 2988 slave_dbg(bond->dev, slave->dev, "no path to arp_ip_target %pI4 via rt.dev %s\n", 2989 &targets[i], rt->dst.dev ? 
static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
{
	struct rtable *rt;
	struct bond_vlan_tag *tags;
	__be32 *targets = bond->params.arp_targets, addr;
	int i;

	for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
		slave_dbg(bond->dev, slave->dev, "%s: target %pI4\n",
			  __func__, &targets[i]);
		tags = NULL;

		/* Find out through which dev should the packet go */
		rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
				     RTO_ONLINK, 0);
		if (IS_ERR(rt)) {
			/* there's no route to target - try to send arp
			 * probe to generate any traffic (arp_validate=0)
			 */
			if (bond->params.arp_validate)
				pr_warn_once("%s: no route to arp_ip_target %pI4 and arp_validate is set\n",
					     bond->dev->name,
					     &targets[i]);
			bond_arp_send(slave, ARPOP_REQUEST, targets[i],
				      0, tags);
			continue;
		}

		/* bond device itself */
		if (rt->dst.dev == bond->dev)
			goto found;

		rcu_read_lock();
		tags = bond_verify_device_path(bond->dev, rt->dst.dev, 0);
		rcu_read_unlock();

		if (!IS_ERR_OR_NULL(tags))
			goto found;

		/* Not our device - skip */
		slave_dbg(bond->dev, slave->dev, "no path to arp_ip_target %pI4 via rt.dev %s\n",
			  &targets[i], rt->dst.dev ? rt->dst.dev->name : "NULL");

		ip_rt_put(rt);
		continue;

found:
		addr = bond_confirm_addr(rt->dst.dev, targets[i], 0);
		ip_rt_put(rt);
		bond_arp_send(slave, ARPOP_REQUEST, targets[i], addr, tags);
		kfree(tags);
	}
}

static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 sip, __be32 tip)
{
	int i;

	if (!sip || !bond_has_this_ip(bond, tip)) {
		slave_dbg(bond->dev, slave->dev, "%s: sip %pI4 tip %pI4 not found\n",
			  __func__, &sip, &tip);
		return;
	}

	i = bond_get_targets_ip(bond->params.arp_targets, sip);
	if (i == -1) {
		slave_dbg(bond->dev, slave->dev, "%s: sip %pI4 not found in targets\n",
			  __func__, &sip);
		return;
	}
	slave->last_rx = jiffies;
	slave->target_last_arp_rx[i] = jiffies;
}

static int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
			struct slave *slave)
{
	struct arphdr *arp = (struct arphdr *)skb->data;
	struct slave *curr_active_slave, *curr_arp_slave;
	unsigned char *arp_ptr;
	__be32 sip, tip;
	unsigned int alen;

	alen = arp_hdr_len(bond->dev);

	if (alen > skb_headlen(skb)) {
		arp = kmalloc(alen, GFP_ATOMIC);
		if (!arp)
			goto out_unlock;
		if (skb_copy_bits(skb, 0, arp, alen) < 0)
			goto out_unlock;
	}

	if (arp->ar_hln != bond->dev->addr_len ||
	    skb->pkt_type == PACKET_OTHERHOST ||
	    skb->pkt_type == PACKET_LOOPBACK ||
	    arp->ar_hrd != htons(ARPHRD_ETHER) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_pln != 4)
		goto out_unlock;

	arp_ptr = (unsigned char *)(arp + 1);
	arp_ptr += bond->dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4 + bond->dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	slave_dbg(bond->dev, slave->dev, "%s: %s/%d av %d sv %d sip %pI4 tip %pI4\n",
		  __func__, slave->dev->name, bond_slave_state(slave),
		  bond->params.arp_validate, slave_do_arp_validate(bond, slave),
		  &sip, &tip);

	curr_active_slave = rcu_dereference(bond->curr_active_slave);
	curr_arp_slave = rcu_dereference(bond->current_arp_slave);

	/* We 'trust' the received ARP enough to validate it if:
	 *
	 * (a) the slave receiving the ARP is active (which includes the
	 * current ARP slave, if any), or
	 *
	 * (b) the receiving slave isn't active, but there is a currently
	 * active slave and it received valid arp reply(s) after it became
	 * the currently active slave, or
	 *
	 * (c) there is an ARP slave that sent an ARP during the prior ARP
	 * interval, and we receive an ARP reply on any slave.  We accept
	 * these because switch FDB update delays may deliver the ARP
	 * reply to a slave other than the sender of the ARP request.
	 *
	 * Note: for (b), backup slaves are receiving the broadcast ARP
	 * request, not a reply.  This request passes from the sending
	 * slave through the L2 switch(es) to the receiving slave.  Since
	 * this is checking the request, sip/tip are swapped for
	 * validation.
	 *
	 * This is done to avoid endless looping when we can't reach the
	 * arp_ip_target and fool ourselves with our own arp requests.
3085 */ 3086 if (bond_is_active_slave(slave)) 3087 bond_validate_arp(bond, slave, sip, tip); 3088 else if (curr_active_slave && 3089 time_after(slave_last_rx(bond, curr_active_slave), 3090 curr_active_slave->last_link_up)) 3091 bond_validate_arp(bond, slave, tip, sip); 3092 else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) && 3093 bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1)) 3094 bond_validate_arp(bond, slave, sip, tip); 3095 3096 out_unlock: 3097 if (arp != (struct arphdr *)skb->data) 3098 kfree(arp); 3099 return RX_HANDLER_ANOTHER; 3100 } 3101 3102 #if IS_ENABLED(CONFIG_IPV6) 3103 static void bond_ns_send(struct slave *slave, const struct in6_addr *daddr, 3104 const struct in6_addr *saddr, struct bond_vlan_tag *tags) 3105 { 3106 struct net_device *bond_dev = slave->bond->dev; 3107 struct net_device *slave_dev = slave->dev; 3108 struct in6_addr mcaddr; 3109 struct sk_buff *skb; 3110 3111 slave_dbg(bond_dev, slave_dev, "NS on slave: dst %pI6c src %pI6c\n", 3112 daddr, saddr); 3113 3114 skb = ndisc_ns_create(slave_dev, daddr, saddr, 0); 3115 if (!skb) { 3116 net_err_ratelimited("NS packet allocation failed\n"); 3117 return; 3118 } 3119 3120 addrconf_addr_solict_mult(daddr, &mcaddr); 3121 if (bond_handle_vlan(slave, tags, skb)) { 3122 slave_update_last_tx(slave); 3123 ndisc_send_skb(skb, &mcaddr, saddr); 3124 } 3125 } 3126 3127 static void bond_ns_send_all(struct bonding *bond, struct slave *slave) 3128 { 3129 struct in6_addr *targets = bond->params.ns_targets; 3130 struct bond_vlan_tag *tags; 3131 struct dst_entry *dst; 3132 struct in6_addr saddr; 3133 struct flowi6 fl6; 3134 int i; 3135 3136 for (i = 0; i < BOND_MAX_NS_TARGETS && !ipv6_addr_any(&targets[i]); i++) { 3137 slave_dbg(bond->dev, slave->dev, "%s: target %pI6c\n", 3138 __func__, &targets[i]); 3139 tags = NULL; 3140 3141 /* Find out through which dev should the packet go */ 3142 memset(&fl6, 0, sizeof(struct flowi6)); 3143 fl6.daddr = targets[i]; 3144 fl6.flowi6_oif = bond->dev->ifindex; 3145 3146 dst = ip6_route_output(dev_net(bond->dev), NULL, &fl6); 3147 if (dst->error) { 3148 dst_release(dst); 3149 /* there's no route to target - try to send arp 3150 * probe to generate any traffic (arp_validate=0) 3151 */ 3152 if (bond->params.arp_validate) 3153 pr_warn_once("%s: no route to ns_ip6_target %pI6c and arp_validate is set\n", 3154 bond->dev->name, 3155 &targets[i]); 3156 bond_ns_send(slave, &targets[i], &in6addr_any, tags); 3157 continue; 3158 } 3159 3160 /* bond device itself */ 3161 if (dst->dev == bond->dev) 3162 goto found; 3163 3164 rcu_read_lock(); 3165 tags = bond_verify_device_path(bond->dev, dst->dev, 0); 3166 rcu_read_unlock(); 3167 3168 if (!IS_ERR_OR_NULL(tags)) 3169 goto found; 3170 3171 /* Not our device - skip */ 3172 slave_dbg(bond->dev, slave->dev, "no path to ns_ip6_target %pI6c via dst->dev %s\n", 3173 &targets[i], dst->dev ? 
			  dst->dev->name : "NULL");

		dst_release(dst);
		continue;

found:
		if (!ipv6_dev_get_saddr(dev_net(dst->dev), dst->dev, &targets[i], 0, &saddr))
			bond_ns_send(slave, &targets[i], &saddr, tags);
		else
			bond_ns_send(slave, &targets[i], &in6addr_any, tags);

		dst_release(dst);
		kfree(tags);
	}
}

static int bond_confirm_addr6(struct net_device *dev,
			      struct netdev_nested_priv *priv)
{
	struct in6_addr *addr = (struct in6_addr *)priv->data;

	return ipv6_chk_addr(dev_net(dev), addr, dev, 0);
}

static bool bond_has_this_ip6(struct bonding *bond, struct in6_addr *addr)
{
	struct netdev_nested_priv priv = {
		.data = addr,
	};
	int ret = false;

	if (bond_confirm_addr6(bond->dev, &priv))
		return true;

	rcu_read_lock();
	if (netdev_walk_all_upper_dev_rcu(bond->dev, bond_confirm_addr6, &priv))
		ret = true;
	rcu_read_unlock();

	return ret;
}

static void bond_validate_na(struct bonding *bond, struct slave *slave,
			     struct in6_addr *saddr, struct in6_addr *daddr)
{
	int i;

	/* Ignore NAs that:
	 * 1. Source address is unspecified address.
	 * 2. Dest address is neither all-nodes multicast address nor
	 *    exists on the bond interface.
	 */
	if (ipv6_addr_any(saddr) ||
	    (!ipv6_addr_equal(daddr, &in6addr_linklocal_allnodes) &&
	     !bond_has_this_ip6(bond, daddr))) {
		slave_dbg(bond->dev, slave->dev, "%s: sip %pI6c tip %pI6c not found\n",
			  __func__, saddr, daddr);
		return;
	}

	i = bond_get_targets_ip6(bond->params.ns_targets, saddr);
	if (i == -1) {
		slave_dbg(bond->dev, slave->dev, "%s: sip %pI6c not found in targets\n",
			  __func__, saddr);
		return;
	}
	slave->last_rx = jiffies;
	slave->target_last_arp_rx[i] = jiffies;
}

static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond,
		       struct slave *slave)
{
	struct slave *curr_active_slave, *curr_arp_slave;
	struct in6_addr *saddr, *daddr;
	struct {
		struct ipv6hdr  ip6;
		struct icmp6hdr icmp6;
	} *combined, _combined;

	if (skb->pkt_type == PACKET_OTHERHOST ||
	    skb->pkt_type == PACKET_LOOPBACK)
		goto out;

	combined = skb_header_pointer(skb, 0, sizeof(_combined), &_combined);
	if (!combined || combined->ip6.nexthdr != NEXTHDR_ICMP ||
	    combined->icmp6.icmp6_type != NDISC_NEIGHBOUR_ADVERTISEMENT)
		goto out;

	saddr = &combined->ip6.saddr;
	daddr = &combined->ip6.daddr;

	slave_dbg(bond->dev, slave->dev, "%s: %s/%d av %d sv %d sip %pI6c tip %pI6c\n",
		  __func__, slave->dev->name, bond_slave_state(slave),
		  bond->params.arp_validate, slave_do_arp_validate(bond, slave),
		  saddr, daddr);

	curr_active_slave = rcu_dereference(bond->curr_active_slave);
	curr_arp_slave = rcu_dereference(bond->current_arp_slave);

	/* We 'trust' the received ARP enough to validate it if:
	 * see bond_arp_rcv().
3275 */ 3276 if (bond_is_active_slave(slave)) 3277 bond_validate_na(bond, slave, saddr, daddr); 3278 else if (curr_active_slave && 3279 time_after(slave_last_rx(bond, curr_active_slave), 3280 curr_active_slave->last_link_up)) 3281 bond_validate_na(bond, slave, saddr, daddr); 3282 else if (curr_arp_slave && 3283 bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1)) 3284 bond_validate_na(bond, slave, saddr, daddr); 3285 3286 out: 3287 return RX_HANDLER_ANOTHER; 3288 } 3289 #endif 3290 3291 int bond_rcv_validate(const struct sk_buff *skb, struct bonding *bond, 3292 struct slave *slave) 3293 { 3294 #if IS_ENABLED(CONFIG_IPV6) 3295 bool is_ipv6 = skb->protocol == __cpu_to_be16(ETH_P_IPV6); 3296 #endif 3297 bool is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP); 3298 3299 slave_dbg(bond->dev, slave->dev, "%s: skb->dev %s\n", 3300 __func__, skb->dev->name); 3301 3302 /* Use arp validate logic for both ARP and NS */ 3303 if (!slave_do_arp_validate(bond, slave)) { 3304 if ((slave_do_arp_validate_only(bond) && is_arp) || 3305 #if IS_ENABLED(CONFIG_IPV6) 3306 (slave_do_arp_validate_only(bond) && is_ipv6) || 3307 #endif 3308 !slave_do_arp_validate_only(bond)) 3309 slave->last_rx = jiffies; 3310 return RX_HANDLER_ANOTHER; 3311 } else if (is_arp) { 3312 return bond_arp_rcv(skb, bond, slave); 3313 #if IS_ENABLED(CONFIG_IPV6) 3314 } else if (is_ipv6) { 3315 return bond_na_rcv(skb, bond, slave); 3316 #endif 3317 } else { 3318 return RX_HANDLER_ANOTHER; 3319 } 3320 } 3321 3322 static void bond_send_validate(struct bonding *bond, struct slave *slave) 3323 { 3324 bond_arp_send_all(bond, slave); 3325 #if IS_ENABLED(CONFIG_IPV6) 3326 bond_ns_send_all(bond, slave); 3327 #endif 3328 } 3329 3330 /* function to verify if we're in the arp_interval timeslice, returns true if 3331 * (last_act - arp_interval) <= jiffies <= (last_act + mod * arp_interval + 3332 * arp_interval/2) . the arp_interval/2 is needed for really fast networks. 3333 */ 3334 static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act, 3335 int mod) 3336 { 3337 int delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval); 3338 3339 return time_in_range(jiffies, 3340 last_act - delta_in_ticks, 3341 last_act + mod * delta_in_ticks + delta_in_ticks/2); 3342 } 3343 3344 /* This function is called regularly to monitor each slave's link 3345 * ensuring that traffic is being sent and received when arp monitoring 3346 * is used in load-balancing mode. if the adapter has been dormant, then an 3347 * arp is transmitted to generate traffic. see activebackup_arp_monitor for 3348 * arp monitoring in active backup mode. 3349 */ 3350 static void bond_loadbalance_arp_mon(struct bonding *bond) 3351 { 3352 struct slave *slave, *oldcurrent; 3353 struct list_head *iter; 3354 int do_failover = 0, slave_state_changed = 0; 3355 3356 if (!bond_has_slaves(bond)) 3357 goto re_arm; 3358 3359 rcu_read_lock(); 3360 3361 oldcurrent = rcu_dereference(bond->curr_active_slave); 3362 /* see if any of the previous devices are up now (i.e. they have 3363 * xmt and rcv traffic). the curr_active_slave does not come into 3364 * the picture unless it is null. also, slave->last_link_up is not 3365 * needed here because we send an arp on each slave and give a slave 3366 * as long as it needs to get the tx/rx within the delta. 3367 * TODO: what about up/down delay in arp mode? 
/* This function is called regularly to monitor each slave's link
 * ensuring that traffic is being sent and received when arp monitoring
 * is used in load-balancing mode.  if the adapter has been dormant, then an
 * arp is transmitted to generate traffic.  see bond_activebackup_arp_mon()
 * for arp monitoring in active backup mode.
 */
static void bond_loadbalance_arp_mon(struct bonding *bond)
{
	struct slave *slave, *oldcurrent;
	struct list_head *iter;
	int do_failover = 0, slave_state_changed = 0;

	if (!bond_has_slaves(bond))
		goto re_arm;

	rcu_read_lock();

	oldcurrent = rcu_dereference(bond->curr_active_slave);
	/* see if any of the previous devices are up now (i.e. they have
	 * xmt and rcv traffic).  the curr_active_slave does not come into
	 * the picture unless it is null.  also, slave->last_link_up is not
	 * needed here because we send an arp on each slave and give a slave
	 * as long as it needs to get the tx/rx within the delta.
	 * TODO: what about up/down delay in arp mode?  it wasn't here before
	 * so it can wait
	 */
	bond_for_each_slave_rcu(bond, slave, iter) {
		unsigned long last_tx = slave_last_tx(slave);

		bond_propose_link_state(slave, BOND_LINK_NOCHANGE);

		if (slave->link != BOND_LINK_UP) {
			if (bond_time_in_interval(bond, last_tx, 1) &&
			    bond_time_in_interval(bond, slave->last_rx, 1)) {

				bond_propose_link_state(slave, BOND_LINK_UP);
				slave_state_changed = 1;

				/* primary_slave has no meaning in round-robin
				 * mode. the window of a slave being up and
				 * curr_active_slave being null after enslaving
				 * is closed.
				 */
				if (!oldcurrent) {
					slave_info(bond->dev, slave->dev, "link status definitely up\n");
					do_failover = 1;
				} else {
					slave_info(bond->dev, slave->dev, "interface is now up\n");
				}
			}
		} else {
			/* slave->link == BOND_LINK_UP */

			/* not all switches will respond to an arp request
			 * when the source ip is 0, so don't take the link down
			 * if we don't know our ip yet
			 */
			if (!bond_time_in_interval(bond, last_tx, bond->params.missed_max) ||
			    !bond_time_in_interval(bond, slave->last_rx, bond->params.missed_max)) {

				bond_propose_link_state(slave, BOND_LINK_DOWN);
				slave_state_changed = 1;

				if (slave->link_failure_count < UINT_MAX)
					slave->link_failure_count++;

				slave_info(bond->dev, slave->dev, "interface is now down\n");

				if (slave == oldcurrent)
					do_failover = 1;
			}
		}

		/* note: if switch is in round-robin mode, all links
		 * must tx arp to ensure all links rx an arp - otherwise
		 * links may oscillate or not come up at all; if switch is
		 * in something like xor mode, there is nothing we can
		 * do - all replies will be rx'ed on same link causing slaves
		 * to be unstable during low/no traffic periods
		 */
		if (bond_slave_is_up(slave))
			bond_send_validate(bond, slave);
	}

	rcu_read_unlock();

	if (do_failover || slave_state_changed) {
		if (!rtnl_trylock())
			goto re_arm;

		bond_for_each_slave(bond, slave, iter) {
			if (slave->link_new_state != BOND_LINK_NOCHANGE)
				slave->link = slave->link_new_state;
		}

		if (slave_state_changed) {
			bond_slave_state_change(bond);
			if (BOND_MODE(bond) == BOND_MODE_XOR)
				bond_update_slave_arr(bond, NULL);
		}
		if (do_failover) {
			block_netpoll_tx();
			bond_select_active_slave(bond);
			unblock_netpoll_tx();
		}
		rtnl_unlock();
	}

re_arm:
	if (bond->params.arp_interval)
		queue_delayed_work(bond->wq, &bond->arp_work,
				   msecs_to_jiffies(bond->params.arp_interval));
}
3464 */ 3465 static int bond_ab_arp_inspect(struct bonding *bond) 3466 { 3467 unsigned long last_tx, last_rx; 3468 struct list_head *iter; 3469 struct slave *slave; 3470 int commit = 0; 3471 3472 bond_for_each_slave_rcu(bond, slave, iter) { 3473 bond_propose_link_state(slave, BOND_LINK_NOCHANGE); 3474 last_rx = slave_last_rx(bond, slave); 3475 3476 if (slave->link != BOND_LINK_UP) { 3477 if (bond_time_in_interval(bond, last_rx, 1)) { 3478 bond_propose_link_state(slave, BOND_LINK_UP); 3479 commit++; 3480 } else if (slave->link == BOND_LINK_BACK) { 3481 bond_propose_link_state(slave, BOND_LINK_FAIL); 3482 commit++; 3483 } 3484 continue; 3485 } 3486 3487 /* Give slaves 2*delta after being enslaved or made 3488 * active. This avoids bouncing, as the last receive 3489 * times need a full ARP monitor cycle to be updated. 3490 */ 3491 if (bond_time_in_interval(bond, slave->last_link_up, 2)) 3492 continue; 3493 3494 /* Backup slave is down if: 3495 * - No current_arp_slave AND 3496 * - more than (missed_max+1)*delta since last receive AND 3497 * - the bond has an IP address 3498 * 3499 * Note: a non-null current_arp_slave indicates 3500 * the curr_active_slave went down and we are 3501 * searching for a new one; under this condition 3502 * we only take the curr_active_slave down - this 3503 * gives each slave a chance to tx/rx traffic 3504 * before being taken out 3505 */ 3506 if (!bond_is_active_slave(slave) && 3507 !rcu_access_pointer(bond->current_arp_slave) && 3508 !bond_time_in_interval(bond, last_rx, bond->params.missed_max + 1)) { 3509 bond_propose_link_state(slave, BOND_LINK_DOWN); 3510 commit++; 3511 } 3512 3513 /* Active slave is down if: 3514 * - more than missed_max*delta since transmitting OR 3515 * - (more than missed_max*delta since receive AND 3516 * the bond has an IP address) 3517 */ 3518 last_tx = slave_last_tx(slave); 3519 if (bond_is_active_slave(slave) && 3520 (!bond_time_in_interval(bond, last_tx, bond->params.missed_max) || 3521 !bond_time_in_interval(bond, last_rx, bond->params.missed_max))) { 3522 bond_propose_link_state(slave, BOND_LINK_DOWN); 3523 commit++; 3524 } 3525 } 3526 3527 return commit; 3528 } 3529 3530 /* Called to commit link state changes noted by inspection step of 3531 * active-backup mode ARP monitor. 3532 * 3533 * Called with RTNL hold. 
3534 */ 3535 static void bond_ab_arp_commit(struct bonding *bond) 3536 { 3537 bool do_failover = false; 3538 struct list_head *iter; 3539 unsigned long last_tx; 3540 struct slave *slave; 3541 3542 bond_for_each_slave(bond, slave, iter) { 3543 switch (slave->link_new_state) { 3544 case BOND_LINK_NOCHANGE: 3545 continue; 3546 3547 case BOND_LINK_UP: 3548 last_tx = slave_last_tx(slave); 3549 if (rtnl_dereference(bond->curr_active_slave) != slave || 3550 (!rtnl_dereference(bond->curr_active_slave) && 3551 bond_time_in_interval(bond, last_tx, 1))) { 3552 struct slave *current_arp_slave; 3553 3554 current_arp_slave = rtnl_dereference(bond->current_arp_slave); 3555 bond_set_slave_link_state(slave, BOND_LINK_UP, 3556 BOND_SLAVE_NOTIFY_NOW); 3557 if (current_arp_slave) { 3558 bond_set_slave_inactive_flags( 3559 current_arp_slave, 3560 BOND_SLAVE_NOTIFY_NOW); 3561 RCU_INIT_POINTER(bond->current_arp_slave, NULL); 3562 } 3563 3564 slave_info(bond->dev, slave->dev, "link status definitely up\n"); 3565 3566 if (!rtnl_dereference(bond->curr_active_slave) || 3567 slave == rtnl_dereference(bond->primary_slave) || 3568 slave->prio > rtnl_dereference(bond->curr_active_slave)->prio) 3569 do_failover = true; 3570 3571 } 3572 3573 continue; 3574 3575 case BOND_LINK_DOWN: 3576 if (slave->link_failure_count < UINT_MAX) 3577 slave->link_failure_count++; 3578 3579 bond_set_slave_link_state(slave, BOND_LINK_DOWN, 3580 BOND_SLAVE_NOTIFY_NOW); 3581 bond_set_slave_inactive_flags(slave, 3582 BOND_SLAVE_NOTIFY_NOW); 3583 3584 slave_info(bond->dev, slave->dev, "link status definitely down, disabling slave\n"); 3585 3586 if (slave == rtnl_dereference(bond->curr_active_slave)) { 3587 RCU_INIT_POINTER(bond->current_arp_slave, NULL); 3588 do_failover = true; 3589 } 3590 3591 continue; 3592 3593 case BOND_LINK_FAIL: 3594 bond_set_slave_link_state(slave, BOND_LINK_FAIL, 3595 BOND_SLAVE_NOTIFY_NOW); 3596 bond_set_slave_inactive_flags(slave, 3597 BOND_SLAVE_NOTIFY_NOW); 3598 3599 /* A slave has just been enslaved and has become 3600 * the current active slave. 3601 */ 3602 if (rtnl_dereference(bond->curr_active_slave)) 3603 RCU_INIT_POINTER(bond->current_arp_slave, NULL); 3604 continue; 3605 3606 default: 3607 slave_err(bond->dev, slave->dev, 3608 "impossible: link_new_state %d on slave\n", 3609 slave->link_new_state); 3610 continue; 3611 } 3612 } 3613 3614 if (do_failover) { 3615 block_netpoll_tx(); 3616 bond_select_active_slave(bond); 3617 unblock_netpoll_tx(); 3618 } 3619 3620 bond_set_carrier(bond); 3621 } 3622 3623 /* Send ARP probes for active-backup mode ARP monitor. 3624 * 3625 * Called with rcu_read_lock held. 
3626 */ 3627 static bool bond_ab_arp_probe(struct bonding *bond) 3628 { 3629 struct slave *slave, *before = NULL, *new_slave = NULL, 3630 *curr_arp_slave = rcu_dereference(bond->current_arp_slave), 3631 *curr_active_slave = rcu_dereference(bond->curr_active_slave); 3632 struct list_head *iter; 3633 bool found = false; 3634 bool should_notify_rtnl = BOND_SLAVE_NOTIFY_LATER; 3635 3636 if (curr_arp_slave && curr_active_slave) 3637 netdev_info(bond->dev, "PROBE: c_arp %s && cas %s BAD\n", 3638 curr_arp_slave->dev->name, 3639 curr_active_slave->dev->name); 3640 3641 if (curr_active_slave) { 3642 bond_send_validate(bond, curr_active_slave); 3643 return should_notify_rtnl; 3644 } 3645 3646 /* if we don't have a curr_active_slave, search for the next available 3647 * backup slave from the current_arp_slave and make it the candidate 3648 * for becoming the curr_active_slave 3649 */ 3650 3651 if (!curr_arp_slave) { 3652 curr_arp_slave = bond_first_slave_rcu(bond); 3653 if (!curr_arp_slave) 3654 return should_notify_rtnl; 3655 } 3656 3657 bond_for_each_slave_rcu(bond, slave, iter) { 3658 if (!found && !before && bond_slave_is_up(slave)) 3659 before = slave; 3660 3661 if (found && !new_slave && bond_slave_is_up(slave)) 3662 new_slave = slave; 3663 /* if the link state is up at this point, we 3664 * mark it down - this can happen if we have 3665 * simultaneous link failures and 3666 * reselect_active_interface doesn't make this 3667 * one the current slave so it is still marked 3668 * up when it is actually down 3669 */ 3670 if (!bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) { 3671 bond_set_slave_link_state(slave, BOND_LINK_DOWN, 3672 BOND_SLAVE_NOTIFY_LATER); 3673 if (slave->link_failure_count < UINT_MAX) 3674 slave->link_failure_count++; 3675 3676 bond_set_slave_inactive_flags(slave, 3677 BOND_SLAVE_NOTIFY_LATER); 3678 3679 slave_info(bond->dev, slave->dev, "backup interface is now down\n"); 3680 } 3681 if (slave == curr_arp_slave) 3682 found = true; 3683 } 3684 3685 if (!new_slave && before) 3686 new_slave = before; 3687 3688 if (!new_slave) 3689 goto check_state; 3690 3691 bond_set_slave_link_state(new_slave, BOND_LINK_BACK, 3692 BOND_SLAVE_NOTIFY_LATER); 3693 bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER); 3694 bond_send_validate(bond, new_slave); 3695 new_slave->last_link_up = jiffies; 3696 rcu_assign_pointer(bond->current_arp_slave, new_slave); 3697 3698 check_state: 3699 bond_for_each_slave_rcu(bond, slave, iter) { 3700 if (slave->should_notify || slave->should_notify_link) { 3701 should_notify_rtnl = BOND_SLAVE_NOTIFY_NOW; 3702 break; 3703 } 3704 } 3705 return should_notify_rtnl; 3706 } 3707 3708 static void bond_activebackup_arp_mon(struct bonding *bond) 3709 { 3710 bool should_notify_peers = false; 3711 bool should_notify_rtnl = false; 3712 int delta_in_ticks; 3713 3714 delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval); 3715 3716 if (!bond_has_slaves(bond)) 3717 goto re_arm; 3718 3719 rcu_read_lock(); 3720 3721 should_notify_peers = bond_should_notify_peers(bond); 3722 3723 if (bond_ab_arp_inspect(bond)) { 3724 rcu_read_unlock(); 3725 3726 /* Race avoidance with bond_close flush of workqueue */ 3727 if (!rtnl_trylock()) { 3728 delta_in_ticks = 1; 3729 should_notify_peers = false; 3730 goto re_arm; 3731 } 3732 3733 bond_ab_arp_commit(bond); 3734 3735 rtnl_unlock(); 3736 rcu_read_lock(); 3737 } 3738 3739 should_notify_rtnl = bond_ab_arp_probe(bond); 3740 rcu_read_unlock(); 3741 3742 re_arm: 3743 if (bond->params.arp_interval) 3744 
queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); 3745 3746 if (should_notify_peers || should_notify_rtnl) { 3747 if (!rtnl_trylock()) 3748 return; 3749 3750 if (should_notify_peers) { 3751 bond->send_peer_notif--; 3752 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, 3753 bond->dev); 3754 } 3755 if (should_notify_rtnl) { 3756 bond_slave_state_notify(bond); 3757 bond_slave_link_notify(bond); 3758 } 3759 3760 rtnl_unlock(); 3761 } 3762 } 3763 3764 static void bond_arp_monitor(struct work_struct *work) 3765 { 3766 struct bonding *bond = container_of(work, struct bonding, 3767 arp_work.work); 3768 3769 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) 3770 bond_activebackup_arp_mon(bond); 3771 else 3772 bond_loadbalance_arp_mon(bond); 3773 } 3774 3775 /*-------------------------- netdev event handling --------------------------*/ 3776 3777 /* Change device name */ 3778 static int bond_event_changename(struct bonding *bond) 3779 { 3780 bond_remove_proc_entry(bond); 3781 bond_create_proc_entry(bond); 3782 3783 bond_debug_reregister(bond); 3784 3785 return NOTIFY_DONE; 3786 } 3787 3788 static int bond_master_netdev_event(unsigned long event, 3789 struct net_device *bond_dev) 3790 { 3791 struct bonding *event_bond = netdev_priv(bond_dev); 3792 3793 netdev_dbg(bond_dev, "%s called\n", __func__); 3794 3795 switch (event) { 3796 case NETDEV_CHANGENAME: 3797 return bond_event_changename(event_bond); 3798 case NETDEV_UNREGISTER: 3799 bond_remove_proc_entry(event_bond); 3800 #ifdef CONFIG_XFRM_OFFLOAD 3801 xfrm_dev_state_flush(dev_net(bond_dev), bond_dev, true); 3802 #endif /* CONFIG_XFRM_OFFLOAD */ 3803 break; 3804 case NETDEV_REGISTER: 3805 bond_create_proc_entry(event_bond); 3806 break; 3807 default: 3808 break; 3809 } 3810 3811 return NOTIFY_DONE; 3812 } 3813 3814 static int bond_slave_netdev_event(unsigned long event, 3815 struct net_device *slave_dev) 3816 { 3817 struct slave *slave = bond_slave_get_rtnl(slave_dev), *primary; 3818 struct bonding *bond; 3819 struct net_device *bond_dev; 3820 3821 /* A netdev event can be generated while enslaving a device 3822 * before netdev_rx_handler_register is called in which case 3823 * slave will be NULL 3824 */ 3825 if (!slave) { 3826 netdev_dbg(slave_dev, "%s called on NULL slave\n", __func__); 3827 return NOTIFY_DONE; 3828 } 3829 3830 bond_dev = slave->bond->dev; 3831 bond = slave->bond; 3832 primary = rtnl_dereference(bond->primary_slave); 3833 3834 slave_dbg(bond_dev, slave_dev, "%s called\n", __func__); 3835 3836 switch (event) { 3837 case NETDEV_UNREGISTER: 3838 if (bond_dev->type != ARPHRD_ETHER) 3839 bond_release_and_destroy(bond_dev, slave_dev); 3840 else 3841 __bond_release_one(bond_dev, slave_dev, false, true); 3842 break; 3843 case NETDEV_UP: 3844 case NETDEV_CHANGE: 3845 /* For 802.3ad mode only: 3846 * Getting invalid Speed/Duplex values here will put slave 3847 * in weird state. Mark it as link-fail if the link was 3848 * previously up or link-down if it hasn't yet come up, and 3849 * let link-monitoring (miimon) set it right when correct 3850 * speeds/duplex are available. 3851 */ 3852 if (bond_update_speed_duplex(slave) && 3853 BOND_MODE(bond) == BOND_MODE_8023AD) { 3854 if (slave->last_link_up) 3855 slave->link = BOND_LINK_FAIL; 3856 else 3857 slave->link = BOND_LINK_DOWN; 3858 } 3859 3860 if (BOND_MODE(bond) == BOND_MODE_8023AD) 3861 bond_3ad_adapter_speed_duplex_changed(slave); 3862 fallthrough; 3863 case NETDEV_DOWN: 3864 /* Refresh slave-array if applicable! 
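* (the slave-array here is the usable_slaves transmit array that bond_update_slave_arr() below rebuilds)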
* If the setup does not use miimon or arpmon (mode-specific!), 3866 * then these events will not cause the slave-array to be 3867 * refreshed. This will cause xmit to use a slave that is not 3868 * usable. Avoid such a situation by refreshing the array at these 3869 * events. If these (miimon/arpmon) parameters are configured 3870 * then the array gets refreshed twice and that should be fine! 3871 */ 3872 if (bond_mode_can_use_xmit_hash(bond)) 3873 bond_update_slave_arr(bond, NULL); 3874 break; 3875 case NETDEV_CHANGEMTU: 3876 /* TODO: Should slaves be allowed to 3877 * independently alter their MTU? For 3878 * an active-backup bond, slaves need 3879 * not be the same type of device, so 3880 * MTUs may vary. For other modes, 3881 * slaves arguably should have the 3882 * same MTUs. To do this, we'd need to 3883 * take over the slave's change_mtu 3884 * function for the duration of their 3885 * servitude. 3886 */ 3887 break; 3888 case NETDEV_CHANGENAME: 3889 /* we don't care if we don't have primary set */ 3890 if (!bond_uses_primary(bond) || 3891 !bond->params.primary[0]) 3892 break; 3893 3894 if (slave == primary) { 3895 /* slave's name changed - it's no longer the primary */ 3896 RCU_INIT_POINTER(bond->primary_slave, NULL); 3897 } else if (!strcmp(slave_dev->name, bond->params.primary)) { 3898 /* we have a new primary slave */ 3899 rcu_assign_pointer(bond->primary_slave, slave); 3900 } else { /* we didn't change primary - exit */ 3901 break; 3902 } 3903 3904 netdev_info(bond->dev, "Primary slave changed to %s, reselecting active slave\n", 3905 primary ? slave_dev->name : "none"); 3906 3907 block_netpoll_tx(); 3908 bond_select_active_slave(bond); 3909 unblock_netpoll_tx(); 3910 break; 3911 case NETDEV_FEAT_CHANGE: 3912 bond_compute_features(bond); 3913 break; 3914 case NETDEV_RESEND_IGMP: 3915 /* Propagate to master device */ 3916 call_netdevice_notifiers(event, slave->bond->dev); 3917 break; 3918 default: 3919 break; 3920 } 3921 3922 return NOTIFY_DONE; 3923 } 3924 3925 /* bond_netdev_event: handle netdev notifier chain events. 3926 * 3927 * This function receives events for the netdev chain. The caller (an 3928 * ioctl handler calling blocking_notifier_call_chain) holds the necessary 3929 * locks for us to safely manipulate the slave devices (RTNL lock, 3930 * dev_probe_lock). 3931 */ 3932 static int bond_netdev_event(struct notifier_block *this, 3933 unsigned long event, void *ptr) 3934 { 3935 struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); 3936 3937 netdev_dbg(event_dev, "%s received %s\n", 3938 __func__, netdev_cmd_to_name(event)); 3939 3940 if (!(event_dev->priv_flags & IFF_BONDING)) 3941 return NOTIFY_DONE; 3942 3943 if (event_dev->flags & IFF_MASTER) { 3944 int ret; 3945 3946 ret = bond_master_netdev_event(event, event_dev); 3947 if (ret != NOTIFY_DONE) 3948 return ret; 3949 } 3950 3951 if (event_dev->flags & IFF_SLAVE) 3952 return bond_slave_netdev_event(event, event_dev); 3953 3954 return NOTIFY_DONE; 3955 } 3956 3957 static struct notifier_block bond_netdev_notifier = { 3958 .notifier_call = bond_netdev_event, 3959 }; 3960 3961 /*---------------------------- Hashing Policies -----------------------------*/ 3962 3963 /* Helper to access data in a packet, with or without a backing skb. 3964 * If skb is given the data is linearized if necessary via pskb_may_pull.
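* Callers ask for the total span they are about to read, e.g. bond_eth_hash() passes mhoff + sizeof(struct ethhdr); after a successful pull the packet must be re-read from the returned skb->head.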
3965 */ 3966 static inline const void *bond_pull_data(struct sk_buff *skb, 3967 const void *data, int hlen, int n) 3968 { 3969 if (likely(n <= hlen)) 3970 return data; 3971 else if (skb && likely(pskb_may_pull(skb, n))) 3972 return skb->head; 3973 3974 return NULL; 3975 } 3976 3977 /* L2 hash helper */ 3978 static inline u32 bond_eth_hash(struct sk_buff *skb, const void *data, int mhoff, int hlen) 3979 { 3980 struct ethhdr *ep; 3981 3982 data = bond_pull_data(skb, data, hlen, mhoff + sizeof(struct ethhdr)); 3983 if (!data) 3984 return 0; 3985 3986 ep = (struct ethhdr *)(data + mhoff); 3987 return ep->h_dest[5] ^ ep->h_source[5] ^ be16_to_cpu(ep->h_proto); 3988 } 3989 3990 static bool bond_flow_ip(struct sk_buff *skb, struct flow_keys *fk, const void *data, 3991 int hlen, __be16 l2_proto, int *nhoff, int *ip_proto, bool l34) 3992 { 3993 const struct ipv6hdr *iph6; 3994 const struct iphdr *iph; 3995 3996 if (l2_proto == htons(ETH_P_IP)) { 3997 data = bond_pull_data(skb, data, hlen, *nhoff + sizeof(*iph)); 3998 if (!data) 3999 return false; 4000 4001 iph = (const struct iphdr *)(data + *nhoff); 4002 iph_to_flow_copy_v4addrs(fk, iph); 4003 *nhoff += iph->ihl << 2; 4004 if (!ip_is_fragment(iph)) 4005 *ip_proto = iph->protocol; 4006 } else if (l2_proto == htons(ETH_P_IPV6)) { 4007 data = bond_pull_data(skb, data, hlen, *nhoff + sizeof(*iph6)); 4008 if (!data) 4009 return false; 4010 4011 iph6 = (const struct ipv6hdr *)(data + *nhoff); 4012 iph_to_flow_copy_v6addrs(fk, iph6); 4013 *nhoff += sizeof(*iph6); 4014 *ip_proto = iph6->nexthdr; 4015 } else { 4016 return false; 4017 } 4018 4019 if (l34 && *ip_proto >= 0) 4020 fk->ports.ports = __skb_flow_get_ports(skb, *nhoff, *ip_proto, data, hlen); 4021 4022 return true; 4023 } 4024 4025 static u32 bond_vlan_srcmac_hash(struct sk_buff *skb, const void *data, int mhoff, int hlen) 4026 { 4027 u32 srcmac_vendor = 0, srcmac_dev = 0; 4028 struct ethhdr *mac_hdr; 4029 u16 vlan = 0; 4030 int i; 4031 4032 data = bond_pull_data(skb, data, hlen, mhoff + sizeof(struct ethhdr)); 4033 if (!data) 4034 return 0; 4035 mac_hdr = (struct ethhdr *)(data + mhoff); 4036 4037 for (i = 0; i < 3; i++) 4038 srcmac_vendor = (srcmac_vendor << 8) | mac_hdr->h_source[i]; 4039 4040 for (i = 3; i < ETH_ALEN; i++) 4041 srcmac_dev = (srcmac_dev << 8) | mac_hdr->h_source[i]; 4042 4043 if (skb && skb_vlan_tag_present(skb)) 4044 vlan = skb_vlan_tag_get(skb); 4045 4046 return vlan ^ srcmac_vendor ^ srcmac_dev; 4047 } 4048 4049 /* Extract the appropriate headers based on bond's xmit policy */ 4050 static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb, const void *data, 4051 __be16 l2_proto, int nhoff, int hlen, struct flow_keys *fk) 4052 { 4053 bool l34 = bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34; 4054 int ip_proto = -1; 4055 4056 switch (bond->params.xmit_policy) { 4057 case BOND_XMIT_POLICY_ENCAP23: 4058 case BOND_XMIT_POLICY_ENCAP34: 4059 memset(fk, 0, sizeof(*fk)); 4060 return __skb_flow_dissect(NULL, skb, &flow_keys_bonding, 4061 fk, data, l2_proto, nhoff, hlen, 0); 4062 default: 4063 break; 4064 } 4065 4066 fk->ports.ports = 0; 4067 memset(&fk->icmp, 0, sizeof(fk->icmp)); 4068 if (!bond_flow_ip(skb, fk, data, hlen, l2_proto, &nhoff, &ip_proto, l34)) 4069 return false; 4070 4071 /* ICMP error packets contain at least 8 bytes of the header 4072 * of the packet which generated the error. Use this information 4073 * to correlate ICMP error packets within the same flow which 4074 * generated the error.
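* For example, an ICMPv4 error carries the offending packet's IP header plus 8 bytes, so nhoff is advanced past the icmphdr below and the embedded header is dissected again; the error then hashes to the same slave as the flow that triggered it.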
4075 */ 4076 if (ip_proto == IPPROTO_ICMP || ip_proto == IPPROTO_ICMPV6) { 4077 skb_flow_get_icmp_tci(skb, &fk->icmp, data, nhoff, hlen); 4078 if (ip_proto == IPPROTO_ICMP) { 4079 if (!icmp_is_err(fk->icmp.type)) 4080 return true; 4081 4082 nhoff += sizeof(struct icmphdr); 4083 } else if (ip_proto == IPPROTO_ICMPV6) { 4084 if (!icmpv6_is_err(fk->icmp.type)) 4085 return true; 4086 4087 nhoff += sizeof(struct icmp6hdr); 4088 } 4089 return bond_flow_ip(skb, fk, data, hlen, l2_proto, &nhoff, &ip_proto, l34); 4090 } 4091 4092 return true; 4093 } 4094 4095 static u32 bond_ip_hash(u32 hash, struct flow_keys *flow, int xmit_policy) 4096 { 4097 hash ^= (__force u32)flow_get_u32_dst(flow) ^ 4098 (__force u32)flow_get_u32_src(flow); 4099 hash ^= (hash >> 16); 4100 hash ^= (hash >> 8); 4101 4102 /* discard lowest hash bit to deal with the common even ports pattern */ 4103 if (xmit_policy == BOND_XMIT_POLICY_LAYER34 || 4104 xmit_policy == BOND_XMIT_POLICY_ENCAP34) 4105 return hash >> 1; 4106 4107 return hash; 4108 } 4109 4110 /* Generate hash based on xmit policy. If @skb is given it is used to linearize 4111 * the data as required, but this function can be used without it if the data is 4112 * known to be linear (e.g. with xdp_buff). 4113 */ 4114 static u32 __bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, const void *data, 4115 __be16 l2_proto, int mhoff, int nhoff, int hlen) 4116 { 4117 struct flow_keys flow; 4118 u32 hash; 4119 4120 if (bond->params.xmit_policy == BOND_XMIT_POLICY_VLAN_SRCMAC) 4121 return bond_vlan_srcmac_hash(skb, data, mhoff, hlen); 4122 4123 if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 || 4124 !bond_flow_dissect(bond, skb, data, l2_proto, nhoff, hlen, &flow)) 4125 return bond_eth_hash(skb, data, mhoff, hlen); 4126 4127 if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 || 4128 bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23) { 4129 hash = bond_eth_hash(skb, data, mhoff, hlen); 4130 } else { 4131 if (flow.icmp.id) 4132 memcpy(&hash, &flow.icmp, sizeof(hash)); 4133 else 4134 memcpy(&hash, &flow.ports.ports, sizeof(hash)); 4135 } 4136 4137 return bond_ip_hash(hash, &flow, bond->params.xmit_policy); 4138 } 4139 4140 /** 4141 * bond_xmit_hash - generate a hash value based on the xmit policy 4142 * @bond: bonding device 4143 * @skb: buffer to use for headers 4144 * 4145 * This function will extract the necessary headers from the skb buffer and use 4146 * them to generate a hash based on the xmit_policy set in the bonding device 4147 */ 4148 u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb) 4149 { 4150 if (bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP34 && 4151 skb->l4_hash) 4152 return skb->hash; 4153 4154 return __bond_xmit_hash(bond, skb, skb->data, skb->protocol, 4155 skb_mac_offset(skb), skb_network_offset(skb), 4156 skb_headlen(skb)); 4157 } 4158 4159 /** 4160 * bond_xmit_hash_xdp - generate a hash value based on the xmit policy 4161 * @bond: bonding device 4162 * @xdp: buffer to use for headers 4163 * 4164 * The XDP variant of bond_xmit_hash. 
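* An xdp_buff is always linear, so __bond_xmit_hash() is called with a NULL skb and hlen set to data_end - data; no pulling is ever needed.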
4165 */ 4166 static u32 bond_xmit_hash_xdp(struct bonding *bond, struct xdp_buff *xdp) 4167 { 4168 struct ethhdr *eth; 4169 4170 if (xdp->data + sizeof(struct ethhdr) > xdp->data_end) 4171 return 0; 4172 4173 eth = (struct ethhdr *)xdp->data; 4174 4175 return __bond_xmit_hash(bond, NULL, xdp->data, eth->h_proto, 0, 4176 sizeof(struct ethhdr), xdp->data_end - xdp->data); 4177 } 4178 4179 /*-------------------------- Device entry points ----------------------------*/ 4180 4181 void bond_work_init_all(struct bonding *bond) 4182 { 4183 INIT_DELAYED_WORK(&bond->mcast_work, 4184 bond_resend_igmp_join_requests_delayed); 4185 INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor); 4186 INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor); 4187 INIT_DELAYED_WORK(&bond->arp_work, bond_arp_monitor); 4188 INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler); 4189 INIT_DELAYED_WORK(&bond->slave_arr_work, bond_slave_arr_handler); 4190 } 4191 4192 static void bond_work_cancel_all(struct bonding *bond) 4193 { 4194 cancel_delayed_work_sync(&bond->mii_work); 4195 cancel_delayed_work_sync(&bond->arp_work); 4196 cancel_delayed_work_sync(&bond->alb_work); 4197 cancel_delayed_work_sync(&bond->ad_work); 4198 cancel_delayed_work_sync(&bond->mcast_work); 4199 cancel_delayed_work_sync(&bond->slave_arr_work); 4200 } 4201 4202 static int bond_open(struct net_device *bond_dev) 4203 { 4204 struct bonding *bond = netdev_priv(bond_dev); 4205 struct list_head *iter; 4206 struct slave *slave; 4207 4208 if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN && !bond->rr_tx_counter) { 4209 bond->rr_tx_counter = alloc_percpu(u32); 4210 if (!bond->rr_tx_counter) 4211 return -ENOMEM; 4212 } 4213 4214 /* reset slave->backup and slave->inactive */ 4215 if (bond_has_slaves(bond)) { 4216 bond_for_each_slave(bond, slave, iter) { 4217 if (bond_uses_primary(bond) && 4218 slave != rcu_access_pointer(bond->curr_active_slave)) { 4219 bond_set_slave_inactive_flags(slave, 4220 BOND_SLAVE_NOTIFY_NOW); 4221 } else if (BOND_MODE(bond) != BOND_MODE_8023AD) { 4222 bond_set_slave_active_flags(slave, 4223 BOND_SLAVE_NOTIFY_NOW); 4224 } 4225 } 4226 } 4227 4228 if (bond_is_lb(bond)) { 4229 /* bond_alb_initialize must be called before the timer 4230 * is started. 4231 */ 4232 if (bond_alb_initialize(bond, (BOND_MODE(bond) == BOND_MODE_ALB))) 4233 return -ENOMEM; 4234 if (bond->params.tlb_dynamic_lb || BOND_MODE(bond) == BOND_MODE_ALB) 4235 queue_delayed_work(bond->wq, &bond->alb_work, 0); 4236 } 4237 4238 if (bond->params.miimon) /* link check interval, in milliseconds. */ 4239 queue_delayed_work(bond->wq, &bond->mii_work, 0); 4240 4241 if (bond->params.arp_interval) { /* arp interval, in milliseconds. 
*/ 4242 queue_delayed_work(bond->wq, &bond->arp_work, 0); 4243 bond->recv_probe = bond_rcv_validate; 4244 } 4245 4246 if (BOND_MODE(bond) == BOND_MODE_8023AD) { 4247 queue_delayed_work(bond->wq, &bond->ad_work, 0); 4248 /* register to receive LACPDUs */ 4249 bond->recv_probe = bond_3ad_lacpdu_recv; 4250 bond_3ad_initiate_agg_selection(bond, 1); 4251 4252 bond_for_each_slave(bond, slave, iter) 4253 dev_mc_add(slave->dev, lacpdu_mcast_addr); 4254 } 4255 4256 if (bond_mode_can_use_xmit_hash(bond)) 4257 bond_update_slave_arr(bond, NULL); 4258 4259 return 0; 4260 } 4261 4262 static int bond_close(struct net_device *bond_dev) 4263 { 4264 struct bonding *bond = netdev_priv(bond_dev); 4265 struct slave *slave; 4266 4267 bond_work_cancel_all(bond); 4268 bond->send_peer_notif = 0; 4269 if (bond_is_lb(bond)) 4270 bond_alb_deinitialize(bond); 4271 bond->recv_probe = NULL; 4272 4273 if (bond_uses_primary(bond)) { 4274 rcu_read_lock(); 4275 slave = rcu_dereference(bond->curr_active_slave); 4276 if (slave) 4277 bond_hw_addr_flush(bond_dev, slave->dev); 4278 rcu_read_unlock(); 4279 } else { 4280 struct list_head *iter; 4281 4282 bond_for_each_slave(bond, slave, iter) 4283 bond_hw_addr_flush(bond_dev, slave->dev); 4284 } 4285 4286 return 0; 4287 } 4288 4289 /* fold stats, assuming all rtnl_link_stats64 fields are u64, but 4290 * that some drivers can provide 32bit values only. 4291 */ 4292 static void bond_fold_stats(struct rtnl_link_stats64 *_res, 4293 const struct rtnl_link_stats64 *_new, 4294 const struct rtnl_link_stats64 *_old) 4295 { 4296 const u64 *new = (const u64 *)_new; 4297 const u64 *old = (const u64 *)_old; 4298 u64 *res = (u64 *)_res; 4299 int i; 4300 4301 for (i = 0; i < sizeof(*_res) / sizeof(u64); i++) { 4302 u64 nv = new[i]; 4303 u64 ov = old[i]; 4304 s64 delta = nv - ov; 4305 4306 /* detects if this particular field is 32bit only */ 4307 if (((nv | ov) >> 32) == 0) 4308 delta = (s64)(s32)((u32)nv - (u32)ov); 4309 4310 /* filter anomalies, some drivers reset their stats 4311 * at down/up events. 
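* Worked example: a 32bit-only driver reports old = 0xfffffff0 and new = 0x5; the plain u64 delta would be negative and dropped, but both values fit in 32 bits, so delta = (s32)((u32)0x5 - (u32)0xfffffff0) = 21 is accumulated instead.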
4312 */ 4313 if (delta > 0) 4314 res[i] += delta; 4315 } 4316 } 4317 4318 #ifdef CONFIG_LOCKDEP 4319 static int bond_get_lowest_level_rcu(struct net_device *dev) 4320 { 4321 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 4322 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 4323 int cur = 0, max = 0; 4324 4325 now = dev; 4326 iter = &dev->adj_list.lower; 4327 4328 while (1) { 4329 next = NULL; 4330 while (1) { 4331 ldev = netdev_next_lower_dev_rcu(now, &iter); 4332 if (!ldev) 4333 break; 4334 4335 next = ldev; 4336 niter = &ldev->adj_list.lower; 4337 dev_stack[cur] = now; 4338 iter_stack[cur++] = iter; 4339 if (max <= cur) 4340 max = cur; 4341 break; 4342 } 4343 4344 if (!next) { 4345 if (!cur) 4346 return max; 4347 next = dev_stack[--cur]; 4348 niter = iter_stack[cur]; 4349 } 4350 4351 now = next; 4352 iter = niter; 4353 } 4354 4355 return max; 4356 } 4357 #endif 4358 4359 static void bond_get_stats(struct net_device *bond_dev, 4360 struct rtnl_link_stats64 *stats) 4361 { 4362 struct bonding *bond = netdev_priv(bond_dev); 4363 struct rtnl_link_stats64 temp; 4364 struct list_head *iter; 4365 struct slave *slave; 4366 int nest_level = 0; 4367 4368 4369 rcu_read_lock(); 4370 #ifdef CONFIG_LOCKDEP 4371 nest_level = bond_get_lowest_level_rcu(bond_dev); 4372 #endif 4373 4374 spin_lock_nested(&bond->stats_lock, nest_level); 4375 memcpy(stats, &bond->bond_stats, sizeof(*stats)); 4376 4377 bond_for_each_slave_rcu(bond, slave, iter) { 4378 const struct rtnl_link_stats64 *new = 4379 dev_get_stats(slave->dev, &temp); 4380 4381 bond_fold_stats(stats, new, &slave->slave_stats); 4382 4383 /* save off the slave stats for the next run */ 4384 memcpy(&slave->slave_stats, new, sizeof(*new)); 4385 } 4386 4387 memcpy(&bond->bond_stats, stats, sizeof(*stats)); 4388 spin_unlock(&bond->stats_lock); 4389 rcu_read_unlock(); 4390 } 4391 4392 static int bond_eth_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd) 4393 { 4394 struct bonding *bond = netdev_priv(bond_dev); 4395 struct mii_ioctl_data *mii = NULL; 4396 const struct net_device_ops *ops; 4397 struct net_device *real_dev; 4398 struct hwtstamp_config cfg; 4399 struct ifreq ifrr; 4400 int res = 0; 4401 4402 netdev_dbg(bond_dev, "bond_eth_ioctl: cmd=%d\n", cmd); 4403 4404 switch (cmd) { 4405 case SIOCGMIIPHY: 4406 mii = if_mii(ifr); 4407 if (!mii) 4408 return -EINVAL; 4409 4410 mii->phy_id = 0; 4411 fallthrough; 4412 case SIOCGMIIREG: 4413 /* We do this again just in case we were called by SIOCGMIIREG 4414 * instead of SIOCGMIIPHY. 
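* Only MII register 1 (the BMSR) is emulated below: val_out reports BMSR_LSTATUS when the bond currently has carrier, and 0 otherwise.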
4415 */ 4416 mii = if_mii(ifr); 4417 if (!mii) 4418 return -EINVAL; 4419 4420 if (mii->reg_num == 1) { 4421 mii->val_out = 0; 4422 if (netif_carrier_ok(bond->dev)) 4423 mii->val_out = BMSR_LSTATUS; 4424 } 4425 4426 break; 4427 case SIOCSHWTSTAMP: 4428 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) 4429 return -EFAULT; 4430 4431 if (!(cfg.flags & HWTSTAMP_FLAG_BONDED_PHC_INDEX)) 4432 return -EOPNOTSUPP; 4433 4434 fallthrough; 4435 case SIOCGHWTSTAMP: 4436 real_dev = bond_option_active_slave_get_rcu(bond); 4437 if (!real_dev) 4438 return -EOPNOTSUPP; 4439 4440 strscpy_pad(ifrr.ifr_name, real_dev->name, IFNAMSIZ); 4441 ifrr.ifr_ifru = ifr->ifr_ifru; 4442 4443 ops = real_dev->netdev_ops; 4444 if (netif_device_present(real_dev) && ops->ndo_eth_ioctl) { 4445 res = ops->ndo_eth_ioctl(real_dev, &ifrr, cmd); 4446 if (res) 4447 return res; 4448 4449 ifr->ifr_ifru = ifrr.ifr_ifru; 4450 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) 4451 return -EFAULT; 4452 4453 /* Set the BOND_PHC_INDEX flag to notify user space */ 4454 cfg.flags |= HWTSTAMP_FLAG_BONDED_PHC_INDEX; 4455 4456 return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? 4457 -EFAULT : 0; 4458 } 4459 fallthrough; 4460 default: 4461 res = -EOPNOTSUPP; 4462 } 4463 4464 return res; 4465 } 4466 4467 static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd) 4468 { 4469 struct bonding *bond = netdev_priv(bond_dev); 4470 struct net_device *slave_dev = NULL; 4471 struct ifbond k_binfo; 4472 struct ifbond __user *u_binfo = NULL; 4473 struct ifslave k_sinfo; 4474 struct ifslave __user *u_sinfo = NULL; 4475 struct bond_opt_value newval; 4476 struct net *net; 4477 int res = 0; 4478 4479 netdev_dbg(bond_dev, "bond_ioctl: cmd=%d\n", cmd); 4480 4481 switch (cmd) { 4482 case SIOCBONDINFOQUERY: 4483 u_binfo = (struct ifbond __user *)ifr->ifr_data; 4484 4485 if (copy_from_user(&k_binfo, u_binfo, sizeof(ifbond))) 4486 return -EFAULT; 4487 4488 bond_info_query(bond_dev, &k_binfo); 4489 if (copy_to_user(u_binfo, &k_binfo, sizeof(ifbond))) 4490 return -EFAULT; 4491 4492 return 0; 4493 case SIOCBONDSLAVEINFOQUERY: 4494 u_sinfo = (struct ifslave __user *)ifr->ifr_data; 4495 4496 if (copy_from_user(&k_sinfo, u_sinfo, sizeof(ifslave))) 4497 return -EFAULT; 4498 4499 res = bond_slave_info_query(bond_dev, &k_sinfo); 4500 if (res == 0 && 4501 copy_to_user(u_sinfo, &k_sinfo, sizeof(ifslave))) 4502 return -EFAULT; 4503 4504 return res; 4505 default: 4506 break; 4507 } 4508 4509 net = dev_net(bond_dev); 4510 4511 if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 4512 return -EPERM; 4513 4514 slave_dev = __dev_get_by_name(net, ifr->ifr_slave); 4515 4516 slave_dbg(bond_dev, slave_dev, "slave_dev=%p:\n", slave_dev); 4517 4518 if (!slave_dev) 4519 return -ENODEV; 4520 4521 switch (cmd) { 4522 case SIOCBONDENSLAVE: 4523 res = bond_enslave(bond_dev, slave_dev, NULL); 4524 break; 4525 case SIOCBONDRELEASE: 4526 res = bond_release(bond_dev, slave_dev); 4527 break; 4528 case SIOCBONDSETHWADDR: 4529 res = bond_set_dev_addr(bond_dev, slave_dev); 4530 break; 4531 case SIOCBONDCHANGEACTIVE: 4532 bond_opt_initstr(&newval, slave_dev->name); 4533 res = __bond_opt_set_notify(bond, BOND_OPT_ACTIVE_SLAVE, 4534 &newval); 4535 break; 4536 default: 4537 res = -EOPNOTSUPP; 4538 } 4539 4540 return res; 4541 } 4542 4543 static int bond_siocdevprivate(struct net_device *bond_dev, struct ifreq *ifr, 4544 void __user *data, int cmd) 4545 { 4546 struct ifreq ifrdata = { .ifr_data = data }; 4547 4548 switch (cmd) { 4549 case BOND_INFO_QUERY_OLD: 4550 return 
bond_do_ioctl(bond_dev, &ifrdata, SIOCBONDINFOQUERY); 4551 case BOND_SLAVE_INFO_QUERY_OLD: 4552 return bond_do_ioctl(bond_dev, &ifrdata, SIOCBONDSLAVEINFOQUERY); 4553 case BOND_ENSLAVE_OLD: 4554 return bond_do_ioctl(bond_dev, ifr, SIOCBONDENSLAVE); 4555 case BOND_RELEASE_OLD: 4556 return bond_do_ioctl(bond_dev, ifr, SIOCBONDRELEASE); 4557 case BOND_SETHWADDR_OLD: 4558 return bond_do_ioctl(bond_dev, ifr, SIOCBONDSETHWADDR); 4559 case BOND_CHANGE_ACTIVE_OLD: 4560 return bond_do_ioctl(bond_dev, ifr, SIOCBONDCHANGEACTIVE); 4561 } 4562 4563 return -EOPNOTSUPP; 4564 } 4565 4566 static void bond_change_rx_flags(struct net_device *bond_dev, int change) 4567 { 4568 struct bonding *bond = netdev_priv(bond_dev); 4569 4570 if (change & IFF_PROMISC) 4571 bond_set_promiscuity(bond, 4572 bond_dev->flags & IFF_PROMISC ? 1 : -1); 4573 4574 if (change & IFF_ALLMULTI) 4575 bond_set_allmulti(bond, 4576 bond_dev->flags & IFF_ALLMULTI ? 1 : -1); 4577 } 4578 4579 static void bond_set_rx_mode(struct net_device *bond_dev) 4580 { 4581 struct bonding *bond = netdev_priv(bond_dev); 4582 struct list_head *iter; 4583 struct slave *slave; 4584 4585 rcu_read_lock(); 4586 if (bond_uses_primary(bond)) { 4587 slave = rcu_dereference(bond->curr_active_slave); 4588 if (slave) { 4589 dev_uc_sync(slave->dev, bond_dev); 4590 dev_mc_sync(slave->dev, bond_dev); 4591 } 4592 } else { 4593 bond_for_each_slave_rcu(bond, slave, iter) { 4594 dev_uc_sync_multiple(slave->dev, bond_dev); 4595 dev_mc_sync_multiple(slave->dev, bond_dev); 4596 } 4597 } 4598 rcu_read_unlock(); 4599 } 4600 4601 static int bond_neigh_init(struct neighbour *n) 4602 { 4603 struct bonding *bond = netdev_priv(n->dev); 4604 const struct net_device_ops *slave_ops; 4605 struct neigh_parms parms; 4606 struct slave *slave; 4607 int ret = 0; 4608 4609 rcu_read_lock(); 4610 slave = bond_first_slave_rcu(bond); 4611 if (!slave) 4612 goto out; 4613 slave_ops = slave->dev->netdev_ops; 4614 if (!slave_ops->ndo_neigh_setup) 4615 goto out; 4616 4617 /* TODO: find another way [1] to implement this. 4618 * Passing a zeroed structure is fragile, 4619 * but at least we do not pass garbage. 4620 * 4621 * [1] One way would be that ndo_neigh_setup() never touches 4622 * struct neigh_parms, but propagates the new neigh_setup() 4623 * back to ___neigh_create() / neigh_parms_alloc() 4624 */ 4625 memset(&parms, 0, sizeof(parms)); 4626 ret = slave_ops->ndo_neigh_setup(slave->dev, &parms); 4627 4628 if (ret) 4629 goto out; 4630 4631 if (parms.neigh_setup) 4632 ret = parms.neigh_setup(n); 4633 out: 4634 rcu_read_unlock(); 4635 return ret; 4636 } 4637 4638 /* The bonding ndo_neigh_setup is called at init time before any 4639 * slave exists. So we must declare a proxy setup function which will 4640 * be used at run time to resolve the actual slave neigh param setup. 4641 * 4642 * It's also called by master devices (such as vlans) to set up their 4643 * underlying devices. In that case - do nothing, we're already set up from 4644 * our init. 4645 */ 4646 static int bond_neigh_setup(struct net_device *dev, 4647 struct neigh_parms *parms) 4648 { 4649 /* modify only our neigh_parms */ 4650 if (parms->dev == dev) 4651 parms->neigh_setup = bond_neigh_init; 4652 4653 return 0; 4654 } 4655 4656 /* Change the MTU of all of a master's slaves to match the master */ 4657 static int bond_change_mtu(struct net_device *bond_dev, int new_mtu) 4658 { 4659 struct bonding *bond = netdev_priv(bond_dev); 4660 struct slave *slave, *rollback_slave; 4661 struct list_head *iter; 4662 int res = 0; 4663 4664 netdev_dbg(bond_dev, "bond=%p, new_mtu=%d\n", bond, new_mtu); 4665 4666 bond_for_each_slave(bond, slave, iter) { 4667 slave_dbg(bond_dev, slave->dev, "s %p c_m %p\n", 4668 slave, slave->dev->netdev_ops->ndo_change_mtu); 4669 4670 res = dev_set_mtu(slave->dev, new_mtu); 4671 4672 if (res) { 4673 /* If we failed to set the slave's mtu to the new value 4674 * we must abort the operation even in ACTIVE_BACKUP 4675 * mode, because if we allow the backup slaves to have 4676 * different mtu values than the active slave we'll 4677 * need to change their mtu when doing a failover. That 4678 * means changing their mtu from timer context, which 4679 * is probably not a good idea. 4680 */ 4681 slave_dbg(bond_dev, slave->dev, "err %d setting mtu to %d\n", 4682 res, new_mtu); 4683 goto unwind; 4684 } 4685 } 4686 4687 bond_dev->mtu = new_mtu; 4688 4689 return 0; 4690 4691 unwind: 4692 /* unwind from head to the slave that failed */ 4693 bond_for_each_slave(bond, rollback_slave, iter) { 4694 int tmp_res; 4695 4696 if (rollback_slave == slave) 4697 break; 4698 4699 tmp_res = dev_set_mtu(rollback_slave->dev, bond_dev->mtu); 4700 if (tmp_res) 4701 slave_dbg(bond_dev, rollback_slave->dev, "unwind err %d\n", 4702 tmp_res); 4703 } 4704 4705 return res; 4706 } 4707 4708 /* Change HW address 4709 * 4710 * Note that many devices must be down to change the HW address, and 4711 * downing the master releases all slaves. We can make bonds full of 4712 * bonding devices to test this, however. 4713 */ 4714 static int bond_set_mac_address(struct net_device *bond_dev, void *addr) 4715 { 4716 struct bonding *bond = netdev_priv(bond_dev); 4717 struct slave *slave, *rollback_slave; 4718 struct sockaddr_storage *ss = addr, tmp_ss; 4719 struct list_head *iter; 4720 int res = 0; 4721 4722 if (BOND_MODE(bond) == BOND_MODE_ALB) 4723 return bond_alb_set_mac_address(bond_dev, addr); 4724 4725 4726 netdev_dbg(bond_dev, "%s: bond=%p\n", __func__, bond); 4727 4728 /* If fail_over_mac is enabled, do nothing and return success. 4729 * Returning an error causes ifenslave to fail. 4730 */ 4731 if (bond->params.fail_over_mac && 4732 BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) 4733 return 0; 4734 4735 if (!is_valid_ether_addr(ss->__data)) 4736 return -EADDRNOTAVAIL; 4737 4738 bond_for_each_slave(bond, slave, iter) { 4739 slave_dbg(bond_dev, slave->dev, "%s: slave=%p\n", 4740 __func__, slave); 4741 res = dev_set_mac_address(slave->dev, addr, NULL); 4742 if (res) { 4743 /* TODO: consider downing the slave 4744 * and retrying? 4745 * User should expect communications 4746 * breakage anyway until ARP finishes 4747 * updating, so... 4748
4748 */ 4749 slave_dbg(bond_dev, slave->dev, "%s: err %d\n", 4750 __func__, res); 4751 goto unwind; 4752 } 4753 } 4754 4755 /* success */ 4756 dev_addr_set(bond_dev, ss->__data); 4757 return 0; 4758 4759 unwind: 4760 memcpy(tmp_ss.__data, bond_dev->dev_addr, bond_dev->addr_len); 4761 tmp_ss.ss_family = bond_dev->type; 4762 4763 /* unwind from head to the slave that failed */ 4764 bond_for_each_slave(bond, rollback_slave, iter) { 4765 int tmp_res; 4766 4767 if (rollback_slave == slave) 4768 break; 4769 4770 tmp_res = dev_set_mac_address(rollback_slave->dev, 4771 (struct sockaddr *)&tmp_ss, NULL); 4772 if (tmp_res) { 4773 slave_dbg(bond_dev, rollback_slave->dev, "%s: unwind err %d\n", 4774 __func__, tmp_res); 4775 } 4776 } 4777 4778 return res; 4779 } 4780 4781 /** 4782 * bond_get_slave_by_id - get xmit slave with slave_id 4783 * @bond: bonding device that is transmitting 4784 * @slave_id: slave id up to slave_cnt-1 through which to transmit 4785 * 4786 * This function tries to get slave with slave_id but in case 4787 * it fails, it tries to find the first available slave for transmission. 4788 */ 4789 static struct slave *bond_get_slave_by_id(struct bonding *bond, 4790 int slave_id) 4791 { 4792 struct list_head *iter; 4793 struct slave *slave; 4794 int i = slave_id; 4795 4796 /* Here we start from the slave with slave_id */ 4797 bond_for_each_slave_rcu(bond, slave, iter) { 4798 if (--i < 0) { 4799 if (bond_slave_can_tx(slave)) 4800 return slave; 4801 } 4802 } 4803 4804 /* Here we start from the first slave up to slave_id */ 4805 i = slave_id; 4806 bond_for_each_slave_rcu(bond, slave, iter) { 4807 if (--i < 0) 4808 break; 4809 if (bond_slave_can_tx(slave)) 4810 return slave; 4811 } 4812 /* no slave that can tx has been found */ 4813 return NULL; 4814 } 4815 4816 /** 4817 * bond_rr_gen_slave_id - generate slave id based on packets_per_slave 4818 * @bond: bonding device to use 4819 * 4820 * Based on the value of the bonding device's packets_per_slave parameter 4821 * this function generates a slave id, which is usually used as the next 4822 * slave to transmit through. 4823 */ 4824 static u32 bond_rr_gen_slave_id(struct bonding *bond) 4825 { 4826 u32 slave_id; 4827 struct reciprocal_value reciprocal_packets_per_slave; 4828 int packets_per_slave = bond->params.packets_per_slave; 4829 4830 switch (packets_per_slave) { 4831 case 0: 4832 slave_id = get_random_u32(); 4833 break; 4834 case 1: 4835 slave_id = this_cpu_inc_return(*bond->rr_tx_counter); 4836 break; 4837 default: 4838 reciprocal_packets_per_slave = 4839 bond->params.reciprocal_packets_per_slave; 4840 slave_id = this_cpu_inc_return(*bond->rr_tx_counter); 4841 slave_id = reciprocal_divide(slave_id, 4842 reciprocal_packets_per_slave); 4843 break; 4844 } 4845 4846 return slave_id; 4847 } 4848 4849 static struct slave *bond_xmit_roundrobin_slave_get(struct bonding *bond, 4850 struct sk_buff *skb) 4851 { 4852 struct slave *slave; 4853 int slave_cnt; 4854 u32 slave_id; 4855 4856 /* Start with the curr_active_slave that joined the bond as the 4857 * default for sending IGMP traffic. For failover purposes one 4858 * needs to maintain some consistency for the interface that will 4859 * send the join/membership reports. The curr_active_slave found 4860 * will send all of this type of traffic. 
4861 */ 4862 if (skb->protocol == htons(ETH_P_IP)) { 4863 int noff = skb_network_offset(skb); 4864 struct iphdr *iph; 4865 4866 if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph)))) 4867 goto non_igmp; 4868 4869 iph = ip_hdr(skb); 4870 if (iph->protocol == IPPROTO_IGMP) { 4871 slave = rcu_dereference(bond->curr_active_slave); 4872 if (slave) 4873 return slave; 4874 return bond_get_slave_by_id(bond, 0); 4875 } 4876 } 4877 4878 non_igmp: 4879 slave_cnt = READ_ONCE(bond->slave_cnt); 4880 if (likely(slave_cnt)) { 4881 slave_id = bond_rr_gen_slave_id(bond) % slave_cnt; 4882 return bond_get_slave_by_id(bond, slave_id); 4883 } 4884 return NULL; 4885 } 4886 4887 static struct slave *bond_xdp_xmit_roundrobin_slave_get(struct bonding *bond, 4888 struct xdp_buff *xdp) 4889 { 4890 struct slave *slave; 4891 int slave_cnt; 4892 u32 slave_id; 4893 const struct ethhdr *eth; 4894 void *data = xdp->data; 4895 4896 if (data + sizeof(struct ethhdr) > xdp->data_end) 4897 goto non_igmp; 4898 4899 eth = (struct ethhdr *)data; 4900 data += sizeof(struct ethhdr); 4901 4902 /* See comment on IGMP in bond_xmit_roundrobin_slave_get() */ 4903 if (eth->h_proto == htons(ETH_P_IP)) { 4904 const struct iphdr *iph; 4905 4906 if (data + sizeof(struct iphdr) > xdp->data_end) 4907 goto non_igmp; 4908 4909 iph = (struct iphdr *)data; 4910 4911 if (iph->protocol == IPPROTO_IGMP) { 4912 slave = rcu_dereference(bond->curr_active_slave); 4913 if (slave) 4914 return slave; 4915 return bond_get_slave_by_id(bond, 0); 4916 } 4917 } 4918 4919 non_igmp: 4920 slave_cnt = READ_ONCE(bond->slave_cnt); 4921 if (likely(slave_cnt)) { 4922 slave_id = bond_rr_gen_slave_id(bond) % slave_cnt; 4923 return bond_get_slave_by_id(bond, slave_id); 4924 } 4925 return NULL; 4926 } 4927 4928 static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb, 4929 struct net_device *bond_dev) 4930 { 4931 struct bonding *bond = netdev_priv(bond_dev); 4932 struct slave *slave; 4933 4934 slave = bond_xmit_roundrobin_slave_get(bond, skb); 4935 if (likely(slave)) 4936 return bond_dev_queue_xmit(bond, skb, slave->dev); 4937 4938 return bond_tx_drop(bond_dev, skb); 4939 } 4940 4941 static struct slave *bond_xmit_activebackup_slave_get(struct bonding *bond) 4942 { 4943 return rcu_dereference(bond->curr_active_slave); 4944 } 4945 4946 /* In active-backup mode, we know that bond->curr_active_slave is always valid if 4947 * the bond has a usable interface. 4948 */ 4949 static netdev_tx_t bond_xmit_activebackup(struct sk_buff *skb, 4950 struct net_device *bond_dev) 4951 { 4952 struct bonding *bond = netdev_priv(bond_dev); 4953 struct slave *slave; 4954 4955 slave = bond_xmit_activebackup_slave_get(bond); 4956 if (slave) 4957 return bond_dev_queue_xmit(bond, skb, slave->dev); 4958 4959 return bond_tx_drop(bond_dev, skb); 4960 } 4961 4962 /* Use this to update slave_array when (a) it's not appropriate to update 4963 * slave_array right away (note that update_slave_array() may sleep) 4964 * and / or (b) RTNL is not held. 4965 */ 4966 void bond_slave_arr_work_rearm(struct bonding *bond, unsigned long delay) 4967 { 4968 queue_delayed_work(bond->wq, &bond->slave_arr_work, delay); 4969 } 4970 4971 /* Slave array work handler. 
Holds only RTNL */ 4972 static void bond_slave_arr_handler(struct work_struct *work) 4973 { 4974 struct bonding *bond = container_of(work, struct bonding, 4975 slave_arr_work.work); 4976 int ret; 4977 4978 if (!rtnl_trylock()) 4979 goto err; 4980 4981 ret = bond_update_slave_arr(bond, NULL); 4982 rtnl_unlock(); 4983 if (ret) { 4984 pr_warn_ratelimited("Failed to update slave array from WT\n"); 4985 goto err; 4986 } 4987 return; 4988 4989 err: 4990 bond_slave_arr_work_rearm(bond, 1); 4991 } 4992 4993 static void bond_skip_slave(struct bond_up_slave *slaves, 4994 struct slave *skipslave) 4995 { 4996 int idx; 4997 4998 /* Rare situation where caller has asked to skip a specific 4999 * slave but allocation failed (most likely!). BTW this is 5000 * only possible when the call is initiated from 5001 * __bond_release_one(). In this situation, overwrite the 5002 * skipslave entry in the array with the last entry from the 5003 * array to avoid a situation where the xmit path may choose 5004 * this to-be-skipped slave to send a packet out. 5005 */ 5006 for (idx = 0; slaves && idx < slaves->count; idx++) { 5007 if (skipslave == slaves->arr[idx]) { 5008 slaves->arr[idx] = 5009 slaves->arr[slaves->count - 1]; 5010 slaves->count--; 5011 break; 5012 } 5013 } 5014 } 5015 5016 static void bond_set_slave_arr(struct bonding *bond, 5017 struct bond_up_slave *usable_slaves, 5018 struct bond_up_slave *all_slaves) 5019 { 5020 struct bond_up_slave *usable, *all; 5021 5022 usable = rtnl_dereference(bond->usable_slaves); 5023 rcu_assign_pointer(bond->usable_slaves, usable_slaves); 5024 kfree_rcu(usable, rcu); 5025 5026 all = rtnl_dereference(bond->all_slaves); 5027 rcu_assign_pointer(bond->all_slaves, all_slaves); 5028 kfree_rcu(all, rcu); 5029 } 5030 5031 static void bond_reset_slave_arr(struct bonding *bond) 5032 { 5033 struct bond_up_slave *usable, *all; 5034 5035 usable = rtnl_dereference(bond->usable_slaves); 5036 if (usable) { 5037 RCU_INIT_POINTER(bond->usable_slaves, NULL); 5038 kfree_rcu(usable, rcu); 5039 } 5040 5041 all = rtnl_dereference(bond->all_slaves); 5042 if (all) { 5043 RCU_INIT_POINTER(bond->all_slaves, NULL); 5044 kfree_rcu(all, rcu); 5045 } 5046 } 5047 5048 /* Build the usable slaves array in control path for modes that use xmit-hash 5049 * to determine the slave interface - 5050 * (a) BOND_MODE_8023AD 5051 * (b) BOND_MODE_XOR 5052 * (c) (BOND_MODE_TLB || BOND_MODE_ALB) && tlb_dynamic_lb == 0 5053 * 5054 * The caller is expected to hold RTNL only and NO other lock! 5055 */ 5056 int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave) 5057 { 5058 struct bond_up_slave *usable_slaves = NULL, *all_slaves = NULL; 5059 struct slave *slave; 5060 struct list_head *iter; 5061 int agg_id = 0; 5062 int ret = 0; 5063 5064 might_sleep(); 5065 5066 usable_slaves = kzalloc(struct_size(usable_slaves, arr, 5067 bond->slave_cnt), GFP_KERNEL); 5068 all_slaves = kzalloc(struct_size(all_slaves, arr, 5069 bond->slave_cnt), GFP_KERNEL); 5070 if (!usable_slaves || !all_slaves) { 5071 ret = -ENOMEM; 5072 goto out; 5073 } 5074 if (BOND_MODE(bond) == BOND_MODE_8023AD) { 5075 struct ad_info ad_info; 5076 5077 spin_lock_bh(&bond->mode_lock); 5078 if (bond_3ad_get_active_agg_info(bond, &ad_info)) { 5079 spin_unlock_bh(&bond->mode_lock); 5080 pr_debug("bond_3ad_get_active_agg_info failed\n"); 5081 /* No active aggregator means it's not safe to use 5082 * the previous array.
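* Both usable_slaves and all_slaves are therefore dropped here and are rebuilt on the next successful update.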
5083 */ 5084 bond_reset_slave_arr(bond); 5085 goto out; 5086 } 5087 spin_unlock_bh(&bond->mode_lock); 5088 agg_id = ad_info.aggregator_id; 5089 } 5090 bond_for_each_slave(bond, slave, iter) { 5091 if (skipslave == slave) 5092 continue; 5093 5094 all_slaves->arr[all_slaves->count++] = slave; 5095 if (BOND_MODE(bond) == BOND_MODE_8023AD) { 5096 struct aggregator *agg; 5097 5098 agg = SLAVE_AD_INFO(slave)->port.aggregator; 5099 if (!agg || agg->aggregator_identifier != agg_id) 5100 continue; 5101 } 5102 if (!bond_slave_can_tx(slave)) 5103 continue; 5104 5105 slave_dbg(bond->dev, slave->dev, "Adding slave to tx hash array[%d]\n", 5106 usable_slaves->count); 5107 5108 usable_slaves->arr[usable_slaves->count++] = slave; 5109 } 5110 5111 bond_set_slave_arr(bond, usable_slaves, all_slaves); 5112 return ret; 5113 out: 5114 if (ret != 0 && skipslave) { 5115 bond_skip_slave(rtnl_dereference(bond->all_slaves), 5116 skipslave); 5117 bond_skip_slave(rtnl_dereference(bond->usable_slaves), 5118 skipslave); 5119 } 5120 kfree_rcu(all_slaves, rcu); 5121 kfree_rcu(usable_slaves, rcu); 5122 5123 return ret; 5124 } 5125 5126 static struct slave *bond_xmit_3ad_xor_slave_get(struct bonding *bond, 5127 struct sk_buff *skb, 5128 struct bond_up_slave *slaves) 5129 { 5130 struct slave *slave; 5131 unsigned int count; 5132 u32 hash; 5133 5134 hash = bond_xmit_hash(bond, skb); 5135 count = slaves ? READ_ONCE(slaves->count) : 0; 5136 if (unlikely(!count)) 5137 return NULL; 5138 5139 slave = slaves->arr[hash % count]; 5140 return slave; 5141 } 5142 5143 static struct slave *bond_xdp_xmit_3ad_xor_slave_get(struct bonding *bond, 5144 struct xdp_buff *xdp) 5145 { 5146 struct bond_up_slave *slaves; 5147 unsigned int count; 5148 u32 hash; 5149 5150 hash = bond_xmit_hash_xdp(bond, xdp); 5151 slaves = rcu_dereference(bond->usable_slaves); 5152 count = slaves ? READ_ONCE(slaves->count) : 0; 5153 if (unlikely(!count)) 5154 return NULL; 5155 5156 return slaves->arr[hash % count]; 5157 } 5158 5159 /* Use this Xmit function for 3AD as well as XOR modes. The current 5160 * usable slave array is formed in the control path. The xmit function 5161 * just calculates hash and sends the packet out. 5162 */ 5163 static netdev_tx_t bond_3ad_xor_xmit(struct sk_buff *skb, 5164 struct net_device *dev) 5165 { 5166 struct bonding *bond = netdev_priv(dev); 5167 struct bond_up_slave *slaves; 5168 struct slave *slave; 5169 5170 slaves = rcu_dereference(bond->usable_slaves); 5171 slave = bond_xmit_3ad_xor_slave_get(bond, skb, slaves); 5172 if (likely(slave)) 5173 return bond_dev_queue_xmit(bond, skb, slave->dev); 5174 5175 return bond_tx_drop(dev, skb); 5176 } 5177 5178 /* in broadcast mode, we send everything to all usable interfaces. 
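* The last up slave gets the original skb and every other one a clone; the transmit counts as successful if at least one slave accepted a copy.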
*/ 5179 static netdev_tx_t bond_xmit_broadcast(struct sk_buff *skb, 5180 struct net_device *bond_dev) 5181 { 5182 struct bonding *bond = netdev_priv(bond_dev); 5183 struct slave *slave = NULL; 5184 struct list_head *iter; 5185 bool xmit_suc = false; 5186 bool skb_used = false; 5187 5188 bond_for_each_slave_rcu(bond, slave, iter) { 5189 struct sk_buff *skb2; 5190 5191 if (!(bond_slave_is_up(slave) && slave->link == BOND_LINK_UP)) 5192 continue; 5193 5194 if (bond_is_last_slave(bond, slave)) { 5195 skb2 = skb; 5196 skb_used = true; 5197 } else { 5198 skb2 = skb_clone(skb, GFP_ATOMIC); 5199 if (!skb2) { 5200 net_err_ratelimited("%s: Error: %s: skb_clone() failed\n", 5201 bond_dev->name, __func__); 5202 continue; 5203 } 5204 } 5205 5206 if (bond_dev_queue_xmit(bond, skb2, slave->dev) == NETDEV_TX_OK) 5207 xmit_suc = true; 5208 } 5209 5210 if (!skb_used) 5211 dev_kfree_skb_any(skb); 5212 5213 if (xmit_suc) 5214 return NETDEV_TX_OK; 5215 5216 dev_core_stats_tx_dropped_inc(bond_dev); 5217 return NET_XMIT_DROP; 5218 } 5219 5220 /*------------------------- Device initialization ---------------------------*/ 5221 5222 /* Lookup the slave that corresponds to a qid */ 5223 static inline int bond_slave_override(struct bonding *bond, 5224 struct sk_buff *skb) 5225 { 5226 struct slave *slave = NULL; 5227 struct list_head *iter; 5228 5229 if (!skb_rx_queue_recorded(skb)) 5230 return 1; 5231 5232 /* Find out if any slaves have the same mapping as this skb. */ 5233 bond_for_each_slave_rcu(bond, slave, iter) { 5234 if (slave->queue_id == skb_get_queue_mapping(skb)) { 5235 if (bond_slave_is_up(slave) && 5236 slave->link == BOND_LINK_UP) { 5237 bond_dev_queue_xmit(bond, skb, slave->dev); 5238 return 0; 5239 } 5240 /* If the slave isn't UP, use default transmit policy. */ 5241 break; 5242 } 5243 } 5244 5245 return 1; 5246 } 5247 5248 5249 static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb, 5250 struct net_device *sb_dev) 5251 { 5252 /* This helper function exists to help dev_pick_tx get the correct 5253 * destination queue. Using a helper function skips a call to 5254 * skb_tx_hash and will put the skbs in the queue we expect on their 5255 * way down to the bonding driver. 5256 */ 5257 u16 txq = skb_rx_queue_recorded(skb) ? 
skb_get_rx_queue(skb) : 0; 5258 5259 /* Save the original txq to restore before passing to the driver */ 5260 qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb_get_queue_mapping(skb); 5261 5262 if (unlikely(txq >= dev->real_num_tx_queues)) { 5263 do { 5264 txq -= dev->real_num_tx_queues; 5265 } while (txq >= dev->real_num_tx_queues); 5266 } 5267 return txq; 5268 } 5269 5270 static struct net_device *bond_xmit_get_slave(struct net_device *master_dev, 5271 struct sk_buff *skb, 5272 bool all_slaves) 5273 { 5274 struct bonding *bond = netdev_priv(master_dev); 5275 struct bond_up_slave *slaves; 5276 struct slave *slave = NULL; 5277 5278 switch (BOND_MODE(bond)) { 5279 case BOND_MODE_ROUNDROBIN: 5280 slave = bond_xmit_roundrobin_slave_get(bond, skb); 5281 break; 5282 case BOND_MODE_ACTIVEBACKUP: 5283 slave = bond_xmit_activebackup_slave_get(bond); 5284 break; 5285 case BOND_MODE_8023AD: 5286 case BOND_MODE_XOR: 5287 if (all_slaves) 5288 slaves = rcu_dereference(bond->all_slaves); 5289 else 5290 slaves = rcu_dereference(bond->usable_slaves); 5291 slave = bond_xmit_3ad_xor_slave_get(bond, skb, slaves); 5292 break; 5293 case BOND_MODE_BROADCAST: 5294 break; 5295 case BOND_MODE_ALB: 5296 slave = bond_xmit_alb_slave_get(bond, skb); 5297 break; 5298 case BOND_MODE_TLB: 5299 slave = bond_xmit_tlb_slave_get(bond, skb); 5300 break; 5301 default: 5302 /* Should never happen, mode already checked */ 5303 WARN_ONCE(true, "Unknown bonding mode"); 5304 break; 5305 } 5306 5307 if (slave) 5308 return slave->dev; 5309 return NULL; 5310 } 5311 5312 static void bond_sk_to_flow(struct sock *sk, struct flow_keys *flow) 5313 { 5314 switch (sk->sk_family) { 5315 #if IS_ENABLED(CONFIG_IPV6) 5316 case AF_INET6: 5317 if (ipv6_only_sock(sk) || 5318 ipv6_addr_type(&sk->sk_v6_daddr) != IPV6_ADDR_MAPPED) { 5319 flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; 5320 flow->addrs.v6addrs.src = inet6_sk(sk)->saddr; 5321 flow->addrs.v6addrs.dst = sk->sk_v6_daddr; 5322 break; 5323 } 5324 fallthrough; 5325 #endif 5326 default: /* AF_INET */ 5327 flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; 5328 flow->addrs.v4addrs.src = inet_sk(sk)->inet_rcv_saddr; 5329 flow->addrs.v4addrs.dst = inet_sk(sk)->inet_daddr; 5330 break; 5331 } 5332 5333 flow->ports.src = inet_sk(sk)->inet_sport; 5334 flow->ports.dst = inet_sk(sk)->inet_dport; 5335 } 5336 5337 /** 5338 * bond_sk_hash_l34 - generate a hash value based on the socket's L3 and L4 fields 5339 * @sk: socket to use for headers 5340 * 5341 * This function will extract the necessary field from the socket and use 5342 * them to generate a hash based on the LAYER34 xmit_policy. 5343 * Assumes that sk is a TCP or UDP socket. 5344 */ 5345 static u32 bond_sk_hash_l34(struct sock *sk) 5346 { 5347 struct flow_keys flow; 5348 u32 hash; 5349 5350 bond_sk_to_flow(sk, &flow); 5351 5352 /* L4 */ 5353 memcpy(&hash, &flow.ports.ports, sizeof(hash)); 5354 /* L3 */ 5355 return bond_ip_hash(hash, &flow, BOND_XMIT_POLICY_LAYER34); 5356 } 5357 5358 static struct net_device *__bond_sk_get_lower_dev(struct bonding *bond, 5359 struct sock *sk) 5360 { 5361 struct bond_up_slave *slaves; 5362 struct slave *slave; 5363 unsigned int count; 5364 u32 hash; 5365 5366 slaves = rcu_dereference(bond->usable_slaves); 5367 count = slaves ? 
READ_ONCE(slaves->count) : 0; 5368 if (unlikely(!count)) 5369 return NULL; 5370 5371 hash = bond_sk_hash_l34(sk); 5372 slave = slaves->arr[hash % count]; 5373 5374 return slave->dev; 5375 } 5376 5377 static struct net_device *bond_sk_get_lower_dev(struct net_device *dev, 5378 struct sock *sk) 5379 { 5380 struct bonding *bond = netdev_priv(dev); 5381 struct net_device *lower = NULL; 5382 5383 rcu_read_lock(); 5384 if (bond_sk_check(bond)) 5385 lower = __bond_sk_get_lower_dev(bond, sk); 5386 rcu_read_unlock(); 5387 5388 return lower; 5389 } 5390 5391 #if IS_ENABLED(CONFIG_TLS_DEVICE) 5392 static netdev_tx_t bond_tls_device_xmit(struct bonding *bond, struct sk_buff *skb, 5393 struct net_device *dev) 5394 { 5395 struct net_device *tls_netdev = rcu_dereference(tls_get_ctx(skb->sk)->netdev); 5396 5397 /* tls_netdev might become NULL, even if tls_is_sk_tx_device_offloaded 5398 * was true, if tls_device_down is running in parallel, but it's OK, 5399 * because bond_get_slave_by_dev has a NULL check. 5400 */ 5401 if (likely(bond_get_slave_by_dev(bond, tls_netdev))) 5402 return bond_dev_queue_xmit(bond, skb, tls_netdev); 5403 return bond_tx_drop(dev, skb); 5404 } 5405 #endif 5406 5407 static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev) 5408 { 5409 struct bonding *bond = netdev_priv(dev); 5410 5411 if (bond_should_override_tx_queue(bond) && 5412 !bond_slave_override(bond, skb)) 5413 return NETDEV_TX_OK; 5414 5415 #if IS_ENABLED(CONFIG_TLS_DEVICE) 5416 if (skb->sk && tls_is_sk_tx_device_offloaded(skb->sk)) 5417 return bond_tls_device_xmit(bond, skb, dev); 5418 #endif 5419 5420 switch (BOND_MODE(bond)) { 5421 case BOND_MODE_ROUNDROBIN: 5422 return bond_xmit_roundrobin(skb, dev); 5423 case BOND_MODE_ACTIVEBACKUP: 5424 return bond_xmit_activebackup(skb, dev); 5425 case BOND_MODE_8023AD: 5426 case BOND_MODE_XOR: 5427 return bond_3ad_xor_xmit(skb, dev); 5428 case BOND_MODE_BROADCAST: 5429 return bond_xmit_broadcast(skb, dev); 5430 case BOND_MODE_ALB: 5431 return bond_alb_xmit(skb, dev); 5432 case BOND_MODE_TLB: 5433 return bond_tlb_xmit(skb, dev); 5434 default: 5435 /* Should never happen, mode already checked */ 5436 netdev_err(dev, "Unknown bonding mode %d\n", BOND_MODE(bond)); 5437 WARN_ON_ONCE(1); 5438 return bond_tx_drop(dev, skb); 5439 } 5440 } 5441 5442 static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev) 5443 { 5444 struct bonding *bond = netdev_priv(dev); 5445 netdev_tx_t ret = NETDEV_TX_OK; 5446 5447 /* If we risk deadlock from transmitting this in the 5448 * netpoll path, tell netpoll to queue the frame for later tx 5449 */ 5450 if (unlikely(is_netpoll_tx_blocked(dev))) 5451 return NETDEV_TX_BUSY; 5452 5453 rcu_read_lock(); 5454 if (bond_has_slaves(bond)) 5455 ret = __bond_start_xmit(skb, dev); 5456 else 5457 ret = bond_tx_drop(dev, skb); 5458 rcu_read_unlock(); 5459 5460 return ret; 5461 } 5462 5463 static struct net_device * 5464 bond_xdp_get_xmit_slave(struct net_device *bond_dev, struct xdp_buff *xdp) 5465 { 5466 struct bonding *bond = netdev_priv(bond_dev); 5467 struct slave *slave; 5468 5469 /* Caller needs to hold rcu_read_lock() */ 5470 5471 switch (BOND_MODE(bond)) { 5472 case BOND_MODE_ROUNDROBIN: 5473 slave = bond_xdp_xmit_roundrobin_slave_get(bond, xdp); 5474 break; 5475 5476 case BOND_MODE_ACTIVEBACKUP: 5477 slave = bond_xmit_activebackup_slave_get(bond); 5478 break; 5479 5480 case BOND_MODE_8023AD: 5481 case BOND_MODE_XOR: 5482 slave = bond_xdp_xmit_3ad_xor_slave_get(bond, xdp); 5483 break; 5484 5485 default: 5486 /* Should 
never happen. Mode guarded by bond_xdp_check() */ 5487 netdev_err(bond_dev, "Unknown bonding mode %d for xdp xmit\n", BOND_MODE(bond)); 5488 WARN_ON_ONCE(1); 5489 return NULL; 5490 } 5491 5492 if (slave) 5493 return slave->dev; 5494 5495 return NULL; 5496 } 5497 5498 static int bond_xdp_xmit(struct net_device *bond_dev, 5499 int n, struct xdp_frame **frames, u32 flags) 5500 { 5501 int nxmit, err = -ENXIO; 5502 5503 rcu_read_lock(); 5504 5505 for (nxmit = 0; nxmit < n; nxmit++) { 5506 struct xdp_frame *frame = frames[nxmit]; 5507 struct xdp_frame *frames1[] = {frame}; 5508 struct net_device *slave_dev; 5509 struct xdp_buff xdp; 5510 5511 xdp_convert_frame_to_buff(frame, &xdp); 5512 5513 slave_dev = bond_xdp_get_xmit_slave(bond_dev, &xdp); 5514 if (!slave_dev) { 5515 err = -ENXIO; 5516 break; 5517 } 5518 5519 err = slave_dev->netdev_ops->ndo_xdp_xmit(slave_dev, 1, frames1, flags); 5520 if (err < 1) 5521 break; 5522 } 5523 5524 rcu_read_unlock(); 5525 5526 /* If error happened on the first frame then we can pass the error up, otherwise 5527 * report the number of frames that were xmitted. 5528 */ 5529 if (err < 0) 5530 return (nxmit == 0 ? err : nxmit); 5531 5532 return nxmit; 5533 } 5534 5535 static int bond_xdp_set(struct net_device *dev, struct bpf_prog *prog, 5536 struct netlink_ext_ack *extack) 5537 { 5538 struct bonding *bond = netdev_priv(dev); 5539 struct list_head *iter; 5540 struct slave *slave, *rollback_slave; 5541 struct bpf_prog *old_prog; 5542 struct netdev_bpf xdp = { 5543 .command = XDP_SETUP_PROG, 5544 .flags = 0, 5545 .prog = prog, 5546 .extack = extack, 5547 }; 5548 int err; 5549 5550 ASSERT_RTNL(); 5551 5552 if (!bond_xdp_check(bond)) 5553 return -EOPNOTSUPP; 5554 5555 old_prog = bond->xdp_prog; 5556 bond->xdp_prog = prog; 5557 5558 bond_for_each_slave(bond, slave, iter) { 5559 struct net_device *slave_dev = slave->dev; 5560 5561 if (!slave_dev->netdev_ops->ndo_bpf || 5562 !slave_dev->netdev_ops->ndo_xdp_xmit) { 5563 SLAVE_NL_ERR(dev, slave_dev, extack, 5564 "Slave device does not support XDP"); 5565 err = -EOPNOTSUPP; 5566 goto err; 5567 } 5568 5569 if (dev_xdp_prog_count(slave_dev) > 0) { 5570 SLAVE_NL_ERR(dev, slave_dev, extack, 5571 "Slave has XDP program loaded, please unload before enslaving"); 5572 err = -EOPNOTSUPP; 5573 goto err; 5574 } 5575 5576 err = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp); 5577 if (err < 0) { 5578 /* ndo_bpf() sets extack error message */ 5579 slave_err(dev, slave_dev, "Error %d calling ndo_bpf\n", err); 5580 goto err; 5581 } 5582 if (prog) 5583 bpf_prog_inc(prog); 5584 } 5585 5586 if (prog) { 5587 static_branch_inc(&bpf_master_redirect_enabled_key); 5588 } else if (old_prog) { 5589 bpf_prog_put(old_prog); 5590 static_branch_dec(&bpf_master_redirect_enabled_key); 5591 } 5592 5593 return 0; 5594 5595 err: 5596 /* unwind the program changes */ 5597 bond->xdp_prog = old_prog; 5598 xdp.prog = old_prog; 5599 xdp.extack = NULL; /* do not overwrite original error */ 5600 5601 bond_for_each_slave(bond, rollback_slave, iter) { 5602 struct net_device *slave_dev = rollback_slave->dev; 5603 int err_unwind; 5604 5605 if (slave == rollback_slave) 5606 break; 5607 5608 err_unwind = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp); 5609 if (err_unwind < 0) 5610 slave_err(dev, slave_dev, 5611 "Error %d when unwinding XDP program change\n", err_unwind); 5612 else if (xdp.prog) 5613 bpf_prog_inc(xdp.prog); 5614 } 5615 return err; 5616 } 5617 5618 static int bond_xdp(struct net_device *dev, struct netdev_bpf *xdp) 5619 { 5620 switch (xdp->command) { 5621 
case XDP_SETUP_PROG: 5622 return bond_xdp_set(dev, xdp->prog, xdp->extack); 5623 default: 5624 return -EINVAL; 5625 } 5626 } 5627 5628 static u32 bond_mode_bcast_speed(struct slave *slave, u32 speed) 5629 { 5630 if (speed == 0 || speed == SPEED_UNKNOWN) 5631 speed = slave->speed; 5632 else 5633 speed = min(speed, slave->speed); 5634 5635 return speed; 5636 } 5637 5638 static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev, 5639 struct ethtool_link_ksettings *cmd) 5640 { 5641 struct bonding *bond = netdev_priv(bond_dev); 5642 struct list_head *iter; 5643 struct slave *slave; 5644 u32 speed = 0; 5645 5646 cmd->base.duplex = DUPLEX_UNKNOWN; 5647 cmd->base.port = PORT_OTHER; 5648 5649 /* Since bond_slave_can_tx returns false for all inactive or down slaves, we 5650 * do not need to check mode. Though link speed might not represent 5651 * the true receive or transmit bandwidth (not all modes are symmetric) 5652 * this is an accurate maximum. 5653 */ 5654 bond_for_each_slave(bond, slave, iter) { 5655 if (bond_slave_can_tx(slave)) { 5656 if (slave->speed != SPEED_UNKNOWN) { 5657 if (BOND_MODE(bond) == BOND_MODE_BROADCAST) 5658 speed = bond_mode_bcast_speed(slave, 5659 speed); 5660 else 5661 speed += slave->speed; 5662 } 5663 if (cmd->base.duplex == DUPLEX_UNKNOWN && 5664 slave->duplex != DUPLEX_UNKNOWN) 5665 cmd->base.duplex = slave->duplex; 5666 } 5667 } 5668 cmd->base.speed = speed ? : SPEED_UNKNOWN; 5669 5670 return 0; 5671 } 5672 5673 static void bond_ethtool_get_drvinfo(struct net_device *bond_dev, 5674 struct ethtool_drvinfo *drvinfo) 5675 { 5676 strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); 5677 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d", 5678 BOND_ABI_VERSION); 5679 } 5680 5681 static int bond_ethtool_get_ts_info(struct net_device *bond_dev, 5682 struct ethtool_ts_info *info) 5683 { 5684 struct bonding *bond = netdev_priv(bond_dev); 5685 const struct ethtool_ops *ops; 5686 struct net_device *real_dev; 5687 struct phy_device *phydev; 5688 int ret = 0; 5689 5690 rcu_read_lock(); 5691 real_dev = bond_option_active_slave_get_rcu(bond); 5692 dev_hold(real_dev); 5693 rcu_read_unlock(); 5694 5695 if (real_dev) { 5696 ops = real_dev->ethtool_ops; 5697 phydev = real_dev->phydev; 5698 5699 if (phy_has_tsinfo(phydev)) { 5700 ret = phy_ts_info(phydev, info); 5701 goto out; 5702 } else if (ops->get_ts_info) { 5703 ret = ops->get_ts_info(real_dev, info); 5704 goto out; 5705 } 5706 } 5707 5708 info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE | 5709 SOF_TIMESTAMPING_SOFTWARE; 5710 info->phc_index = -1; 5711 5712 out: 5713 dev_put(real_dev); 5714 return ret; 5715 } 5716 5717 static const struct ethtool_ops bond_ethtool_ops = { 5718 .get_drvinfo = bond_ethtool_get_drvinfo, 5719 .get_link = ethtool_op_get_link, 5720 .get_link_ksettings = bond_ethtool_get_link_ksettings, 5721 .get_ts_info = bond_ethtool_get_ts_info, 5722 }; 5723 5724 static const struct net_device_ops bond_netdev_ops = { 5725 .ndo_init = bond_init, 5726 .ndo_uninit = bond_uninit, 5727 .ndo_open = bond_open, 5728 .ndo_stop = bond_close, 5729 .ndo_start_xmit = bond_start_xmit, 5730 .ndo_select_queue = bond_select_queue, 5731 .ndo_get_stats64 = bond_get_stats, 5732 .ndo_eth_ioctl = bond_eth_ioctl, 5733 .ndo_siocbond = bond_do_ioctl, 5734 .ndo_siocdevprivate = bond_siocdevprivate, 5735 .ndo_change_rx_flags = bond_change_rx_flags, 5736 .ndo_set_rx_mode = bond_set_rx_mode, 5737 .ndo_change_mtu = bond_change_mtu, 5738 .ndo_set_mac_address = bond_set_mac_address, 5739 .ndo_neigh_setup = 
bond_neigh_setup,
5740 	.ndo_vlan_rx_add_vid	= bond_vlan_rx_add_vid,
5741 	.ndo_vlan_rx_kill_vid	= bond_vlan_rx_kill_vid,
5742 #ifdef CONFIG_NET_POLL_CONTROLLER
5743 	.ndo_netpoll_setup	= bond_netpoll_setup,
5744 	.ndo_netpoll_cleanup	= bond_netpoll_cleanup,
5745 	.ndo_poll_controller	= bond_poll_controller,
5746 #endif
5747 	.ndo_add_slave		= bond_enslave,
5748 	.ndo_del_slave		= bond_release,
5749 	.ndo_fix_features	= bond_fix_features,
5750 	.ndo_features_check	= passthru_features_check,
5751 	.ndo_get_xmit_slave	= bond_xmit_get_slave,
5752 	.ndo_sk_get_lower_dev	= bond_sk_get_lower_dev,
5753 	.ndo_bpf		= bond_xdp,
5754 	.ndo_xdp_xmit		= bond_xdp_xmit,
5755 	.ndo_xdp_get_xmit_slave	= bond_xdp_get_xmit_slave,
5756 };
5757
5758 static const struct device_type bond_type = {
5759 	.name = "bond",
5760 };
5761
5762 static void bond_destructor(struct net_device *bond_dev)
5763 {
5764 	struct bonding *bond = netdev_priv(bond_dev);
5765
5766 	if (bond->wq)
5767 		destroy_workqueue(bond->wq);
5768
5769 	if (bond->rr_tx_counter)
5770 		free_percpu(bond->rr_tx_counter);
5771 }
5772
5773 void bond_setup(struct net_device *bond_dev)
5774 {
5775 	struct bonding *bond = netdev_priv(bond_dev);
5776
5777 	spin_lock_init(&bond->mode_lock);
5778 	bond->params = bonding_defaults;
5779
5780 	/* Initialize pointers */
5781 	bond->dev = bond_dev;
5782
5783 	/* Initialize the device entry points */
5784 	ether_setup(bond_dev);
5785 	bond_dev->max_mtu = ETH_MAX_MTU;
5786 	bond_dev->netdev_ops = &bond_netdev_ops;
5787 	bond_dev->ethtool_ops = &bond_ethtool_ops;
5788
5789 	bond_dev->needs_free_netdev = true;
5790 	bond_dev->priv_destructor = bond_destructor;
5791
5792 	SET_NETDEV_DEVTYPE(bond_dev, &bond_type);
5793
5794 	/* Initialize the device options */
5795 	bond_dev->flags |= IFF_MASTER;
5796 	bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT | IFF_NO_QUEUE;
5797 	bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
5798
5799 #ifdef CONFIG_XFRM_OFFLOAD
5800 	/* set up xfrm device ops (only supported in active-backup right now) */
5801 	bond_dev->xfrmdev_ops = &bond_xfrmdev_ops;
5802 	INIT_LIST_HEAD(&bond->ipsec_list);
5803 	spin_lock_init(&bond->ipsec_lock);
5804 #endif /* CONFIG_XFRM_OFFLOAD */
5805
5806 	/* don't acquire bond device's netif_tx_lock when transmitting */
5807 	bond_dev->features |= NETIF_F_LLTX;
5808
5809 	/* By default, we declare the bond to be fully capable of
5810 	 * VLAN hardware acceleration. Special care is taken in
5811 	 * the various xmit functions when there are slaves that
5812 	 * are not capable of hw acceleration.
5813 	 */
5814
5815
5816 	/* Don't allow bond devices to change network namespaces. */
5817 	bond_dev->features |= NETIF_F_NETNS_LOCAL;
5818
5819 	bond_dev->hw_features = BOND_VLAN_FEATURES |
5820 				NETIF_F_HW_VLAN_CTAG_RX |
5821 				NETIF_F_HW_VLAN_CTAG_FILTER;
5822
5823 	bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
5824 	bond_dev->features |= bond_dev->hw_features;
5825 	bond_dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
5826 #ifdef CONFIG_XFRM_OFFLOAD
5827 	bond_dev->hw_features |= BOND_XFRM_FEATURES;
5828 	/* Only enable XFRM features if this is an active-backup config */
5829 	if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
5830 		bond_dev->features |= BOND_XFRM_FEATURES;
5831 #endif /* CONFIG_XFRM_OFFLOAD */
5832 }
5833
5834 /* Destroy a bonding device.
5835  * Must be under rtnl_lock when this function is called.
5836 */ 5837 static void bond_uninit(struct net_device *bond_dev) 5838 { 5839 struct bonding *bond = netdev_priv(bond_dev); 5840 struct bond_up_slave *usable, *all; 5841 struct list_head *iter; 5842 struct slave *slave; 5843 5844 bond_netpoll_cleanup(bond_dev); 5845 5846 /* Release the bonded slaves */ 5847 bond_for_each_slave(bond, slave, iter) 5848 __bond_release_one(bond_dev, slave->dev, true, true); 5849 netdev_info(bond_dev, "Released all slaves\n"); 5850 5851 usable = rtnl_dereference(bond->usable_slaves); 5852 if (usable) { 5853 RCU_INIT_POINTER(bond->usable_slaves, NULL); 5854 kfree_rcu(usable, rcu); 5855 } 5856 5857 all = rtnl_dereference(bond->all_slaves); 5858 if (all) { 5859 RCU_INIT_POINTER(bond->all_slaves, NULL); 5860 kfree_rcu(all, rcu); 5861 } 5862 5863 list_del(&bond->bond_list); 5864 5865 bond_debug_unregister(bond); 5866 } 5867 5868 /*------------------------- Module initialization ---------------------------*/ 5869 5870 static int bond_check_params(struct bond_params *params) 5871 { 5872 int arp_validate_value, fail_over_mac_value, primary_reselect_value, i; 5873 struct bond_opt_value newval; 5874 const struct bond_opt_value *valptr; 5875 int arp_all_targets_value = 0; 5876 u16 ad_actor_sys_prio = 0; 5877 u16 ad_user_port_key = 0; 5878 __be32 arp_target[BOND_MAX_ARP_TARGETS] = { 0 }; 5879 int arp_ip_count; 5880 int bond_mode = BOND_MODE_ROUNDROBIN; 5881 int xmit_hashtype = BOND_XMIT_POLICY_LAYER2; 5882 int lacp_fast = 0; 5883 int tlb_dynamic_lb; 5884 5885 /* Convert string parameters. */ 5886 if (mode) { 5887 bond_opt_initstr(&newval, mode); 5888 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_MODE), &newval); 5889 if (!valptr) { 5890 pr_err("Error: Invalid bonding mode \"%s\"\n", mode); 5891 return -EINVAL; 5892 } 5893 bond_mode = valptr->value; 5894 } 5895 5896 if (xmit_hash_policy) { 5897 if (bond_mode == BOND_MODE_ROUNDROBIN || 5898 bond_mode == BOND_MODE_ACTIVEBACKUP || 5899 bond_mode == BOND_MODE_BROADCAST) { 5900 pr_info("xmit_hash_policy param is irrelevant in mode %s\n", 5901 bond_mode_name(bond_mode)); 5902 } else { 5903 bond_opt_initstr(&newval, xmit_hash_policy); 5904 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_XMIT_HASH), 5905 &newval); 5906 if (!valptr) { 5907 pr_err("Error: Invalid xmit_hash_policy \"%s\"\n", 5908 xmit_hash_policy); 5909 return -EINVAL; 5910 } 5911 xmit_hashtype = valptr->value; 5912 } 5913 } 5914 5915 if (lacp_rate) { 5916 if (bond_mode != BOND_MODE_8023AD) { 5917 pr_info("lacp_rate param is irrelevant in mode %s\n", 5918 bond_mode_name(bond_mode)); 5919 } else { 5920 bond_opt_initstr(&newval, lacp_rate); 5921 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_LACP_RATE), 5922 &newval); 5923 if (!valptr) { 5924 pr_err("Error: Invalid lacp rate \"%s\"\n", 5925 lacp_rate); 5926 return -EINVAL; 5927 } 5928 lacp_fast = valptr->value; 5929 } 5930 } 5931 5932 if (ad_select) { 5933 bond_opt_initstr(&newval, ad_select); 5934 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_SELECT), 5935 &newval); 5936 if (!valptr) { 5937 pr_err("Error: Invalid ad_select \"%s\"\n", ad_select); 5938 return -EINVAL; 5939 } 5940 params->ad_select = valptr->value; 5941 if (bond_mode != BOND_MODE_8023AD) 5942 pr_warn("ad_select param only affects 802.3ad mode\n"); 5943 } else { 5944 params->ad_select = BOND_AD_STABLE; 5945 } 5946 5947 if (max_bonds < 0) { 5948 pr_warn("Warning: max_bonds (%d) not in range %d-%d, so it was reset to BOND_DEFAULT_MAX_BONDS (%d)\n", 5949 max_bonds, 0, INT_MAX, BOND_DEFAULT_MAX_BONDS); 5950 max_bonds = BOND_DEFAULT_MAX_BONDS; 5951 } 5952 5953 
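	/* The integer parameters below are all sanitized the same way as
	 * max_bonds above: a value outside the documented range is logged
	 * with pr_warn() and reset to a safe default, so a bad module
	 * option never fails the load; unparseable string options
	 * (mode, lacp_rate, ad_select, ...) instead abort with -EINVAL.
	 */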
if (miimon < 0) { 5954 pr_warn("Warning: miimon module parameter (%d), not in range 0-%d, so it was reset to 0\n", 5955 miimon, INT_MAX); 5956 miimon = 0; 5957 } 5958 5959 if (updelay < 0) { 5960 pr_warn("Warning: updelay module parameter (%d), not in range 0-%d, so it was reset to 0\n", 5961 updelay, INT_MAX); 5962 updelay = 0; 5963 } 5964 5965 if (downdelay < 0) { 5966 pr_warn("Warning: downdelay module parameter (%d), not in range 0-%d, so it was reset to 0\n", 5967 downdelay, INT_MAX); 5968 downdelay = 0; 5969 } 5970 5971 if ((use_carrier != 0) && (use_carrier != 1)) { 5972 pr_warn("Warning: use_carrier module parameter (%d), not of valid value (0/1), so it was set to 1\n", 5973 use_carrier); 5974 use_carrier = 1; 5975 } 5976 5977 if (num_peer_notif < 0 || num_peer_notif > 255) { 5978 pr_warn("Warning: num_grat_arp/num_unsol_na (%d) not in range 0-255 so it was reset to 1\n", 5979 num_peer_notif); 5980 num_peer_notif = 1; 5981 } 5982 5983 /* reset values for 802.3ad/TLB/ALB */ 5984 if (!bond_mode_uses_arp(bond_mode)) { 5985 if (!miimon) { 5986 pr_warn("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n"); 5987 pr_warn("Forcing miimon to 100msec\n"); 5988 miimon = BOND_DEFAULT_MIIMON; 5989 } 5990 } 5991 5992 if (tx_queues < 1 || tx_queues > 255) { 5993 pr_warn("Warning: tx_queues (%d) should be between 1 and 255, resetting to %d\n", 5994 tx_queues, BOND_DEFAULT_TX_QUEUES); 5995 tx_queues = BOND_DEFAULT_TX_QUEUES; 5996 } 5997 5998 if ((all_slaves_active != 0) && (all_slaves_active != 1)) { 5999 pr_warn("Warning: all_slaves_active module parameter (%d), not of valid value (0/1), so it was set to 0\n", 6000 all_slaves_active); 6001 all_slaves_active = 0; 6002 } 6003 6004 if (resend_igmp < 0 || resend_igmp > 255) { 6005 pr_warn("Warning: resend_igmp (%d) should be between 0 and 255, resetting to %d\n", 6006 resend_igmp, BOND_DEFAULT_RESEND_IGMP); 6007 resend_igmp = BOND_DEFAULT_RESEND_IGMP; 6008 } 6009 6010 bond_opt_initval(&newval, packets_per_slave); 6011 if (!bond_opt_parse(bond_opt_get(BOND_OPT_PACKETS_PER_SLAVE), &newval)) { 6012 pr_warn("Warning: packets_per_slave (%d) should be between 0 and %u resetting to 1\n", 6013 packets_per_slave, USHRT_MAX); 6014 packets_per_slave = 1; 6015 } 6016 6017 if (bond_mode == BOND_MODE_ALB) { 6018 pr_notice("In ALB mode you might experience client disconnections upon reconnection of a link if the bonding module updelay parameter (%d msec) is incompatible with the forwarding delay time of the switch\n", 6019 updelay); 6020 } 6021 6022 if (!miimon) { 6023 if (updelay || downdelay) { 6024 /* just warn the user the up/down delay will have 6025 * no effect since miimon is zero... 
6026 */ 6027 pr_warn("Warning: miimon module parameter not set and updelay (%d) or downdelay (%d) module parameter is set; updelay and downdelay have no effect unless miimon is set\n", 6028 updelay, downdelay); 6029 } 6030 } else { 6031 /* don't allow arp monitoring */ 6032 if (arp_interval) { 6033 pr_warn("Warning: miimon (%d) and arp_interval (%d) can't be used simultaneously, disabling ARP monitoring\n", 6034 miimon, arp_interval); 6035 arp_interval = 0; 6036 } 6037 6038 if ((updelay % miimon) != 0) { 6039 pr_warn("Warning: updelay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n", 6040 updelay, miimon, (updelay / miimon) * miimon); 6041 } 6042 6043 updelay /= miimon; 6044 6045 if ((downdelay % miimon) != 0) { 6046 pr_warn("Warning: downdelay (%d) is not a multiple of miimon (%d), downdelay rounded to %d ms\n", 6047 downdelay, miimon, 6048 (downdelay / miimon) * miimon); 6049 } 6050 6051 downdelay /= miimon; 6052 } 6053 6054 if (arp_interval < 0) { 6055 pr_warn("Warning: arp_interval module parameter (%d), not in range 0-%d, so it was reset to 0\n", 6056 arp_interval, INT_MAX); 6057 arp_interval = 0; 6058 } 6059 6060 for (arp_ip_count = 0, i = 0; 6061 (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[i]; i++) { 6062 __be32 ip; 6063 6064 /* not a complete check, but good enough to catch mistakes */ 6065 if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) || 6066 !bond_is_ip_target_ok(ip)) { 6067 pr_warn("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n", 6068 arp_ip_target[i]); 6069 arp_interval = 0; 6070 } else { 6071 if (bond_get_targets_ip(arp_target, ip) == -1) 6072 arp_target[arp_ip_count++] = ip; 6073 else 6074 pr_warn("Warning: duplicate address %pI4 in arp_ip_target, skipping\n", 6075 &ip); 6076 } 6077 } 6078 6079 if (arp_interval && !arp_ip_count) { 6080 /* don't allow arping if no arp_ip_target given... 
 */
6081 		pr_warn("Warning: arp_interval module parameter (%d) specified without providing an arp_ip_target parameter, arp_interval was reset to 0\n",
6082 			arp_interval);
6083 		arp_interval = 0;
6084 	}
6085
6086 	if (arp_validate) {
6087 		if (!arp_interval) {
6088 			pr_err("arp_validate requires arp_interval\n");
6089 			return -EINVAL;
6090 		}
6091
6092 		bond_opt_initstr(&newval, arp_validate);
6093 		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_VALIDATE),
6094 					&newval);
6095 		if (!valptr) {
6096 			pr_err("Error: invalid arp_validate \"%s\"\n",
6097 			       arp_validate);
6098 			return -EINVAL;
6099 		}
6100 		arp_validate_value = valptr->value;
6101 	} else {
6102 		arp_validate_value = 0;
6103 	}
6104
6105 	if (arp_all_targets) {
6106 		bond_opt_initstr(&newval, arp_all_targets);
6107 		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_ALL_TARGETS),
6108 					&newval);
6109 		if (!valptr) {
6110 			pr_err("Error: invalid arp_all_targets \"%s\"\n",
6111 			       arp_all_targets);
6112 			arp_all_targets_value = 0;
6113 		} else {
6114 			arp_all_targets_value = valptr->value;
6115 		}
6116 	}
6117
6118 	if (miimon) {
6119 		pr_info("MII link monitoring set to %d ms\n", miimon);
6120 	} else if (arp_interval) {
6121 		valptr = bond_opt_get_val(BOND_OPT_ARP_VALIDATE,
6122 					  arp_validate_value);
6123 		pr_info("ARP monitoring set to %d ms, validate %s, with %d target(s):",
6124 			arp_interval, valptr->string, arp_ip_count);
6125
6126 		for (i = 0; i < arp_ip_count; i++)
6127 			pr_cont(" %s", arp_ip_target[i]);
6128
6129 		pr_cont("\n");
6130
6131 	} else if (max_bonds) {
6132 		/* miimon and arp_interval not set, we need one so things
6133 		 * work as expected, see bonding.txt for details
6134 		 */
6135 		pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details\n");
6136 	}
6137
6138 	if (primary && !bond_mode_uses_primary(bond_mode)) {
6139 		/* currently, using a primary only makes sense
6140 		 * in active backup, TLB or ALB modes
6141 		 */
6142 		pr_warn("Warning: %s primary device specified but has no effect in %s mode\n",
6143 			primary, bond_mode_name(bond_mode));
6144 		primary = NULL;
6145 	}
6146
6147 	if (primary && primary_reselect) {
6148 		bond_opt_initstr(&newval, primary_reselect);
6149 		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_PRIMARY_RESELECT),
6150 					&newval);
6151 		if (!valptr) {
6152 			pr_err("Error: Invalid primary_reselect \"%s\"\n",
6153 			       primary_reselect);
6154 			return -EINVAL;
6155 		}
6156 		primary_reselect_value = valptr->value;
6157 	} else {
6158 		primary_reselect_value = BOND_PRI_RESELECT_ALWAYS;
6159 	}
6160
6161 	if (fail_over_mac) {
6162 		bond_opt_initstr(&newval, fail_over_mac);
6163 		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_FAIL_OVER_MAC),
6164 					&newval);
6165 		if (!valptr) {
6166 			pr_err("Error: invalid fail_over_mac \"%s\"\n",
6167 			       fail_over_mac);
6168 			return -EINVAL;
6169 		}
6170 		fail_over_mac_value = valptr->value;
6171 		if (bond_mode != BOND_MODE_ACTIVEBACKUP)
6172 			pr_warn("Warning: fail_over_mac only affects active-backup mode\n");
6173 	} else {
6174 		fail_over_mac_value = BOND_FOM_NONE;
6175 	}
6176
6177 	bond_opt_initstr(&newval, "default");
6178 	valptr = bond_opt_parse(
6179 			bond_opt_get(BOND_OPT_AD_ACTOR_SYS_PRIO),
6180 				&newval);
6181 	if (!valptr) {
6182 		pr_err("Error: No ad_actor_sys_prio default value\n");
6183 		return -EINVAL;
6184 	}
6185 	ad_actor_sys_prio = valptr->value;
6186
6187 	valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_USER_PORT_KEY),
6188 				&newval);
6189 	if (!valptr) {
6190 		pr_err("Error: No ad_user_port_key default value\n");
6191 		return -EINVAL;
6192 	}
6193 	ad_user_port_key = valptr->value;
6194
6195 	bond_opt_initstr(&newval, "default");
6196 	valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB), &newval);
6197 	if (!valptr) {
6198 		pr_err("Error: No tlb_dynamic_lb default value\n");
6199 		return -EINVAL;
6200 	}
6201 	tlb_dynamic_lb = valptr->value;
6202
6203 	if (lp_interval == 0) {
6204 		pr_warn("Warning: lp_interval must be between 1 and %d, so it was reset to %d\n",
6205 			INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL);
6206 		lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
6207 	}
6208
6209 	/* fill params struct with the proper values */
6210 	params->mode = bond_mode;
6211 	params->xmit_policy = xmit_hashtype;
6212 	params->miimon = miimon;
6213 	params->num_peer_notif = num_peer_notif;
6214 	params->arp_interval = arp_interval;
6215 	params->arp_validate = arp_validate_value;
6216 	params->arp_all_targets = arp_all_targets_value;
6217 	params->missed_max = 2;
6218 	params->updelay = updelay;
6219 	params->downdelay = downdelay;
6220 	params->peer_notif_delay = 0;
6221 	params->use_carrier = use_carrier;
6222 	params->lacp_active = 1;
6223 	params->lacp_fast = lacp_fast;
6224 	params->primary[0] = 0;
6225 	params->primary_reselect = primary_reselect_value;
6226 	params->fail_over_mac = fail_over_mac_value;
6227 	params->tx_queues = tx_queues;
6228 	params->all_slaves_active = all_slaves_active;
6229 	params->resend_igmp = resend_igmp;
6230 	params->min_links = min_links;
6231 	params->lp_interval = lp_interval;
6232 	params->packets_per_slave = packets_per_slave;
6233 	params->tlb_dynamic_lb = tlb_dynamic_lb;
6234 	params->ad_actor_sys_prio = ad_actor_sys_prio;
6235 	eth_zero_addr(params->ad_actor_system);
6236 	params->ad_user_port_key = ad_user_port_key;
6237 	if (packets_per_slave > 0) {
6238 		params->reciprocal_packets_per_slave =
6239 reciprocal_value(packets_per_slave); 6240 } else { 6241 /* reciprocal_packets_per_slave is unused if 6242 * packets_per_slave is 0 or 1, just initialize it 6243 */ 6244 params->reciprocal_packets_per_slave = 6245 (struct reciprocal_value) { 0 }; 6246 } 6247 6248 if (primary) 6249 strscpy_pad(params->primary, primary, sizeof(params->primary)); 6250 6251 memcpy(params->arp_targets, arp_target, sizeof(arp_target)); 6252 #if IS_ENABLED(CONFIG_IPV6) 6253 memset(params->ns_targets, 0, sizeof(struct in6_addr) * BOND_MAX_NS_TARGETS); 6254 #endif 6255 6256 return 0; 6257 } 6258 6259 /* Called from registration process */ 6260 static int bond_init(struct net_device *bond_dev) 6261 { 6262 struct bonding *bond = netdev_priv(bond_dev); 6263 struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id); 6264 6265 netdev_dbg(bond_dev, "Begin bond_init\n"); 6266 6267 bond->wq = alloc_ordered_workqueue(bond_dev->name, WQ_MEM_RECLAIM); 6268 if (!bond->wq) 6269 return -ENOMEM; 6270 6271 spin_lock_init(&bond->stats_lock); 6272 netdev_lockdep_set_classes(bond_dev); 6273 6274 list_add_tail(&bond->bond_list, &bn->dev_list); 6275 6276 bond_prepare_sysfs_group(bond); 6277 6278 bond_debug_register(bond); 6279 6280 /* Ensure valid dev_addr */ 6281 if (is_zero_ether_addr(bond_dev->dev_addr) && 6282 bond_dev->addr_assign_type == NET_ADDR_PERM) 6283 eth_hw_addr_random(bond_dev); 6284 6285 return 0; 6286 } 6287 6288 unsigned int bond_get_num_tx_queues(void) 6289 { 6290 return tx_queues; 6291 } 6292 6293 /* Create a new bond based on the specified name and bonding parameters. 6294 * If name is NULL, obtain a suitable "bond%d" name for us. 6295 * Caller must NOT hold rtnl_lock; we need to release it here before we 6296 * set up our sysfs entries. 6297 */ 6298 int bond_create(struct net *net, const char *name) 6299 { 6300 struct net_device *bond_dev; 6301 struct bonding *bond; 6302 int res = -ENOMEM; 6303 6304 rtnl_lock(); 6305 6306 bond_dev = alloc_netdev_mq(sizeof(struct bonding), 6307 name ? 
name : "bond%d", NET_NAME_UNKNOWN, 6308 bond_setup, tx_queues); 6309 if (!bond_dev) 6310 goto out; 6311 6312 bond = netdev_priv(bond_dev); 6313 dev_net_set(bond_dev, net); 6314 bond_dev->rtnl_link_ops = &bond_link_ops; 6315 6316 res = register_netdevice(bond_dev); 6317 if (res < 0) { 6318 free_netdev(bond_dev); 6319 goto out; 6320 } 6321 6322 netif_carrier_off(bond_dev); 6323 6324 bond_work_init_all(bond); 6325 6326 out: 6327 rtnl_unlock(); 6328 return res; 6329 } 6330 6331 static int __net_init bond_net_init(struct net *net) 6332 { 6333 struct bond_net *bn = net_generic(net, bond_net_id); 6334 6335 bn->net = net; 6336 INIT_LIST_HEAD(&bn->dev_list); 6337 6338 bond_create_proc_dir(bn); 6339 bond_create_sysfs(bn); 6340 6341 return 0; 6342 } 6343 6344 static void __net_exit bond_net_exit_batch(struct list_head *net_list) 6345 { 6346 struct bond_net *bn; 6347 struct net *net; 6348 LIST_HEAD(list); 6349 6350 list_for_each_entry(net, net_list, exit_list) { 6351 bn = net_generic(net, bond_net_id); 6352 bond_destroy_sysfs(bn); 6353 } 6354 6355 /* Kill off any bonds created after unregistering bond rtnl ops */ 6356 rtnl_lock(); 6357 list_for_each_entry(net, net_list, exit_list) { 6358 struct bonding *bond, *tmp_bond; 6359 6360 bn = net_generic(net, bond_net_id); 6361 list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list) 6362 unregister_netdevice_queue(bond->dev, &list); 6363 } 6364 unregister_netdevice_many(&list); 6365 rtnl_unlock(); 6366 6367 list_for_each_entry(net, net_list, exit_list) { 6368 bn = net_generic(net, bond_net_id); 6369 bond_destroy_proc_dir(bn); 6370 } 6371 } 6372 6373 static struct pernet_operations bond_net_ops = { 6374 .init = bond_net_init, 6375 .exit_batch = bond_net_exit_batch, 6376 .id = &bond_net_id, 6377 .size = sizeof(struct bond_net), 6378 }; 6379 6380 static int __init bonding_init(void) 6381 { 6382 int i; 6383 int res; 6384 6385 res = bond_check_params(&bonding_defaults); 6386 if (res) 6387 goto out; 6388 6389 res = register_pernet_subsys(&bond_net_ops); 6390 if (res) 6391 goto out; 6392 6393 res = bond_netlink_init(); 6394 if (res) 6395 goto err_link; 6396 6397 bond_create_debugfs(); 6398 6399 for (i = 0; i < max_bonds; i++) { 6400 res = bond_create(&init_net, NULL); 6401 if (res) 6402 goto err; 6403 } 6404 6405 skb_flow_dissector_init(&flow_keys_bonding, 6406 flow_keys_bonding_keys, 6407 ARRAY_SIZE(flow_keys_bonding_keys)); 6408 6409 register_netdevice_notifier(&bond_netdev_notifier); 6410 out: 6411 return res; 6412 err: 6413 bond_destroy_debugfs(); 6414 bond_netlink_fini(); 6415 err_link: 6416 unregister_pernet_subsys(&bond_net_ops); 6417 goto out; 6418 6419 } 6420 6421 static void __exit bonding_exit(void) 6422 { 6423 unregister_netdevice_notifier(&bond_netdev_notifier); 6424 6425 bond_destroy_debugfs(); 6426 6427 bond_netlink_fini(); 6428 unregister_pernet_subsys(&bond_net_ops); 6429 6430 #ifdef CONFIG_NET_POLL_CONTROLLER 6431 /* Make sure we don't have an imbalance on our netpoll blocking */ 6432 WARN_ON(atomic_read(&netpoll_block_tx)); 6433 #endif 6434 } 6435 6436 module_init(bonding_init); 6437 module_exit(bonding_exit); 6438 MODULE_LICENSE("GPL"); 6439 MODULE_DESCRIPTION(DRV_DESCRIPTION); 6440 MODULE_AUTHOR("Thomas Davis, tadavis@lbl.gov and many others"); 6441
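/* Example usage (illustrative only; device names are placeholders).
 * The modern iproute2 equivalent of the ifconfig/ifenslave flow
 * described in the header comment of this file is:
 *
 *   modprobe bonding max_bonds=0
 *   ip link add bond0 type bond mode active-backup miimon 100
 *   ip link set eth0 down
 *   ip link set eth0 master bond0
 *   ip link set bond0 up
 *
 * With max_bonds=0, bonding_init() creates no bond%d devices at module
 * load time; bonds are then created on demand through the registered
 * rtnl_link_ops (ip link add) or the sysfs bonding_masters interface.
 */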