xref: /openbmc/linux/drivers/net/bonding/bond_main.c (revision 1f0d40d8)
1 /*
2  * originally based on the dummy device.
3  *
4  * Copyright 1999, Thomas Davis, tadavis@lbl.gov.
5  * Licensed under the GPL. Based on dummy.c, and eql.c devices.
6  *
7  * bonding.c: an Ethernet Bonding driver
8  *
9  * This is useful to talk to a Cisco EtherChannel compatible equipment:
10  *	Cisco 5500
11  *	Sun Trunking (Solaris)
12  *	Alteon AceDirector Trunks
13  *	Linux Bonding
14  *	and probably many L2 switches ...
15  *
16  * How it works:
17  *    ifconfig bond0 ipaddress netmask up
18  *      will set up a network device with an ip address.  No mac address
19  *	will be assigned at this time.  The hw mac address will come from
20  *	the first slave bonded to the channel.  All slaves will then use
21  *	this hw mac address.
22  *
23  *    ifconfig bond0 down
24  *         will release all slaves, marking them as down.
25  *
26  *    ifenslave bond0 eth0
27  *	will attach eth0 to bond0 as a slave.  eth0's hw mac address will either
28  *	a: be used as the bond's initial mac address, or
29  *	b: if bond0 already has a hw mac address, eth0's hw mac address
30  *	   will be set from bond0's.
31  *
32  */
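
/* For reference, an equivalent setup with the modern iproute2 tools is
 * roughly as follows (the interface names and the mode are only examples):
 *
 *    ip link add bond0 type bond mode active-backup
 *    ip link set eth0 down
 *    ip link set eth0 master bond0
 *    ip addr add <ipaddress>/<prefix> dev bond0
 *    ip link set bond0 up
 */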
33 
34 #include <linux/kernel.h>
35 #include <linux/module.h>
36 #include <linux/types.h>
37 #include <linux/fcntl.h>
38 #include <linux/filter.h>
39 #include <linux/interrupt.h>
40 #include <linux/ptrace.h>
41 #include <linux/ioport.h>
42 #include <linux/in.h>
43 #include <net/ip.h>
44 #include <linux/ip.h>
45 #include <linux/icmp.h>
46 #include <linux/icmpv6.h>
47 #include <linux/tcp.h>
48 #include <linux/udp.h>
49 #include <linux/slab.h>
50 #include <linux/string.h>
51 #include <linux/init.h>
52 #include <linux/timer.h>
53 #include <linux/socket.h>
54 #include <linux/ctype.h>
55 #include <linux/inet.h>
56 #include <linux/bitops.h>
57 #include <linux/io.h>
58 #include <asm/dma.h>
59 #include <linux/uaccess.h>
60 #include <linux/errno.h>
61 #include <linux/netdevice.h>
62 #include <linux/inetdevice.h>
63 #include <linux/igmp.h>
64 #include <linux/etherdevice.h>
65 #include <linux/skbuff.h>
66 #include <net/sock.h>
67 #include <linux/rtnetlink.h>
68 #include <linux/smp.h>
69 #include <linux/if_ether.h>
70 #include <net/arp.h>
71 #include <linux/mii.h>
72 #include <linux/ethtool.h>
73 #include <linux/if_vlan.h>
74 #include <linux/if_bonding.h>
75 #include <linux/phy.h>
76 #include <linux/jiffies.h>
77 #include <linux/preempt.h>
78 #include <net/route.h>
79 #include <net/net_namespace.h>
80 #include <net/netns/generic.h>
81 #include <net/pkt_sched.h>
82 #include <linux/rculist.h>
83 #include <net/flow_dissector.h>
84 #include <net/xfrm.h>
85 #include <net/bonding.h>
86 #include <net/bond_3ad.h>
87 #include <net/bond_alb.h>
88 #if IS_ENABLED(CONFIG_TLS_DEVICE)
89 #include <net/tls.h>
90 #endif
91 #include <net/ip6_route.h>
92 
93 #include "bonding_priv.h"
94 
95 /*---------------------------- Module parameters ----------------------------*/
96 
97 /* miimon: interval at which to monitor all links, in milliseconds. <=0 disables monitoring */
98 
99 static int max_bonds	= BOND_DEFAULT_MAX_BONDS;
100 static int tx_queues	= BOND_DEFAULT_TX_QUEUES;
101 static int num_peer_notif = 1;
102 static int miimon;
103 static int updelay;
104 static int downdelay;
105 static int use_carrier	= 1;
106 static char *mode;
107 static char *primary;
108 static char *primary_reselect;
109 static char *lacp_rate;
110 static int min_links;
111 static char *ad_select;
112 static char *xmit_hash_policy;
113 static int arp_interval;
114 static char *arp_ip_target[BOND_MAX_ARP_TARGETS];
115 static char *arp_validate;
116 static char *arp_all_targets;
117 static char *fail_over_mac;
118 static int all_slaves_active;
119 static struct bond_params bonding_defaults;
120 static int resend_igmp = BOND_DEFAULT_RESEND_IGMP;
121 static int packets_per_slave = 1;
122 static int lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
123 
124 module_param(max_bonds, int, 0);
125 MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
126 module_param(tx_queues, int, 0);
127 MODULE_PARM_DESC(tx_queues, "Max number of transmit queues (default = 16)");
128 module_param_named(num_grat_arp, num_peer_notif, int, 0644);
129 MODULE_PARM_DESC(num_grat_arp, "Number of peer notifications to send on "
130 			       "failover event (alias of num_unsol_na)");
131 module_param_named(num_unsol_na, num_peer_notif, int, 0644);
132 MODULE_PARM_DESC(num_unsol_na, "Number of peer notifications to send on "
133 			       "failover event (alias of num_grat_arp)");
134 module_param(miimon, int, 0);
135 MODULE_PARM_DESC(miimon, "Link check interval in milliseconds");
136 module_param(updelay, int, 0);
137 MODULE_PARM_DESC(updelay, "Delay before considering link up, in milliseconds");
138 module_param(downdelay, int, 0);
139 MODULE_PARM_DESC(downdelay, "Delay before considering link down, "
140 			    "in milliseconds");
141 module_param(use_carrier, int, 0);
142 MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; "
143 			      "0 for off, 1 for on (default)");
144 module_param(mode, charp, 0);
145 MODULE_PARM_DESC(mode, "Mode of operation; 0 for balance-rr, "
146 		       "1 for active-backup, 2 for balance-xor, "
147 		       "3 for broadcast, 4 for 802.3ad, 5 for balance-tlb, "
148 		       "6 for balance-alb");
149 module_param(primary, charp, 0);
150 MODULE_PARM_DESC(primary, "Primary network device to use");
151 module_param(primary_reselect, charp, 0);
152 MODULE_PARM_DESC(primary_reselect, "Reselect primary slave "
153 				   "once it comes up; "
154 				   "0 for always (default), "
155 				   "1 for only if speed of primary is "
156 				   "better, "
157 				   "2 for only on active slave "
158 				   "failure");
159 module_param(lacp_rate, charp, 0);
160 MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; "
161 			    "0 for slow, 1 for fast");
162 module_param(ad_select, charp, 0);
163 MODULE_PARM_DESC(ad_select, "802.3ad aggregation selection logic; "
164 			    "0 for stable (default), 1 for bandwidth, "
165 			    "2 for count");
166 module_param(min_links, int, 0);
167 MODULE_PARM_DESC(min_links, "Minimum number of available links before turning on carrier");
168 
169 module_param(xmit_hash_policy, charp, 0);
170 MODULE_PARM_DESC(xmit_hash_policy, "balance-alb, balance-tlb, balance-xor, 802.3ad hashing method; "
171 				   "0 for layer 2 (default), 1 for layer 3+4, "
172 				   "2 for layer 2+3, 3 for encap layer 2+3, "
173 				   "4 for encap layer 3+4, 5 for vlan+srcmac");
174 module_param(arp_interval, int, 0);
175 MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds");
176 module_param_array(arp_ip_target, charp, NULL, 0);
177 MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form");
178 module_param(arp_validate, charp, 0);
179 MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes; "
180 			       "0 for none (default), 1 for active, "
181 			       "2 for backup, 3 for all");
182 module_param(arp_all_targets, charp, 0);
183 MODULE_PARM_DESC(arp_all_targets, "fail on any/all arp targets timeout; 0 for any (default), 1 for all");
184 module_param(fail_over_mac, charp, 0);
185 MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to "
186 				"the same MAC; 0 for none (default), "
187 				"1 for active, 2 for follow");
188 module_param(all_slaves_active, int, 0);
189 MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface "
190 				     "by setting active flag for all slaves; "
191 				     "0 for never (default), 1 for always.");
192 module_param(resend_igmp, int, 0);
193 MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on "
194 			      "link failure");
195 module_param(packets_per_slave, int, 0);
196 MODULE_PARM_DESC(packets_per_slave, "Packets to send per slave in balance-rr "
197 				    "mode; 0 for a random slave, 1 packet per "
198 				    "slave (default), >1 packets per slave.");
199 module_param(lp_interval, uint, 0);
200 MODULE_PARM_DESC(lp_interval, "The number of seconds between instances where "
201 			      "the bonding driver sends learning packets to "
202 			      "each slave's peer switch. The default is 1.");
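
/* For example, a typical 802.3ad configuration might load the module with
 * (the values are illustrative, not defaults):
 *
 *    modprobe bonding mode=802.3ad miimon=100 lacp_rate=fast
 */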
203 
204 /*----------------------------- Global variables ----------------------------*/
205 
206 #ifdef CONFIG_NET_POLL_CONTROLLER
207 atomic_t netpoll_block_tx = ATOMIC_INIT(0);
208 #endif
209 
210 unsigned int bond_net_id __read_mostly;
211 
212 static const struct flow_dissector_key flow_keys_bonding_keys[] = {
213 	{
214 		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
215 		.offset = offsetof(struct flow_keys, control),
216 	},
217 	{
218 		.key_id = FLOW_DISSECTOR_KEY_BASIC,
219 		.offset = offsetof(struct flow_keys, basic),
220 	},
221 	{
222 		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
223 		.offset = offsetof(struct flow_keys, addrs.v4addrs),
224 	},
225 	{
226 		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
227 		.offset = offsetof(struct flow_keys, addrs.v6addrs),
228 	},
229 	{
230 		.key_id = FLOW_DISSECTOR_KEY_TIPC,
231 		.offset = offsetof(struct flow_keys, addrs.tipckey),
232 	},
233 	{
234 		.key_id = FLOW_DISSECTOR_KEY_PORTS,
235 		.offset = offsetof(struct flow_keys, ports),
236 	},
237 	{
238 		.key_id = FLOW_DISSECTOR_KEY_ICMP,
239 		.offset = offsetof(struct flow_keys, icmp),
240 	},
241 	{
242 		.key_id = FLOW_DISSECTOR_KEY_VLAN,
243 		.offset = offsetof(struct flow_keys, vlan),
244 	},
245 	{
246 		.key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL,
247 		.offset = offsetof(struct flow_keys, tags),
248 	},
249 	{
250 		.key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
251 		.offset = offsetof(struct flow_keys, keyid),
252 	},
253 };
254 
255 static struct flow_dissector flow_keys_bonding __read_mostly;
256 
257 /*-------------------------- Forward declarations ---------------------------*/
258 
259 static int bond_init(struct net_device *bond_dev);
260 static void bond_uninit(struct net_device *bond_dev);
261 static void bond_get_stats(struct net_device *bond_dev,
262 			   struct rtnl_link_stats64 *stats);
263 static void bond_slave_arr_handler(struct work_struct *work);
264 static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
265 				  int mod);
266 static void bond_netdev_notify_work(struct work_struct *work);
267 
268 /*---------------------------- General routines -----------------------------*/
269 
270 const char *bond_mode_name(int mode)
271 {
272 	static const char *names[] = {
273 		[BOND_MODE_ROUNDROBIN] = "load balancing (round-robin)",
274 		[BOND_MODE_ACTIVEBACKUP] = "fault-tolerance (active-backup)",
275 		[BOND_MODE_XOR] = "load balancing (xor)",
276 		[BOND_MODE_BROADCAST] = "fault-tolerance (broadcast)",
277 		[BOND_MODE_8023AD] = "IEEE 802.3ad Dynamic link aggregation",
278 		[BOND_MODE_TLB] = "transmit load balancing",
279 		[BOND_MODE_ALB] = "adaptive load balancing",
280 	};
281 
282 	if (mode < BOND_MODE_ROUNDROBIN || mode > BOND_MODE_ALB)
283 		return "unknown";
284 
285 	return names[mode];
286 }
287 
288 /**
289  * bond_dev_queue_xmit - Prepare skb for xmit.
290  *
291  * @bond: bond device that got this skb for tx.
292  * @skb: hw accel VLAN tagged skb to transmit
293  * @slave_dev: slave that is supposed to xmit this skbuff
294  */
295 netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
296 			struct net_device *slave_dev)
297 {
298 	skb->dev = slave_dev;
299 
300 	BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
301 		     sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
302 	skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
303 
304 	if (unlikely(netpoll_tx_running(bond->dev)))
305 		return bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
306 
307 	return dev_queue_xmit(skb);
308 }
309 
310 static bool bond_sk_check(struct bonding *bond)
311 {
312 	switch (BOND_MODE(bond)) {
313 	case BOND_MODE_8023AD:
314 	case BOND_MODE_XOR:
315 		if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34)
316 			return true;
317 		fallthrough;
318 	default:
319 		return false;
320 	}
321 }
322 
323 static bool bond_xdp_check(struct bonding *bond)
324 {
325 	switch (BOND_MODE(bond)) {
326 	case BOND_MODE_ROUNDROBIN:
327 	case BOND_MODE_ACTIVEBACKUP:
328 		return true;
329 	case BOND_MODE_8023AD:
330 	case BOND_MODE_XOR:
331 		/* vlan+srcmac is not supported with XDP as in most cases the 802.1q
332 		 * payload is not in the packet due to hardware offload.
333 		 */
334 		if (bond->params.xmit_policy != BOND_XMIT_POLICY_VLAN_SRCMAC)
335 			return true;
336 		fallthrough;
337 	default:
338 		return false;
339 	}
340 }
341 
342 /*---------------------------------- VLAN -----------------------------------*/
343 
344 /* In the following 2 functions, bond_vlan_rx_add_vid and bond_vlan_rx_kill_vid,
345  * we don't protect the slave list iteration with a lock because:
346  * a. This operation is performed in IOCTL context,
347  * b. The operation is protected by the RTNL semaphore in the 8021q code,
348  * c. Holding a lock with BH disabled while directly calling a base driver
349  *    entry point is generally a BAD idea.
350  *
351  * The design of synchronization/protection for this operation in the 8021q
352  * module is good for one or more VLAN devices over a single physical device
353  * and cannot be extended for a teaming solution like bonding, so there is a
354  * potential race condition here where a net device from the vlan group might
355  * be referenced (either by a base driver or the 8021q code) while it is being
356  * removed from the system. However, it turns out we're not making matters
357  * worse, and if it works for regular VLAN usage it will work here too.
358 */
359 
360 /**
361  * bond_vlan_rx_add_vid - Propagates adding an id to slaves
362  * @bond_dev: bonding net device that got called
363  * @proto: network protocol ID
364  * @vid: vlan id being added
365  */
366 static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
367 				__be16 proto, u16 vid)
368 {
369 	struct bonding *bond = netdev_priv(bond_dev);
370 	struct slave *slave, *rollback_slave;
371 	struct list_head *iter;
372 	int res;
373 
374 	bond_for_each_slave(bond, slave, iter) {
375 		res = vlan_vid_add(slave->dev, proto, vid);
376 		if (res)
377 			goto unwind;
378 	}
379 
380 	return 0;
381 
382 unwind:
383 	/* unwind to the slave that failed */
384 	bond_for_each_slave(bond, rollback_slave, iter) {
385 		if (rollback_slave == slave)
386 			break;
387 
388 		vlan_vid_del(rollback_slave->dev, proto, vid);
389 	}
390 
391 	return res;
392 }
393 
394 /**
395  * bond_vlan_rx_kill_vid - Propagates deleting an id to slaves
396  * @bond_dev: bonding net device that got called
397  * @proto: network protocol ID
398  * @vid: vlan id being removed
399  */
400 static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
401 				 __be16 proto, u16 vid)
402 {
403 	struct bonding *bond = netdev_priv(bond_dev);
404 	struct list_head *iter;
405 	struct slave *slave;
406 
407 	bond_for_each_slave(bond, slave, iter)
408 		vlan_vid_del(slave->dev, proto, vid);
409 
410 	if (bond_is_lb(bond))
411 		bond_alb_clear_vlan(bond, vid);
412 
413 	return 0;
414 }
415 
416 /*---------------------------------- XFRM -----------------------------------*/
417 
418 #ifdef CONFIG_XFRM_OFFLOAD
419 /**
420  * bond_ipsec_add_sa - program device with a security association
421  * @xs: pointer to transformer state struct
422  * @extack: extack pointer to fill with the failure reason
423  **/
424 static int bond_ipsec_add_sa(struct xfrm_state *xs,
425 			     struct netlink_ext_ack *extack)
426 {
427 	struct net_device *bond_dev = xs->xso.dev;
428 	struct bond_ipsec *ipsec;
429 	struct bonding *bond;
430 	struct slave *slave;
431 	int err;
432 
433 	if (!bond_dev)
434 		return -EINVAL;
435 
436 	rcu_read_lock();
437 	bond = netdev_priv(bond_dev);
438 	slave = rcu_dereference(bond->curr_active_slave);
439 	if (!slave) {
440 		rcu_read_unlock();
441 		return -ENODEV;
442 	}
443 
444 	if (!slave->dev->xfrmdev_ops ||
445 	    !slave->dev->xfrmdev_ops->xdo_dev_state_add ||
446 	    netif_is_bond_master(slave->dev)) {
447 		NL_SET_ERR_MSG_MOD(extack, "Slave does not support ipsec offload");
448 		rcu_read_unlock();
449 		return -EINVAL;
450 	}
451 
452 	ipsec = kmalloc(sizeof(*ipsec), GFP_ATOMIC);
453 	if (!ipsec) {
454 		rcu_read_unlock();
455 		return -ENOMEM;
456 	}
457 	xs->xso.real_dev = slave->dev;
458 
459 	err = slave->dev->xfrmdev_ops->xdo_dev_state_add(xs, extack);
460 	if (!err) {
461 		ipsec->xs = xs;
462 		INIT_LIST_HEAD(&ipsec->list);
463 		spin_lock_bh(&bond->ipsec_lock);
464 		list_add(&ipsec->list, &bond->ipsec_list);
465 		spin_unlock_bh(&bond->ipsec_lock);
466 	} else {
467 		kfree(ipsec);
468 	}
469 	rcu_read_unlock();
470 	return err;
471 }
472 
473 static void bond_ipsec_add_sa_all(struct bonding *bond)
474 {
475 	struct net_device *bond_dev = bond->dev;
476 	struct bond_ipsec *ipsec;
477 	struct slave *slave;
478 
479 	rcu_read_lock();
480 	slave = rcu_dereference(bond->curr_active_slave);
481 	if (!slave)
482 		goto out;
483 
484 	if (!slave->dev->xfrmdev_ops ||
485 	    !slave->dev->xfrmdev_ops->xdo_dev_state_add ||
486 	    netif_is_bond_master(slave->dev)) {
487 		spin_lock_bh(&bond->ipsec_lock);
488 		if (!list_empty(&bond->ipsec_list))
489 			slave_warn(bond_dev, slave->dev,
490 				   "%s: no slave xdo_dev_state_add\n",
491 				   __func__);
492 		spin_unlock_bh(&bond->ipsec_lock);
493 		goto out;
494 	}
495 
496 	spin_lock_bh(&bond->ipsec_lock);
497 	list_for_each_entry(ipsec, &bond->ipsec_list, list) {
498 		ipsec->xs->xso.real_dev = slave->dev;
499 		if (slave->dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs, NULL)) {
500 			slave_warn(bond_dev, slave->dev, "%s: failed to add SA\n", __func__);
501 			ipsec->xs->xso.real_dev = NULL;
502 		}
503 	}
504 	spin_unlock_bh(&bond->ipsec_lock);
505 out:
506 	rcu_read_unlock();
507 }
508 
509 /**
510  * bond_ipsec_del_sa - clear out this specific SA
511  * @xs: pointer to transformer state struct
512  **/
513 static void bond_ipsec_del_sa(struct xfrm_state *xs)
514 {
515 	struct net_device *bond_dev = xs->xso.dev;
516 	struct bond_ipsec *ipsec;
517 	struct bonding *bond;
518 	struct slave *slave;
519 
520 	if (!bond_dev)
521 		return;
522 
523 	rcu_read_lock();
524 	bond = netdev_priv(bond_dev);
525 	slave = rcu_dereference(bond->curr_active_slave);
526 
527 	if (!slave)
528 		goto out;
529 
530 	if (!xs->xso.real_dev)
531 		goto out;
532 
533 	WARN_ON(xs->xso.real_dev != slave->dev);
534 
535 	if (!slave->dev->xfrmdev_ops ||
536 	    !slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
537 	    netif_is_bond_master(slave->dev)) {
538 		slave_warn(bond_dev, slave->dev, "%s: no slave xdo_dev_state_delete\n", __func__);
539 		goto out;
540 	}
541 
542 	slave->dev->xfrmdev_ops->xdo_dev_state_delete(xs);
543 out:
544 	spin_lock_bh(&bond->ipsec_lock);
545 	list_for_each_entry(ipsec, &bond->ipsec_list, list) {
546 		if (ipsec->xs == xs) {
547 			list_del(&ipsec->list);
548 			kfree(ipsec);
549 			break;
550 		}
551 	}
552 	spin_unlock_bh(&bond->ipsec_lock);
553 	rcu_read_unlock();
554 }
555 
556 static void bond_ipsec_del_sa_all(struct bonding *bond)
557 {
558 	struct net_device *bond_dev = bond->dev;
559 	struct bond_ipsec *ipsec;
560 	struct slave *slave;
561 
562 	rcu_read_lock();
563 	slave = rcu_dereference(bond->curr_active_slave);
564 	if (!slave) {
565 		rcu_read_unlock();
566 		return;
567 	}
568 
569 	spin_lock_bh(&bond->ipsec_lock);
570 	list_for_each_entry(ipsec, &bond->ipsec_list, list) {
571 		if (!ipsec->xs->xso.real_dev)
572 			continue;
573 
574 		if (!slave->dev->xfrmdev_ops ||
575 		    !slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
576 		    netif_is_bond_master(slave->dev)) {
577 			slave_warn(bond_dev, slave->dev,
578 				   "%s: no slave xdo_dev_state_delete\n",
579 				   __func__);
580 		} else {
581 			slave->dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs);
582 		}
583 		ipsec->xs->xso.real_dev = NULL;
584 	}
585 	spin_unlock_bh(&bond->ipsec_lock);
586 	rcu_read_unlock();
587 }
588 
589 /**
590  * bond_ipsec_offload_ok - can this packet use the xfrm hw offload
591  * @skb: current data packet
592  * @xs: pointer to transformer state struct
593  **/
594 static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
595 {
596 	struct net_device *bond_dev = xs->xso.dev;
597 	struct net_device *real_dev;
598 	struct slave *curr_active;
599 	struct bonding *bond;
600 	int err;
601 
602 	bond = netdev_priv(bond_dev);
603 	rcu_read_lock();
604 	curr_active = rcu_dereference(bond->curr_active_slave);
605 	real_dev = curr_active->dev;
606 
607 	if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
608 		err = false;
609 		goto out;
610 	}
611 
612 	if (!xs->xso.real_dev) {
613 		err = false;
614 		goto out;
615 	}
616 
617 	if (!real_dev->xfrmdev_ops ||
618 	    !real_dev->xfrmdev_ops->xdo_dev_offload_ok ||
619 	    netif_is_bond_master(real_dev)) {
620 		err = false;
621 		goto out;
622 	}
623 
624 	err = real_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs);
625 out:
626 	rcu_read_unlock();
627 	return err;
628 }
629 
630 static const struct xfrmdev_ops bond_xfrmdev_ops = {
631 	.xdo_dev_state_add = bond_ipsec_add_sa,
632 	.xdo_dev_state_delete = bond_ipsec_del_sa,
633 	.xdo_dev_offload_ok = bond_ipsec_offload_ok,
634 };
635 #endif /* CONFIG_XFRM_OFFLOAD */
636 
637 /*------------------------------- Link status -------------------------------*/
638 
639 /* Set the carrier state for the master according to the state of its
640  * slaves.  If any slaves are up, the master is up.  In 802.3ad mode,
641  * do special 802.3ad magic.
642  *
643  * Returns zero if carrier state does not change, nonzero if it does.
644  */
645 int bond_set_carrier(struct bonding *bond)
646 {
647 	struct list_head *iter;
648 	struct slave *slave;
649 
650 	if (!bond_has_slaves(bond))
651 		goto down;
652 
653 	if (BOND_MODE(bond) == BOND_MODE_8023AD)
654 		return bond_3ad_set_carrier(bond);
655 
656 	bond_for_each_slave(bond, slave, iter) {
657 		if (slave->link == BOND_LINK_UP) {
658 			if (!netif_carrier_ok(bond->dev)) {
659 				netif_carrier_on(bond->dev);
660 				return 1;
661 			}
662 			return 0;
663 		}
664 	}
665 
666 down:
667 	if (netif_carrier_ok(bond->dev)) {
668 		netif_carrier_off(bond->dev);
669 		return 1;
670 	}
671 	return 0;
672 }
673 
674 /* Get link speed and duplex from the slave's base driver
675  * using ethtool. If for some reason the call fails or the
676  * values are invalid, set speed to SPEED_UNKNOWN and duplex to
677  * DUPLEX_UNKNOWN and return 1. Return 0 if valid speed and duplex
678  * were retrieved.
679  */
680 static int bond_update_speed_duplex(struct slave *slave)
681 {
682 	struct net_device *slave_dev = slave->dev;
683 	struct ethtool_link_ksettings ecmd;
684 	int res;
685 
686 	slave->speed = SPEED_UNKNOWN;
687 	slave->duplex = DUPLEX_UNKNOWN;
688 
689 	res = __ethtool_get_link_ksettings(slave_dev, &ecmd);
690 	if (res < 0)
691 		return 1;
692 	if (ecmd.base.speed == 0 || ecmd.base.speed == ((__u32)-1))
693 		return 1;
694 	switch (ecmd.base.duplex) {
695 	case DUPLEX_FULL:
696 	case DUPLEX_HALF:
697 		break;
698 	default:
699 		return 1;
700 	}
701 
702 	slave->speed = ecmd.base.speed;
703 	slave->duplex = ecmd.base.duplex;
704 
705 	return 0;
706 }
707 
708 const char *bond_slave_link_status(s8 link)
709 {
710 	switch (link) {
711 	case BOND_LINK_UP:
712 		return "up";
713 	case BOND_LINK_FAIL:
714 		return "going down";
715 	case BOND_LINK_DOWN:
716 		return "down";
717 	case BOND_LINK_BACK:
718 		return "going back";
719 	default:
720 		return "unknown";
721 	}
722 }
723 
724 /* if <dev> supports MII link status reporting, check its link status.
725  *
726  * We either do MII/ETHTOOL ioctls, or check netif_carrier_ok(),
727  * depending upon the setting of the use_carrier parameter.
728  *
729  * Return either BMSR_LSTATUS, meaning that the link is up (or we
730  * can't tell and just pretend it is), or 0, meaning that the link is
731  * down.
732  *
733  * If reporting is non-zero, instead of faking link up, return -1 if
734  * both ETHTOOL and MII ioctls fail (meaning the device does not
735  * support them).  If use_carrier is set, return whatever it says.
736  * It'd be nice if there was a good way to tell if a driver supports
737  * netif_carrier, but there really isn't.
738  */
739 static int bond_check_dev_link(struct bonding *bond,
740 			       struct net_device *slave_dev, int reporting)
741 {
742 	const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
743 	int (*ioctl)(struct net_device *, struct ifreq *, int);
744 	struct ifreq ifr;
745 	struct mii_ioctl_data *mii;
746 
747 	if (!reporting && !netif_running(slave_dev))
748 		return 0;
749 
750 	if (bond->params.use_carrier)
751 		return netif_carrier_ok(slave_dev) ? BMSR_LSTATUS : 0;
752 
753 	/* Try to get link status using Ethtool first. */
754 	if (slave_dev->ethtool_ops->get_link)
755 		return slave_dev->ethtool_ops->get_link(slave_dev) ?
756 			BMSR_LSTATUS : 0;
757 
758 	/* Ethtool can't be used, fallback to MII ioctls. */
759 	ioctl = slave_ops->ndo_eth_ioctl;
760 	if (ioctl) {
761 		/* TODO: set pointer to correct ioctl on a per team member
762 		 *       basis to make this more efficient. That is, once
763 		 *       we determine the correct ioctl, we will always
764 		 *       call it and not the others for that team
765 		 *       member.
766 		 */
767 
768 		/* We cannot assume that SIOCGMIIPHY will also read a
769 		 * register; not all network drivers (e.g., e100)
770 		 * support that.
771 		 */
772 
773 		/* Yes, the mii is overlaid on the ifreq.ifr_ifru */
774 		strscpy_pad(ifr.ifr_name, slave_dev->name, IFNAMSIZ);
775 		mii = if_mii(&ifr);
776 		if (ioctl(slave_dev, &ifr, SIOCGMIIPHY) == 0) {
777 			mii->reg_num = MII_BMSR;
778 			if (ioctl(slave_dev, &ifr, SIOCGMIIREG) == 0)
779 				return mii->val_out & BMSR_LSTATUS;
780 		}
781 	}
782 
783 	/* If reporting, report that either there's no ndo_eth_ioctl,
784 	 * or both SIOCGMIIREG and get_link failed (meaning that we
785 	 * cannot report link status).  If not reporting, pretend
786 	 * we're ok.
787 	 */
788 	return reporting ? -1 : BMSR_LSTATUS;
789 }
790 
791 /*----------------------------- Multicast list ------------------------------*/
792 
793 /* Push the promiscuity flag down to appropriate slaves */
794 static int bond_set_promiscuity(struct bonding *bond, int inc)
795 {
796 	struct list_head *iter;
797 	int err = 0;
798 
799 	if (bond_uses_primary(bond)) {
800 		struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);
801 
802 		if (curr_active)
803 			err = dev_set_promiscuity(curr_active->dev, inc);
804 	} else {
805 		struct slave *slave;
806 
807 		bond_for_each_slave(bond, slave, iter) {
808 			err = dev_set_promiscuity(slave->dev, inc);
809 			if (err)
810 				return err;
811 		}
812 	}
813 	return err;
814 }
815 
816 /* Push the allmulti flag down to all slaves */
817 static int bond_set_allmulti(struct bonding *bond, int inc)
818 {
819 	struct list_head *iter;
820 	int err = 0;
821 
822 	if (bond_uses_primary(bond)) {
823 		struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);
824 
825 		if (curr_active)
826 			err = dev_set_allmulti(curr_active->dev, inc);
827 	} else {
828 		struct slave *slave;
829 
830 		bond_for_each_slave(bond, slave, iter) {
831 			err = dev_set_allmulti(slave->dev, inc);
832 			if (err)
833 				return err;
834 		}
835 	}
836 	return err;
837 }
838 
839 /* Retrieve the list of registered multicast addresses for the bonding
840  * device and retransmit an IGMP JOIN request to the current active
841  * slave.
842  */
843 static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
844 {
845 	struct bonding *bond = container_of(work, struct bonding,
846 					    mcast_work.work);
847 
848 	if (!rtnl_trylock()) {
849 		queue_delayed_work(bond->wq, &bond->mcast_work, 1);
850 		return;
851 	}
852 	call_netdevice_notifiers(NETDEV_RESEND_IGMP, bond->dev);
853 
854 	if (bond->igmp_retrans > 1) {
855 		bond->igmp_retrans--;
856 		queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
857 	}
858 	rtnl_unlock();
859 }
860 
861 /* Flush bond's hardware addresses from slave */
862 static void bond_hw_addr_flush(struct net_device *bond_dev,
863 			       struct net_device *slave_dev)
864 {
865 	struct bonding *bond = netdev_priv(bond_dev);
866 
867 	dev_uc_unsync(slave_dev, bond_dev);
868 	dev_mc_unsync(slave_dev, bond_dev);
869 
870 	if (BOND_MODE(bond) == BOND_MODE_8023AD)
871 		dev_mc_del(slave_dev, lacpdu_mcast_addr);
872 }
873 
874 /*--------------------------- Active slave change ---------------------------*/
875 
876 /* Update the hardware address list and promisc/allmulti for the new and
877  * old active slaves (if any).  Modes that are not using primary keep all
878  * slaves up date at all times; only the modes that use primary need to call
879  * this function to swap these settings during a failover.
880  */
881 static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
882 			      struct slave *old_active)
883 {
884 	if (old_active) {
885 		if (bond->dev->flags & IFF_PROMISC)
886 			dev_set_promiscuity(old_active->dev, -1);
887 
888 		if (bond->dev->flags & IFF_ALLMULTI)
889 			dev_set_allmulti(old_active->dev, -1);
890 
891 		if (bond->dev->flags & IFF_UP)
892 			bond_hw_addr_flush(bond->dev, old_active->dev);
893 	}
894 
895 	if (new_active) {
896 		/* FIXME: Signal errors upstream. */
897 		if (bond->dev->flags & IFF_PROMISC)
898 			dev_set_promiscuity(new_active->dev, 1);
899 
900 		if (bond->dev->flags & IFF_ALLMULTI)
901 			dev_set_allmulti(new_active->dev, 1);
902 
903 		if (bond->dev->flags & IFF_UP) {
904 			netif_addr_lock_bh(bond->dev);
905 			dev_uc_sync(new_active->dev, bond->dev);
906 			dev_mc_sync(new_active->dev, bond->dev);
907 			netif_addr_unlock_bh(bond->dev);
908 		}
909 	}
910 }
911 
912 /**
913  * bond_set_dev_addr - clone slave's address to bond
914  * @bond_dev: bond net device
915  * @slave_dev: slave net device
916  *
917  * Should be called with RTNL held.
918  */
919 static int bond_set_dev_addr(struct net_device *bond_dev,
920 			     struct net_device *slave_dev)
921 {
922 	int err;
923 
924 	slave_dbg(bond_dev, slave_dev, "bond_dev=%p slave_dev=%p slave_dev->addr_len=%d\n",
925 		  bond_dev, slave_dev, slave_dev->addr_len);
926 	err = dev_pre_changeaddr_notify(bond_dev, slave_dev->dev_addr, NULL);
927 	if (err)
928 		return err;
929 
930 	__dev_addr_set(bond_dev, slave_dev->dev_addr, slave_dev->addr_len);
931 	bond_dev->addr_assign_type = NET_ADDR_STOLEN;
932 	call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
933 	return 0;
934 }
935 
936 static struct slave *bond_get_old_active(struct bonding *bond,
937 					 struct slave *new_active)
938 {
939 	struct slave *slave;
940 	struct list_head *iter;
941 
942 	bond_for_each_slave(bond, slave, iter) {
943 		if (slave == new_active)
944 			continue;
945 
946 		if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr))
947 			return slave;
948 	}
949 
950 	return NULL;
951 }
952 
953 /* bond_do_fail_over_mac
954  *
955  * Perform special MAC address swapping for fail_over_mac settings
956  *
957  * Called with RTNL
958  */
959 static void bond_do_fail_over_mac(struct bonding *bond,
960 				  struct slave *new_active,
961 				  struct slave *old_active)
962 {
963 	u8 tmp_mac[MAX_ADDR_LEN];
964 	struct sockaddr_storage ss;
965 	int rv;
966 
967 	switch (bond->params.fail_over_mac) {
968 	case BOND_FOM_ACTIVE:
969 		if (new_active) {
970 			rv = bond_set_dev_addr(bond->dev, new_active->dev);
971 			if (rv)
972 				slave_err(bond->dev, new_active->dev, "Error %d setting bond MAC from slave\n",
973 					  -rv);
974 		}
975 		break;
976 	case BOND_FOM_FOLLOW:
977 		/* if new_active && old_active, swap them
978 		 * if just old_active, do nothing (going to no active slave)
979 		 * if just new_active, set new_active to bond's MAC
980 		 */
981 		if (!new_active)
982 			return;
983 
984 		if (!old_active)
985 			old_active = bond_get_old_active(bond, new_active);
986 
987 		if (old_active) {
988 			bond_hw_addr_copy(tmp_mac, new_active->dev->dev_addr,
989 					  new_active->dev->addr_len);
990 			bond_hw_addr_copy(ss.__data,
991 					  old_active->dev->dev_addr,
992 					  old_active->dev->addr_len);
993 			ss.ss_family = new_active->dev->type;
994 		} else {
995 			bond_hw_addr_copy(ss.__data, bond->dev->dev_addr,
996 					  bond->dev->addr_len);
997 			ss.ss_family = bond->dev->type;
998 		}
999 
1000 		rv = dev_set_mac_address(new_active->dev,
1001 					 (struct sockaddr *)&ss, NULL);
1002 		if (rv) {
1003 			slave_err(bond->dev, new_active->dev, "Error %d setting MAC of new active slave\n",
1004 				  -rv);
1005 			goto out;
1006 		}
1007 
1008 		if (!old_active)
1009 			goto out;
1010 
1011 		bond_hw_addr_copy(ss.__data, tmp_mac,
1012 				  new_active->dev->addr_len);
1013 		ss.ss_family = old_active->dev->type;
1014 
1015 		rv = dev_set_mac_address(old_active->dev,
1016 					 (struct sockaddr *)&ss, NULL);
1017 		if (rv)
1018 			slave_err(bond->dev, old_active->dev, "Error %d setting MAC of old active slave\n",
1019 				  -rv);
1020 out:
1021 		break;
1022 	default:
1023 		netdev_err(bond->dev, "bond_do_fail_over_mac impossible: bad policy %d\n",
1024 			   bond->params.fail_over_mac);
1025 		break;
1026 	}
1027 
1028 }
1029 
1030 /**
1031  * bond_choose_primary_or_current - select the primary or high priority slave
1032  * @bond: our bonding struct
1033  *
1034  * - Check if there is a primary link. If the primary link was set and is up,
1035  *   go on and do link reselection.
1036  *
1037  * - If primary link is not set or down, find the highest priority link.
1038  *   If the highest priority link is not the current slave, set it as the primary
1039  *   link and do link reselection.
1040  */
1041 static struct slave *bond_choose_primary_or_current(struct bonding *bond)
1042 {
1043 	struct slave *prim = rtnl_dereference(bond->primary_slave);
1044 	struct slave *curr = rtnl_dereference(bond->curr_active_slave);
1045 	struct slave *slave, *hprio = NULL;
1046 	struct list_head *iter;
1047 
1048 	if (!prim || prim->link != BOND_LINK_UP) {
1049 		bond_for_each_slave(bond, slave, iter) {
1050 			if (slave->link == BOND_LINK_UP) {
1051 				hprio = hprio ?: slave;
1052 				if (slave->prio > hprio->prio)
1053 					hprio = slave;
1054 			}
1055 		}
1056 
1057 		if (hprio && hprio != curr) {
1058 			prim = hprio;
1059 			goto link_reselect;
1060 		}
1061 
1062 		if (!curr || curr->link != BOND_LINK_UP)
1063 			return NULL;
1064 		return curr;
1065 	}
1066 
1067 	if (bond->force_primary) {
1068 		bond->force_primary = false;
1069 		return prim;
1070 	}
1071 
1072 link_reselect:
1073 	if (!curr || curr->link != BOND_LINK_UP)
1074 		return prim;
1075 
1076 	/* At this point, prim and curr are both up */
1077 	switch (bond->params.primary_reselect) {
1078 	case BOND_PRI_RESELECT_ALWAYS:
1079 		return prim;
1080 	case BOND_PRI_RESELECT_BETTER:
1081 		if (prim->speed < curr->speed)
1082 			return curr;
1083 		if (prim->speed == curr->speed && prim->duplex <= curr->duplex)
1084 			return curr;
1085 		return prim;
1086 	case BOND_PRI_RESELECT_FAILURE:
1087 		return curr;
1088 	default:
1089 		netdev_err(bond->dev, "impossible primary_reselect %d\n",
1090 			   bond->params.primary_reselect);
1091 		return curr;
1092 	}
1093 }
1094 
1095 /**
1096  * bond_find_best_slave - select the best available slave to be the active one
1097  * @bond: our bonding struct
1098  */
1099 static struct slave *bond_find_best_slave(struct bonding *bond)
1100 {
1101 	struct slave *slave, *bestslave = NULL;
1102 	struct list_head *iter;
1103 	int mintime = bond->params.updelay;
1104 
1105 	slave = bond_choose_primary_or_current(bond);
1106 	if (slave)
1107 		return slave;
1108 
1109 	bond_for_each_slave(bond, slave, iter) {
1110 		if (slave->link == BOND_LINK_UP)
1111 			return slave;
1112 		if (slave->link == BOND_LINK_BACK && bond_slave_is_up(slave) &&
1113 		    slave->delay < mintime) {
1114 			mintime = slave->delay;
1115 			bestslave = slave;
1116 		}
1117 	}
1118 
1119 	return bestslave;
1120 }
1121 
1122 static bool bond_should_notify_peers(struct bonding *bond)
1123 {
1124 	struct slave *slave;
1125 
1126 	rcu_read_lock();
1127 	slave = rcu_dereference(bond->curr_active_slave);
1128 	rcu_read_unlock();
1129 
1130 	if (!slave || !bond->send_peer_notif ||
1131 	    bond->send_peer_notif %
1132 	    max(1, bond->params.peer_notif_delay) != 0 ||
1133 	    !netif_carrier_ok(bond->dev) ||
1134 	    test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
1135 		return false;
1136 
1137 	netdev_dbg(bond->dev, "bond_should_notify_peers: slave %s\n",
1138 		   slave ? slave->dev->name : "NULL");
1139 
1140 	return true;
1141 }
1142 
1143 /**
1144  * bond_change_active_slave - change the active slave into the specified one
1145  * @bond: our bonding struct
1146  * @new_active: the new slave to make the active one
1147  *
1148  * Set the new slave to the bond's settings and unset them on the old
1149  * curr_active_slave.
1150  * Settings include flags, mc-list, promiscuity, allmulti, etc.
1151  *
1152  * If @new_active's link state is %BOND_LINK_BACK we'll set it to %BOND_LINK_UP,
1153  * because it is apparently the best available slave we have, even though its
1154  * updelay hasn't timed out yet.
1155  *
1156  * Caller must hold RTNL.
1157  */
1158 void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
1159 {
1160 	struct slave *old_active;
1161 
1162 	ASSERT_RTNL();
1163 
1164 	old_active = rtnl_dereference(bond->curr_active_slave);
1165 
1166 	if (old_active == new_active)
1167 		return;
1168 
1169 #ifdef CONFIG_XFRM_OFFLOAD
1170 	bond_ipsec_del_sa_all(bond);
1171 #endif /* CONFIG_XFRM_OFFLOAD */
1172 
1173 	if (new_active) {
1174 		new_active->last_link_up = jiffies;
1175 
1176 		if (new_active->link == BOND_LINK_BACK) {
1177 			if (bond_uses_primary(bond)) {
1178 				slave_info(bond->dev, new_active->dev, "making interface the new active one %d ms earlier\n",
1179 					   (bond->params.updelay - new_active->delay) * bond->params.miimon);
1180 			}
1181 
1182 			new_active->delay = 0;
1183 			bond_set_slave_link_state(new_active, BOND_LINK_UP,
1184 						  BOND_SLAVE_NOTIFY_NOW);
1185 
1186 			if (BOND_MODE(bond) == BOND_MODE_8023AD)
1187 				bond_3ad_handle_link_change(new_active, BOND_LINK_UP);
1188 
1189 			if (bond_is_lb(bond))
1190 				bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
1191 		} else {
1192 			if (bond_uses_primary(bond))
1193 				slave_info(bond->dev, new_active->dev, "making interface the new active one\n");
1194 		}
1195 	}
1196 
1197 	if (bond_uses_primary(bond))
1198 		bond_hw_addr_swap(bond, new_active, old_active);
1199 
1200 	if (bond_is_lb(bond)) {
1201 		bond_alb_handle_active_change(bond, new_active);
1202 		if (old_active)
1203 			bond_set_slave_inactive_flags(old_active,
1204 						      BOND_SLAVE_NOTIFY_NOW);
1205 		if (new_active)
1206 			bond_set_slave_active_flags(new_active,
1207 						    BOND_SLAVE_NOTIFY_NOW);
1208 	} else {
1209 		rcu_assign_pointer(bond->curr_active_slave, new_active);
1210 	}
1211 
1212 	if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
1213 		if (old_active)
1214 			bond_set_slave_inactive_flags(old_active,
1215 						      BOND_SLAVE_NOTIFY_NOW);
1216 
1217 		if (new_active) {
1218 			bool should_notify_peers = false;
1219 
1220 			bond_set_slave_active_flags(new_active,
1221 						    BOND_SLAVE_NOTIFY_NOW);
1222 
1223 			if (bond->params.fail_over_mac)
1224 				bond_do_fail_over_mac(bond, new_active,
1225 						      old_active);
1226 
1227 			if (netif_running(bond->dev)) {
1228 				bond->send_peer_notif =
1229 					bond->params.num_peer_notif *
1230 					max(1, bond->params.peer_notif_delay);
1231 				should_notify_peers =
1232 					bond_should_notify_peers(bond);
1233 			}
1234 
1235 			call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev);
1236 			if (should_notify_peers) {
1237 				bond->send_peer_notif--;
1238 				call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
1239 							 bond->dev);
1240 			}
1241 		}
1242 	}
1243 
1244 #ifdef CONFIG_XFRM_OFFLOAD
1245 	bond_ipsec_add_sa_all(bond);
1246 #endif /* CONFIG_XFRM_OFFLOAD */
1247 
1248 	/* Resend IGMP joins since the active slave has changed or
1249 	 * they were all sent on curr_active_slave.
1250 	 * Resend only if the bond is up, the mode is affected and
1251 	 * retransmission is enabled.
1252 	 */
1253 	if (netif_running(bond->dev) && (bond->params.resend_igmp > 0) &&
1254 	    ((bond_uses_primary(bond) && new_active) ||
1255 	     BOND_MODE(bond) == BOND_MODE_ROUNDROBIN)) {
1256 		bond->igmp_retrans = bond->params.resend_igmp;
1257 		queue_delayed_work(bond->wq, &bond->mcast_work, 1);
1258 	}
1259 }
1260 
1261 /**
1262  * bond_select_active_slave - select a new active slave, if needed
1263  * @bond: our bonding struct
1264  *
1265  * This function should be called when one of the following occurs:
1266  * - The old curr_active_slave has been released or lost its link.
1267  * - The primary_slave has got its link back.
1268  * - A slave has got its link back and there's no old curr_active_slave.
1269  *
1270  * Caller must hold RTNL.
1271  */
1272 void bond_select_active_slave(struct bonding *bond)
1273 {
1274 	struct slave *best_slave;
1275 	int rv;
1276 
1277 	ASSERT_RTNL();
1278 
1279 	best_slave = bond_find_best_slave(bond);
1280 	if (best_slave != rtnl_dereference(bond->curr_active_slave)) {
1281 		bond_change_active_slave(bond, best_slave);
1282 		rv = bond_set_carrier(bond);
1283 		if (!rv)
1284 			return;
1285 
1286 		if (netif_carrier_ok(bond->dev))
1287 			netdev_info(bond->dev, "active interface up!\n");
1288 		else
1289 			netdev_info(bond->dev, "now running without any active interface!\n");
1290 	}
1291 }
1292 
1293 #ifdef CONFIG_NET_POLL_CONTROLLER
1294 static inline int slave_enable_netpoll(struct slave *slave)
1295 {
1296 	struct netpoll *np;
1297 	int err = 0;
1298 
1299 	np = kzalloc(sizeof(*np), GFP_KERNEL);
1300 	err = -ENOMEM;
1301 	if (!np)
1302 		goto out;
1303 
1304 	err = __netpoll_setup(np, slave->dev);
1305 	if (err) {
1306 		kfree(np);
1307 		goto out;
1308 	}
1309 	slave->np = np;
1310 out:
1311 	return err;
1312 }
1313 static inline void slave_disable_netpoll(struct slave *slave)
1314 {
1315 	struct netpoll *np = slave->np;
1316 
1317 	if (!np)
1318 		return;
1319 
1320 	slave->np = NULL;
1321 
1322 	__netpoll_free(np);
1323 }
1324 
1325 static void bond_poll_controller(struct net_device *bond_dev)
1326 {
1327 	struct bonding *bond = netdev_priv(bond_dev);
1328 	struct slave *slave = NULL;
1329 	struct list_head *iter;
1330 	struct ad_info ad_info;
1331 
1332 	if (BOND_MODE(bond) == BOND_MODE_8023AD)
1333 		if (bond_3ad_get_active_agg_info(bond, &ad_info))
1334 			return;
1335 
1336 	bond_for_each_slave_rcu(bond, slave, iter) {
1337 		if (!bond_slave_is_up(slave))
1338 			continue;
1339 
1340 		if (BOND_MODE(bond) == BOND_MODE_8023AD) {
1341 			struct aggregator *agg =
1342 			    SLAVE_AD_INFO(slave)->port.aggregator;
1343 
1344 			if (agg &&
1345 			    agg->aggregator_identifier != ad_info.aggregator_id)
1346 				continue;
1347 		}
1348 
1349 		netpoll_poll_dev(slave->dev);
1350 	}
1351 }
1352 
1353 static void bond_netpoll_cleanup(struct net_device *bond_dev)
1354 {
1355 	struct bonding *bond = netdev_priv(bond_dev);
1356 	struct list_head *iter;
1357 	struct slave *slave;
1358 
1359 	bond_for_each_slave(bond, slave, iter)
1360 		if (bond_slave_is_up(slave))
1361 			slave_disable_netpoll(slave);
1362 }
1363 
1364 static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
1365 {
1366 	struct bonding *bond = netdev_priv(dev);
1367 	struct list_head *iter;
1368 	struct slave *slave;
1369 	int err = 0;
1370 
1371 	bond_for_each_slave(bond, slave, iter) {
1372 		err = slave_enable_netpoll(slave);
1373 		if (err) {
1374 			bond_netpoll_cleanup(dev);
1375 			break;
1376 		}
1377 	}
1378 	return err;
1379 }
1380 #else
1381 static inline int slave_enable_netpoll(struct slave *slave)
1382 {
1383 	return 0;
1384 }
1385 static inline void slave_disable_netpoll(struct slave *slave)
1386 {
1387 }
1388 static void bond_netpoll_cleanup(struct net_device *bond_dev)
1389 {
1390 }
1391 #endif
1392 
1393 /*---------------------------------- IOCTL ----------------------------------*/
1394 
1395 static netdev_features_t bond_fix_features(struct net_device *dev,
1396 					   netdev_features_t features)
1397 {
1398 	struct bonding *bond = netdev_priv(dev);
1399 	struct list_head *iter;
1400 	netdev_features_t mask;
1401 	struct slave *slave;
1402 
1403 	mask = features;
1404 
1405 	features &= ~NETIF_F_ONE_FOR_ALL;
1406 	features |= NETIF_F_ALL_FOR_ALL;
1407 
1408 	bond_for_each_slave(bond, slave, iter) {
1409 		features = netdev_increment_features(features,
1410 						     slave->dev->features,
1411 						     mask);
1412 	}
1413 	features = netdev_add_tso_features(features, mask);
1414 
1415 	return features;
1416 }
1417 
1418 #define BOND_VLAN_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
1419 				 NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \
1420 				 NETIF_F_HIGHDMA | NETIF_F_LRO)
1421 
1422 #define BOND_ENC_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
1423 				 NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE)
1424 
1425 #define BOND_MPLS_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
1426 				 NETIF_F_GSO_SOFTWARE)
1427 
1428 
1429 static void bond_compute_features(struct bonding *bond)
1430 {
1431 	unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
1432 					IFF_XMIT_DST_RELEASE_PERM;
1433 	netdev_features_t vlan_features = BOND_VLAN_FEATURES;
1434 	netdev_features_t enc_features  = BOND_ENC_FEATURES;
1435 #ifdef CONFIG_XFRM_OFFLOAD
1436 	netdev_features_t xfrm_features  = BOND_XFRM_FEATURES;
1437 #endif /* CONFIG_XFRM_OFFLOAD */
1438 	netdev_features_t mpls_features  = BOND_MPLS_FEATURES;
1439 	struct net_device *bond_dev = bond->dev;
1440 	struct list_head *iter;
1441 	struct slave *slave;
1442 	unsigned short max_hard_header_len = ETH_HLEN;
1443 	unsigned int tso_max_size = TSO_MAX_SIZE;
1444 	u16 tso_max_segs = TSO_MAX_SEGS;
1445 
1446 	if (!bond_has_slaves(bond))
1447 		goto done;
1448 	vlan_features &= NETIF_F_ALL_FOR_ALL;
1449 	mpls_features &= NETIF_F_ALL_FOR_ALL;
1450 
1451 	bond_for_each_slave(bond, slave, iter) {
1452 		vlan_features = netdev_increment_features(vlan_features,
1453 			slave->dev->vlan_features, BOND_VLAN_FEATURES);
1454 
1455 		enc_features = netdev_increment_features(enc_features,
1456 							 slave->dev->hw_enc_features,
1457 							 BOND_ENC_FEATURES);
1458 
1459 #ifdef CONFIG_XFRM_OFFLOAD
1460 		xfrm_features = netdev_increment_features(xfrm_features,
1461 							  slave->dev->hw_enc_features,
1462 							  BOND_XFRM_FEATURES);
1463 #endif /* CONFIG_XFRM_OFFLOAD */
1464 
1465 		mpls_features = netdev_increment_features(mpls_features,
1466 							  slave->dev->mpls_features,
1467 							  BOND_MPLS_FEATURES);
1468 
1469 		dst_release_flag &= slave->dev->priv_flags;
1470 		if (slave->dev->hard_header_len > max_hard_header_len)
1471 			max_hard_header_len = slave->dev->hard_header_len;
1472 
1473 		tso_max_size = min(tso_max_size, slave->dev->tso_max_size);
1474 		tso_max_segs = min(tso_max_segs, slave->dev->tso_max_segs);
1475 	}
1476 	bond_dev->hard_header_len = max_hard_header_len;
1477 
1478 done:
1479 	bond_dev->vlan_features = vlan_features;
1480 	bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
1481 				    NETIF_F_HW_VLAN_CTAG_TX |
1482 				    NETIF_F_HW_VLAN_STAG_TX;
1483 #ifdef CONFIG_XFRM_OFFLOAD
1484 	bond_dev->hw_enc_features |= xfrm_features;
1485 #endif /* CONFIG_XFRM_OFFLOAD */
1486 	bond_dev->mpls_features = mpls_features;
1487 	netif_set_tso_max_segs(bond_dev, tso_max_segs);
1488 	netif_set_tso_max_size(bond_dev, tso_max_size);
1489 
1490 	bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
1491 	if ((bond_dev->priv_flags & IFF_XMIT_DST_RELEASE_PERM) &&
1492 	    dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
1493 		bond_dev->priv_flags |= IFF_XMIT_DST_RELEASE;
1494 
1495 	netdev_change_features(bond_dev);
1496 }
1497 
1498 static void bond_setup_by_slave(struct net_device *bond_dev,
1499 				struct net_device *slave_dev)
1500 {
1501 	bond_dev->header_ops	    = slave_dev->header_ops;
1502 
1503 	bond_dev->type		    = slave_dev->type;
1504 	bond_dev->hard_header_len   = slave_dev->hard_header_len;
1505 	bond_dev->needed_headroom   = slave_dev->needed_headroom;
1506 	bond_dev->addr_len	    = slave_dev->addr_len;
1507 
1508 	memcpy(bond_dev->broadcast, slave_dev->broadcast,
1509 		slave_dev->addr_len);
1510 }
1511 
1512 /* On bonding slaves other than the currently active slave, suppress
1513  * duplicates except for alb non-mcast/bcast.
1514  */
1515 static bool bond_should_deliver_exact_match(struct sk_buff *skb,
1516 					    struct slave *slave,
1517 					    struct bonding *bond)
1518 {
1519 	if (bond_is_slave_inactive(slave)) {
1520 		if (BOND_MODE(bond) == BOND_MODE_ALB &&
1521 		    skb->pkt_type != PACKET_BROADCAST &&
1522 		    skb->pkt_type != PACKET_MULTICAST)
1523 			return false;
1524 		return true;
1525 	}
1526 	return false;
1527 }
1528 
1529 static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
1530 {
1531 	struct sk_buff *skb = *pskb;
1532 	struct slave *slave;
1533 	struct bonding *bond;
1534 	int (*recv_probe)(const struct sk_buff *, struct bonding *,
1535 			  struct slave *);
1536 	int ret = RX_HANDLER_ANOTHER;
1537 
1538 	skb = skb_share_check(skb, GFP_ATOMIC);
1539 	if (unlikely(!skb))
1540 		return RX_HANDLER_CONSUMED;
1541 
1542 	*pskb = skb;
1543 
1544 	slave = bond_slave_get_rcu(skb->dev);
1545 	bond = slave->bond;
1546 
1547 	recv_probe = READ_ONCE(bond->recv_probe);
1548 	if (recv_probe) {
1549 		ret = recv_probe(skb, bond, slave);
1550 		if (ret == RX_HANDLER_CONSUMED) {
1551 			consume_skb(skb);
1552 			return ret;
1553 		}
1554 	}
1555 
1556 	/*
1557 	 * For packets determined by bond_should_deliver_exact_match() call to
1558 	 * be suppressed we want to make an exception for link-local packets.
1559 	 * This is necessary for e.g. LLDP daemons to be able to monitor
1560 	 * inactive slave links without being forced to bind to them
1561 	 * explicitly.
1562 	 *
1563 	 * At the same time, packets that are passed to the bonding master
1564 	 * (including link-local ones) can have their originating interface
1565 	 * determined via PACKET_ORIGDEV socket option.
1566 	 */
1567 	if (bond_should_deliver_exact_match(skb, slave, bond)) {
1568 		if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
1569 			return RX_HANDLER_PASS;
1570 		return RX_HANDLER_EXACT;
1571 	}
1572 
1573 	skb->dev = bond->dev;
1574 
1575 	if (BOND_MODE(bond) == BOND_MODE_ALB &&
1576 	    netif_is_bridge_port(bond->dev) &&
1577 	    skb->pkt_type == PACKET_HOST) {
1578 
1579 		if (unlikely(skb_cow_head(skb,
1580 					  skb->data - skb_mac_header(skb)))) {
1581 			kfree_skb(skb);
1582 			return RX_HANDLER_CONSUMED;
1583 		}
1584 		bond_hw_addr_copy(eth_hdr(skb)->h_dest, bond->dev->dev_addr,
1585 				  bond->dev->addr_len);
1586 	}
1587 
1588 	return ret;
1589 }
1590 
1591 static enum netdev_lag_tx_type bond_lag_tx_type(struct bonding *bond)
1592 {
1593 	switch (BOND_MODE(bond)) {
1594 	case BOND_MODE_ROUNDROBIN:
1595 		return NETDEV_LAG_TX_TYPE_ROUNDROBIN;
1596 	case BOND_MODE_ACTIVEBACKUP:
1597 		return NETDEV_LAG_TX_TYPE_ACTIVEBACKUP;
1598 	case BOND_MODE_BROADCAST:
1599 		return NETDEV_LAG_TX_TYPE_BROADCAST;
1600 	case BOND_MODE_XOR:
1601 	case BOND_MODE_8023AD:
1602 		return NETDEV_LAG_TX_TYPE_HASH;
1603 	default:
1604 		return NETDEV_LAG_TX_TYPE_UNKNOWN;
1605 	}
1606 }
1607 
1608 static enum netdev_lag_hash bond_lag_hash_type(struct bonding *bond,
1609 					       enum netdev_lag_tx_type type)
1610 {
1611 	if (type != NETDEV_LAG_TX_TYPE_HASH)
1612 		return NETDEV_LAG_HASH_NONE;
1613 
1614 	switch (bond->params.xmit_policy) {
1615 	case BOND_XMIT_POLICY_LAYER2:
1616 		return NETDEV_LAG_HASH_L2;
1617 	case BOND_XMIT_POLICY_LAYER34:
1618 		return NETDEV_LAG_HASH_L34;
1619 	case BOND_XMIT_POLICY_LAYER23:
1620 		return NETDEV_LAG_HASH_L23;
1621 	case BOND_XMIT_POLICY_ENCAP23:
1622 		return NETDEV_LAG_HASH_E23;
1623 	case BOND_XMIT_POLICY_ENCAP34:
1624 		return NETDEV_LAG_HASH_E34;
1625 	case BOND_XMIT_POLICY_VLAN_SRCMAC:
1626 		return NETDEV_LAG_HASH_VLAN_SRCMAC;
1627 	default:
1628 		return NETDEV_LAG_HASH_UNKNOWN;
1629 	}
1630 }
1631 
1632 static int bond_master_upper_dev_link(struct bonding *bond, struct slave *slave,
1633 				      struct netlink_ext_ack *extack)
1634 {
1635 	struct netdev_lag_upper_info lag_upper_info;
1636 	enum netdev_lag_tx_type type;
1637 	int err;
1638 
1639 	type = bond_lag_tx_type(bond);
1640 	lag_upper_info.tx_type = type;
1641 	lag_upper_info.hash_type = bond_lag_hash_type(bond, type);
1642 
1643 	err = netdev_master_upper_dev_link(slave->dev, bond->dev, slave,
1644 					   &lag_upper_info, extack);
1645 	if (err)
1646 		return err;
1647 
1648 	slave->dev->flags |= IFF_SLAVE;
1649 	return 0;
1650 }
1651 
1652 static void bond_upper_dev_unlink(struct bonding *bond, struct slave *slave)
1653 {
1654 	netdev_upper_dev_unlink(slave->dev, bond->dev);
1655 	slave->dev->flags &= ~IFF_SLAVE;
1656 }
1657 
1658 static void slave_kobj_release(struct kobject *kobj)
1659 {
1660 	struct slave *slave = to_slave(kobj);
1661 	struct bonding *bond = bond_get_bond_by_slave(slave);
1662 
1663 	cancel_delayed_work_sync(&slave->notify_work);
1664 	if (BOND_MODE(bond) == BOND_MODE_8023AD)
1665 		kfree(SLAVE_AD_INFO(slave));
1666 
1667 	kfree(slave);
1668 }
1669 
1670 static struct kobj_type slave_ktype = {
1671 	.release = slave_kobj_release,
1672 #ifdef CONFIG_SYSFS
1673 	.sysfs_ops = &slave_sysfs_ops,
1674 #endif
1675 };
1676 
1677 static int bond_kobj_init(struct slave *slave)
1678 {
1679 	int err;
1680 
1681 	err = kobject_init_and_add(&slave->kobj, &slave_ktype,
1682 				   &(slave->dev->dev.kobj), "bonding_slave");
1683 	if (err)
1684 		kobject_put(&slave->kobj);
1685 
1686 	return err;
1687 }
1688 
1689 static struct slave *bond_alloc_slave(struct bonding *bond,
1690 				      struct net_device *slave_dev)
1691 {
1692 	struct slave *slave = NULL;
1693 
1694 	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
1695 	if (!slave)
1696 		return NULL;
1697 
1698 	slave->bond = bond;
1699 	slave->dev = slave_dev;
1700 	INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);
1701 
1702 	if (bond_kobj_init(slave))
1703 		return NULL;
1704 
1705 	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
1706 		SLAVE_AD_INFO(slave) = kzalloc(sizeof(struct ad_slave_info),
1707 					       GFP_KERNEL);
1708 		if (!SLAVE_AD_INFO(slave)) {
1709 			kobject_put(&slave->kobj);
1710 			return NULL;
1711 		}
1712 	}
1713 
1714 	return slave;
1715 }
1716 
1717 static void bond_fill_ifbond(struct bonding *bond, struct ifbond *info)
1718 {
1719 	info->bond_mode = BOND_MODE(bond);
1720 	info->miimon = bond->params.miimon;
1721 	info->num_slaves = bond->slave_cnt;
1722 }
1723 
1724 static void bond_fill_ifslave(struct slave *slave, struct ifslave *info)
1725 {
1726 	strcpy(info->slave_name, slave->dev->name);
1727 	info->link = slave->link;
1728 	info->state = bond_slave_state(slave);
1729 	info->link_failure_count = slave->link_failure_count;
1730 }
1731 
1732 static void bond_netdev_notify_work(struct work_struct *_work)
1733 {
1734 	struct slave *slave = container_of(_work, struct slave,
1735 					   notify_work.work);
1736 
1737 	if (rtnl_trylock()) {
1738 		struct netdev_bonding_info binfo;
1739 
1740 		bond_fill_ifslave(slave, &binfo.slave);
1741 		bond_fill_ifbond(slave->bond, &binfo.master);
1742 		netdev_bonding_info_change(slave->dev, &binfo);
1743 		rtnl_unlock();
1744 	} else {
1745 		queue_delayed_work(slave->bond->wq, &slave->notify_work, 1);
1746 	}
1747 }
1748 
1749 void bond_queue_slave_event(struct slave *slave)
1750 {
1751 	queue_delayed_work(slave->bond->wq, &slave->notify_work, 0);
1752 }
1753 
1754 void bond_lower_state_changed(struct slave *slave)
1755 {
1756 	struct netdev_lag_lower_state_info info;
1757 
1758 	info.link_up = slave->link == BOND_LINK_UP ||
1759 		       slave->link == BOND_LINK_FAIL;
1760 	info.tx_enabled = bond_is_active_slave(slave);
1761 	netdev_lower_state_changed(slave->dev, &info);
1762 }
1763 
1764 #define BOND_NL_ERR(bond_dev, extack, errmsg) do {		\
1765 	if (extack)						\
1766 		NL_SET_ERR_MSG(extack, errmsg);			\
1767 	else							\
1768 		netdev_err(bond_dev, "Error: %s\n", errmsg);	\
1769 } while (0)
1770 
1771 #define SLAVE_NL_ERR(bond_dev, slave_dev, extack, errmsg) do {		\
1772 	if (extack)							\
1773 		NL_SET_ERR_MSG(extack, errmsg);				\
1774 	else								\
1775 		slave_err(bond_dev, slave_dev, "Error: %s\n", errmsg);	\
1776 } while (0)
1777 
1778 /* The bonding driver uses ether_setup() to convert a master bond device
1779  * to ARPHRD_ETHER; that resets the target netdevice's flags, so we always
1780  * have to restore the IFF_MASTER flag and only restore IFF_SLAVE if it was set.
1781  */
1782 static void bond_ether_setup(struct net_device *bond_dev)
1783 {
1784 	unsigned int slave_flag = bond_dev->flags & IFF_SLAVE;
1785 
1786 	ether_setup(bond_dev);
1787 	bond_dev->flags |= IFF_MASTER | slave_flag;
1788 	bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1789 }
1790 
1791 /* enslave device <slave> to bond device <master> */
1792 int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
1793 		 struct netlink_ext_ack *extack)
1794 {
1795 	struct bonding *bond = netdev_priv(bond_dev);
1796 	const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
1797 	struct slave *new_slave = NULL, *prev_slave;
1798 	struct sockaddr_storage ss;
1799 	int link_reporting;
1800 	int res = 0, i;
1801 
1802 	if (slave_dev->flags & IFF_MASTER &&
1803 	    !netif_is_bond_master(slave_dev)) {
1804 		BOND_NL_ERR(bond_dev, extack,
1805 			    "Device type (master device) cannot be enslaved");
1806 		return -EPERM;
1807 	}
1808 
1809 	if (!bond->params.use_carrier &&
1810 	    slave_dev->ethtool_ops->get_link == NULL &&
1811 	    slave_ops->ndo_eth_ioctl == NULL) {
1812 		slave_warn(bond_dev, slave_dev, "no link monitoring support\n");
1813 	}
1814 
1815 	/* already in-use? */
1816 	if (netdev_is_rx_handler_busy(slave_dev)) {
1817 		SLAVE_NL_ERR(bond_dev, slave_dev, extack,
1818 			     "Device is in use and cannot be enslaved");
1819 		return -EBUSY;
1820 	}
1821 
1822 	if (bond_dev == slave_dev) {
1823 		BOND_NL_ERR(bond_dev, extack, "Cannot enslave bond to itself.");
1824 		return -EPERM;
1825 	}
1826 
1827 	/* vlan challenged mutual exclusion */
1828 	/* no need to lock since we're protected by rtnl_lock */
1829 	if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) {
1830 		slave_dbg(bond_dev, slave_dev, "is NETIF_F_VLAN_CHALLENGED\n");
1831 		if (vlan_uses_dev(bond_dev)) {
1832 			SLAVE_NL_ERR(bond_dev, slave_dev, extack,
1833 				     "Can not enslave VLAN challenged device to VLAN enabled bond");
1834 			return -EPERM;
1835 		} else {
1836 			slave_warn(bond_dev, slave_dev, "enslaved VLAN challenged slave. Adding VLANs will be blocked as long as it is part of bond.\n");
1837 		}
1838 	} else {
1839 		slave_dbg(bond_dev, slave_dev, "is !NETIF_F_VLAN_CHALLENGED\n");
1840 	}
1841 
1842 	if (slave_dev->features & NETIF_F_HW_ESP)
1843 		slave_dbg(bond_dev, slave_dev, "is esp-hw-offload capable\n");
1844 
1845 	/* Old ifenslave binaries are no longer supported.  These can
1846 	 * be identified with moderate accuracy by the state of the slave:
1847 	 * the current ifenslave will set the interface down prior to
1848 	 * enslaving it; the old ifenslave will not.
1849 	 */
1850 	if (slave_dev->flags & IFF_UP) {
1851 		SLAVE_NL_ERR(bond_dev, slave_dev, extack,
1852 			     "Device can not be enslaved while up");
1853 		return -EPERM;
1854 	}
1855 
1856 	/* set bonding device ether type by slave - bonding netdevices are
1857 	 * created with ether_setup, so when the slave type is not ARPHRD_ETHER
1858 	 * there is a need to override some of the type dependent attribs/funcs.
1859 	 *
1860 	 * bond ether type mutual exclusion - don't allow slaves of dissimilar
1861 	 * ether type (eg ARPHRD_ETHER and ARPHRD_INFINIBAND) to share the same bond
1862 	 */
1863 	if (!bond_has_slaves(bond)) {
1864 		if (bond_dev->type != slave_dev->type) {
1865 			slave_dbg(bond_dev, slave_dev, "change device type from %d to %d\n",
1866 				  bond_dev->type, slave_dev->type);
1867 
1868 			res = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE,
1869 						       bond_dev);
1870 			res = notifier_to_errno(res);
1871 			if (res) {
1872 				slave_err(bond_dev, slave_dev, "refused to change device type\n");
1873 				return -EBUSY;
1874 			}
1875 
1876 			/* Flush unicast and multicast addresses */
1877 			dev_uc_flush(bond_dev);
1878 			dev_mc_flush(bond_dev);
1879 
1880 			if (slave_dev->type != ARPHRD_ETHER)
1881 				bond_setup_by_slave(bond_dev, slave_dev);
1882 			else
1883 				bond_ether_setup(bond_dev);
1884 
1885 			call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
1886 						 bond_dev);
1887 		}
1888 	} else if (bond_dev->type != slave_dev->type) {
1889 		SLAVE_NL_ERR(bond_dev, slave_dev, extack,
1890 			     "Device type is different from other slaves");
1891 		return -EINVAL;
1892 	}
1893 
1894 	if (slave_dev->type == ARPHRD_INFINIBAND &&
1895 	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
1896 		SLAVE_NL_ERR(bond_dev, slave_dev, extack,
1897 			     "Only active-backup mode is supported for infiniband slaves");
1898 		res = -EOPNOTSUPP;
1899 		goto err_undo_flags;
1900 	}
1901 
1902 	if (!slave_ops->ndo_set_mac_address ||
1903 	    slave_dev->type == ARPHRD_INFINIBAND) {
1904 		slave_warn(bond_dev, slave_dev, "The slave device specified does not support setting the MAC address\n");
1905 		if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
1906 		    bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
1907 			if (!bond_has_slaves(bond)) {
1908 				bond->params.fail_over_mac = BOND_FOM_ACTIVE;
1909 				slave_warn(bond_dev, slave_dev, "Setting fail_over_mac to active for active-backup mode\n");
1910 			} else {
1911 				SLAVE_NL_ERR(bond_dev, slave_dev, extack,
1912 					     "Slave device does not support setting the MAC address, but fail_over_mac is not set to active");
1913 				res = -EOPNOTSUPP;
1914 				goto err_undo_flags;
1915 			}
1916 		}
1917 	}
1918 
1919 	call_netdevice_notifiers(NETDEV_JOIN, slave_dev);
1920 
1921 	/* If this is the first slave, then we need to set the master's hardware
1922 	 * address to be the same as the slave's.
1923 	 */
1924 	if (!bond_has_slaves(bond) &&
1925 	    bond->dev->addr_assign_type == NET_ADDR_RANDOM) {
1926 		res = bond_set_dev_addr(bond->dev, slave_dev);
1927 		if (res)
1928 			goto err_undo_flags;
1929 	}
1930 
1931 	new_slave = bond_alloc_slave(bond, slave_dev);
1932 	if (!new_slave) {
1933 		res = -ENOMEM;
1934 		goto err_undo_flags;
1935 	}
1936 
1937 	/* Set the new_slave's queue_id to be zero.  Queue ID mapping
1938 	 * is set via sysfs or module option if desired.
1939 	 */
1940 	new_slave->queue_id = 0;
1941 
1942 	/* Save slave's original mtu and then set it to match the bond */
1943 	new_slave->original_mtu = slave_dev->mtu;
1944 	res = dev_set_mtu(slave_dev, bond->dev->mtu);
1945 	if (res) {
1946 		slave_err(bond_dev, slave_dev, "Error %d calling dev_set_mtu\n", res);
1947 		goto err_free;
1948 	}
1949 
1950 	/* Save slave's original ("permanent") mac address for modes
1951 	 * that need it, and for restoring it upon release, and then
1952 	 * set it to the master's address
1953 	 */
1954 	bond_hw_addr_copy(new_slave->perm_hwaddr, slave_dev->dev_addr,
1955 			  slave_dev->addr_len);
1956 
1957 	if (!bond->params.fail_over_mac ||
1958 	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
1959 		/* Set slave to master's mac address.  The application already
1960 		 * set the master's mac address to that of the first slave
1961 		 */
1962 		memcpy(ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
1963 		ss.ss_family = slave_dev->type;
1964 		res = dev_set_mac_address(slave_dev, (struct sockaddr *)&ss,
1965 					  extack);
1966 		if (res) {
1967 			slave_err(bond_dev, slave_dev, "Error %d calling set_mac_address\n", res);
1968 			goto err_restore_mtu;
1969 		}
1970 	}
1971 
1972 	/* set no_addrconf flag before open to prevent IPv6 addrconf */
1973 	slave_dev->priv_flags |= IFF_NO_ADDRCONF;
1974 
1975 	/* open the slave since the application closed it */
1976 	res = dev_open(slave_dev, extack);
1977 	if (res) {
1978 		slave_err(bond_dev, slave_dev, "Opening slave failed\n");
1979 		goto err_restore_mac;
1980 	}
1981 
1982 	slave_dev->priv_flags |= IFF_BONDING;
1983 	/* initialize slave stats */
1984 	dev_get_stats(new_slave->dev, &new_slave->slave_stats);
1985 
1986 	if (bond_is_lb(bond)) {
1987 		/* bond_alb_init_slave() must be called before all other stages since
1988 		 * it might fail and we do not want to have to undo everything
1989 		 */
1990 		res = bond_alb_init_slave(bond, new_slave);
1991 		if (res)
1992 			goto err_close;
1993 	}
1994 
1995 	res = vlan_vids_add_by_dev(slave_dev, bond_dev);
1996 	if (res) {
1997 		slave_err(bond_dev, slave_dev, "Couldn't add bond vlan ids\n");
1998 		goto err_close;
1999 	}
2000 
2001 	prev_slave = bond_last_slave(bond);
2002 
2003 	new_slave->delay = 0;
2004 	new_slave->link_failure_count = 0;
2005 
2006 	if (bond_update_speed_duplex(new_slave) &&
2007 	    bond_needs_speed_duplex(bond))
2008 		new_slave->link = BOND_LINK_DOWN;
2009 
2010 	new_slave->last_rx = jiffies -
2011 		(msecs_to_jiffies(bond->params.arp_interval) + 1);
2012 	for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
2013 		new_slave->target_last_arp_rx[i] = new_slave->last_rx;
2014 
2015 	new_slave->last_tx = new_slave->last_rx;
2016 
2017 	if (bond->params.miimon && !bond->params.use_carrier) {
2018 		link_reporting = bond_check_dev_link(bond, slave_dev, 1);
2019 
2020 		if ((link_reporting == -1) && !bond->params.arp_interval) {
2021 			/* miimon is set but a bonded network driver
2022 			 * does not support ETHTOOL/MII and
2023 			 * arp_interval is not set.  Note: if
2024 			 * use_carrier is enabled, we never get
2025 			 * here (because netif_carrier is always
2026 			 * supported); thus, we don't need to change
2027 			 * the messages for netif_carrier.
2028 			 */
2029 			slave_warn(bond_dev, slave_dev, "MII and ETHTOOL support not available for slave, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details\n");
2030 		} else if (link_reporting == -1) {
2031 			/* unable to get link status using mii/ethtool */
2032 			slave_warn(bond_dev, slave_dev, "can't get link status from slave; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface\n");
2033 		}
2034 	}
2035 
2036 	/* check for initial state */
2037 	new_slave->link = BOND_LINK_NOCHANGE;
2038 	if (bond->params.miimon) {
2039 		if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
2040 			if (bond->params.updelay) {
2041 				bond_set_slave_link_state(new_slave,
2042 							  BOND_LINK_BACK,
2043 							  BOND_SLAVE_NOTIFY_NOW);
2044 				new_slave->delay = bond->params.updelay;
2045 			} else {
2046 				bond_set_slave_link_state(new_slave,
2047 							  BOND_LINK_UP,
2048 							  BOND_SLAVE_NOTIFY_NOW);
2049 			}
2050 		} else {
2051 			bond_set_slave_link_state(new_slave, BOND_LINK_DOWN,
2052 						  BOND_SLAVE_NOTIFY_NOW);
2053 		}
2054 	} else if (bond->params.arp_interval) {
2055 		bond_set_slave_link_state(new_slave,
2056 					  (netif_carrier_ok(slave_dev) ?
2057 					  BOND_LINK_UP : BOND_LINK_DOWN),
2058 					  BOND_SLAVE_NOTIFY_NOW);
2059 	} else {
2060 		bond_set_slave_link_state(new_slave, BOND_LINK_UP,
2061 					  BOND_SLAVE_NOTIFY_NOW);
2062 	}
2063 
2064 	if (new_slave->link != BOND_LINK_DOWN)
2065 		new_slave->last_link_up = jiffies;
2066 	slave_dbg(bond_dev, slave_dev, "Initial state of slave is BOND_LINK_%s\n",
2067 		  new_slave->link == BOND_LINK_DOWN ? "DOWN" :
2068 		  (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
2069 
2070 	if (bond_uses_primary(bond) && bond->params.primary[0]) {
2071 		/* if there is a primary slave, remember it */
2072 		if (strcmp(bond->params.primary, new_slave->dev->name) == 0) {
2073 			rcu_assign_pointer(bond->primary_slave, new_slave);
2074 			bond->force_primary = true;
2075 		}
2076 	}
2077 
2078 	switch (BOND_MODE(bond)) {
2079 	case BOND_MODE_ACTIVEBACKUP:
2080 		bond_set_slave_inactive_flags(new_slave,
2081 					      BOND_SLAVE_NOTIFY_NOW);
2082 		break;
2083 	case BOND_MODE_8023AD:
2084 		/* in 802.3ad mode, the internal mechanism
2085 		 * will activate the slaves in the selected
2086 		 * aggregator
2087 		 */
2088 		bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
2089 		/* if this is the first slave */
2090 		if (!prev_slave) {
2091 			SLAVE_AD_INFO(new_slave)->id = 1;
2092 			/* Initialize AD with the number of times that the AD timer is called in 1 second;
2093 			 * this can be done only after the mac address of the bond is set
2094 			 */
2095 			bond_3ad_initialize(bond);
2096 		} else {
2097 			SLAVE_AD_INFO(new_slave)->id =
2098 				SLAVE_AD_INFO(prev_slave)->id + 1;
2099 		}
2100 
2101 		bond_3ad_bind_slave(new_slave);
2102 		break;
2103 	case BOND_MODE_TLB:
2104 	case BOND_MODE_ALB:
2105 		bond_set_active_slave(new_slave);
2106 		bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
2107 		break;
2108 	default:
2109 		slave_dbg(bond_dev, slave_dev, "This slave is always active in trunk mode\n");
2110 
2111 		/* always active in trunk mode */
2112 		bond_set_active_slave(new_slave);
2113 
2114 		/* In trunking mode there is little meaning to curr_active_slave
2115 		 * anyway (it holds no special properties of the bond device),
2116 		 * so we can change it without calling change_active_interface()
2117 		 */
2118 		if (!rcu_access_pointer(bond->curr_active_slave) &&
2119 		    new_slave->link == BOND_LINK_UP)
2120 			rcu_assign_pointer(bond->curr_active_slave, new_slave);
2121 
2122 		break;
2123 	} /* switch(bond_mode) */
2124 
2125 #ifdef CONFIG_NET_POLL_CONTROLLER
2126 	if (bond->dev->npinfo) {
2127 		if (slave_enable_netpoll(new_slave)) {
2128 			slave_info(bond_dev, slave_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n");
2129 			res = -EBUSY;
2130 			goto err_detach;
2131 		}
2132 	}
2133 #endif
2134 
2135 	if (!(bond_dev->features & NETIF_F_LRO))
2136 		dev_disable_lro(slave_dev);
2137 
2138 	res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
2139 					 new_slave);
2140 	if (res) {
2141 		slave_dbg(bond_dev, slave_dev, "Error %d calling netdev_rx_handler_register\n", res);
2142 		goto err_detach;
2143 	}
2144 
2145 	res = bond_master_upper_dev_link(bond, new_slave, extack);
2146 	if (res) {
2147 		slave_dbg(bond_dev, slave_dev, "Error %d calling bond_master_upper_dev_link\n", res);
2148 		goto err_unregister;
2149 	}
2150 
2151 	bond_lower_state_changed(new_slave);
2152 
2153 	res = bond_sysfs_slave_add(new_slave);
2154 	if (res) {
2155 		slave_dbg(bond_dev, slave_dev, "Error %d calling bond_sysfs_slave_add\n", res);
2156 		goto err_upper_unlink;
2157 	}
2158 
2159 	/* If the mode uses primary, then the following is handled by
2160 	 * bond_change_active_slave().
2161 	 */
2162 	if (!bond_uses_primary(bond)) {
2163 		/* set promiscuity level to new slave */
2164 		if (bond_dev->flags & IFF_PROMISC) {
2165 			res = dev_set_promiscuity(slave_dev, 1);
2166 			if (res)
2167 				goto err_sysfs_del;
2168 		}
2169 
2170 		/* set allmulti level to new slave */
2171 		if (bond_dev->flags & IFF_ALLMULTI) {
2172 			res = dev_set_allmulti(slave_dev, 1);
2173 			if (res) {
2174 				if (bond_dev->flags & IFF_PROMISC)
2175 					dev_set_promiscuity(slave_dev, -1);
2176 				goto err_sysfs_del;
2177 			}
2178 		}
2179 
2180 		if (bond_dev->flags & IFF_UP) {
2181 			netif_addr_lock_bh(bond_dev);
2182 			dev_mc_sync_multiple(slave_dev, bond_dev);
2183 			dev_uc_sync_multiple(slave_dev, bond_dev);
2184 			netif_addr_unlock_bh(bond_dev);
2185 
2186 			if (BOND_MODE(bond) == BOND_MODE_8023AD)
2187 				dev_mc_add(slave_dev, lacpdu_mcast_addr);
2188 		}
2189 	}
2190 
2191 	bond->slave_cnt++;
2192 	bond_compute_features(bond);
2193 	bond_set_carrier(bond);
2194 
2195 	if (bond_uses_primary(bond)) {
2196 		block_netpoll_tx();
2197 		bond_select_active_slave(bond);
2198 		unblock_netpoll_tx();
2199 	}
2200 
2201 	if (bond_mode_can_use_xmit_hash(bond))
2202 		bond_update_slave_arr(bond, NULL);
2203 
2205 	if (!slave_dev->netdev_ops->ndo_bpf ||
2206 	    !slave_dev->netdev_ops->ndo_xdp_xmit) {
2207 		if (bond->xdp_prog) {
2208 			SLAVE_NL_ERR(bond_dev, slave_dev, extack,
2209 				     "Slave does not support XDP");
2210 			res = -EOPNOTSUPP;
2211 			goto err_sysfs_del;
2212 		}
2213 	} else if (bond->xdp_prog) {
2214 		struct netdev_bpf xdp = {
2215 			.command = XDP_SETUP_PROG,
2216 			.flags   = 0,
2217 			.prog    = bond->xdp_prog,
2218 			.extack  = extack,
2219 		};
2220 
2221 		if (dev_xdp_prog_count(slave_dev) > 0) {
2222 			SLAVE_NL_ERR(bond_dev, slave_dev, extack,
2223 				     "Slave has XDP program loaded, please unload before enslaving");
2224 			res = -EOPNOTSUPP;
2225 			goto err_sysfs_del;
2226 		}
2227 
2228 		res = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
2229 		if (res < 0) {
2230 			/* ndo_bpf() sets extack error message */
2231 			slave_dbg(bond_dev, slave_dev, "Error %d calling ndo_bpf\n", res);
2232 			goto err_sysfs_del;
2233 		}
2234 		if (bond->xdp_prog)
2235 			bpf_prog_inc(bond->xdp_prog);
2236 	}
2237 
2238 	slave_info(bond_dev, slave_dev, "Enslaving as %s interface with %s link\n",
2239 		   bond_is_active_slave(new_slave) ? "an active" : "a backup",
2240 		   new_slave->link != BOND_LINK_DOWN ? "an up" : "a down");
2241 
2242 	/* enslave is successful */
2243 	bond_queue_slave_event(new_slave);
2244 	return 0;
2245 
2246 /* Undo stages on error */
2247 err_sysfs_del:
2248 	bond_sysfs_slave_del(new_slave);
2249 
2250 err_upper_unlink:
2251 	bond_upper_dev_unlink(bond, new_slave);
2252 
2253 err_unregister:
2254 	netdev_rx_handler_unregister(slave_dev);
2255 
2256 err_detach:
2257 	vlan_vids_del_by_dev(slave_dev, bond_dev);
2258 	if (rcu_access_pointer(bond->primary_slave) == new_slave)
2259 		RCU_INIT_POINTER(bond->primary_slave, NULL);
2260 	if (rcu_access_pointer(bond->curr_active_slave) == new_slave) {
2261 		block_netpoll_tx();
2262 		bond_change_active_slave(bond, NULL);
2263 		bond_select_active_slave(bond);
2264 		unblock_netpoll_tx();
2265 	}
2266 	/* either primary_slave or curr_active_slave might've changed */
2267 	synchronize_rcu();
2268 	slave_disable_netpoll(new_slave);
2269 
2270 err_close:
2271 	if (!netif_is_bond_master(slave_dev))
2272 		slave_dev->priv_flags &= ~IFF_BONDING;
2273 	dev_close(slave_dev);
2274 
2275 err_restore_mac:
2276 	slave_dev->priv_flags &= ~IFF_NO_ADDRCONF;
2277 	if (!bond->params.fail_over_mac ||
2278 	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
2279 		/* XXX TODO - fail_over_mac "follow" mode needs to change the
2280 		 * master's MAC if this slave's MAC is in use by the bond, or at
2281 		 * least print a warning.
2282 		 */
2283 		bond_hw_addr_copy(ss.__data, new_slave->perm_hwaddr,
2284 				  new_slave->dev->addr_len);
2285 		ss.ss_family = slave_dev->type;
2286 		dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, NULL);
2287 	}
2288 
2289 err_restore_mtu:
2290 	dev_set_mtu(slave_dev, new_slave->original_mtu);
2291 
2292 err_free:
2293 	kobject_put(&new_slave->kobj);
2294 
2295 err_undo_flags:
2296 	/* Enslaving the first slave has failed and we need to fix the master's mac */
2297 	if (!bond_has_slaves(bond)) {
2298 		if (ether_addr_equal_64bits(bond_dev->dev_addr,
2299 					    slave_dev->dev_addr))
2300 			eth_hw_addr_random(bond_dev);
2301 		if (bond_dev->type != ARPHRD_ETHER) {
2302 			dev_close(bond_dev);
2303 			bond_ether_setup(bond_dev);
2304 		}
2305 	}
2306 
2307 	return res;
2308 }
2309 
2310 /* Try to release the slave device <slave> from the bond device <master>.
2311  * It is legal to access curr_active_slave without a lock because the whole
2312  * function is RTNL-locked. If "all" is true the function is being called
2313  * while destroying a bond interface and all slaves are being released.
2314  *
2315  * The rules for slave state should be:
2316  *   for Active/Backup:
2317  *     the active slave stays up, all backups go down
2318  *   for Bonded connections:
2319  *     the first up interface should be left on and all others downed.
2320  */
2321 static int __bond_release_one(struct net_device *bond_dev,
2322 			      struct net_device *slave_dev,
2323 			      bool all, bool unregister)
2324 {
2325 	struct bonding *bond = netdev_priv(bond_dev);
2326 	struct slave *slave, *oldcurrent;
2327 	struct sockaddr_storage ss;
2328 	int old_flags = bond_dev->flags;
2329 	netdev_features_t old_features = bond_dev->features;
2330 
2331 	/* slave is not a slave or master is not master of this slave */
2332 	if (!(slave_dev->flags & IFF_SLAVE) ||
2333 	    !netdev_has_upper_dev(slave_dev, bond_dev)) {
2334 		slave_dbg(bond_dev, slave_dev, "cannot release slave\n");
2335 		return -EINVAL;
2336 	}
2337 
2338 	block_netpoll_tx();
2339 
2340 	slave = bond_get_slave_by_dev(bond, slave_dev);
2341 	if (!slave) {
2342 		/* not a slave of this bond */
2343 		slave_info(bond_dev, slave_dev, "interface not enslaved\n");
2344 		unblock_netpoll_tx();
2345 		return -EINVAL;
2346 	}
2347 
2348 	bond_set_slave_inactive_flags(slave, BOND_SLAVE_NOTIFY_NOW);
2349 
2350 	bond_sysfs_slave_del(slave);
2351 
2352 	/* recompute stats just before removing the slave */
2353 	bond_get_stats(bond->dev, &bond->bond_stats);
2354 
2355 	if (bond->xdp_prog) {
2356 		struct netdev_bpf xdp = {
2357 			.command = XDP_SETUP_PROG,
2358 			.flags   = 0,
2359 			.prog	 = NULL,
2360 			.extack  = NULL,
2361 		};
2362 		if (slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp))
2363 			slave_warn(bond_dev, slave_dev, "failed to unload XDP program\n");
2364 	}
2365 
2366 	/* unregister rx_handler early so bond_handle_frame won't be called
2367 	 * for this slave anymore.
2368 	 */
2369 	netdev_rx_handler_unregister(slave_dev);
2370 
2371 	if (BOND_MODE(bond) == BOND_MODE_8023AD)
2372 		bond_3ad_unbind_slave(slave);
2373 
2374 	bond_upper_dev_unlink(bond, slave);
2375 
2376 	if (bond_mode_can_use_xmit_hash(bond))
2377 		bond_update_slave_arr(bond, slave);
2378 
2379 	slave_info(bond_dev, slave_dev, "Releasing %s interface\n",
2380 		    bond_is_active_slave(slave) ? "active" : "backup");
2381 
2382 	oldcurrent = rcu_access_pointer(bond->curr_active_slave);
2383 
2384 	RCU_INIT_POINTER(bond->current_arp_slave, NULL);
2385 
2386 	if (!all && (!bond->params.fail_over_mac ||
2387 		     BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)) {
2388 		if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
2389 		    bond_has_slaves(bond))
2390 			slave_warn(bond_dev, slave_dev, "the permanent HWaddr of slave - %pM - is still in use by bond - set the HWaddr of slave to a different address to avoid conflicts\n",
2391 				   slave->perm_hwaddr);
2392 	}
2393 
2394 	if (rtnl_dereference(bond->primary_slave) == slave)
2395 		RCU_INIT_POINTER(bond->primary_slave, NULL);
2396 
2397 	if (oldcurrent == slave)
2398 		bond_change_active_slave(bond, NULL);
2399 
2400 	if (bond_is_lb(bond)) {
2401 		/* Must be called only after the slave has been
2402 		 * detached from the list and the curr_active_slave
2403 		 * has been cleared (if our_slave == old_current),
2404 		 * but before a new active slave is selected.
2405 		 */
2406 		bond_alb_deinit_slave(bond, slave);
2407 	}
2408 
2409 	if (all) {
2410 		RCU_INIT_POINTER(bond->curr_active_slave, NULL);
2411 	} else if (oldcurrent == slave) {
2412 		/* Note that we hold RTNL over this sequence, so there
2413 		 * is no concern that another slave add/remove event
2414 		 * will interfere.
2415 		 */
2416 		bond_select_active_slave(bond);
2417 	}
2418 
2419 	bond_set_carrier(bond);
2420 	if (!bond_has_slaves(bond))
2421 		eth_hw_addr_random(bond_dev);
2422 
2423 	unblock_netpoll_tx();
2424 	synchronize_rcu();
2425 	bond->slave_cnt--;
2426 
2427 	if (!bond_has_slaves(bond)) {
2428 		call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
2429 		call_netdevice_notifiers(NETDEV_RELEASE, bond->dev);
2430 	}
2431 
2432 	bond_compute_features(bond);
2433 	if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
2434 	    (old_features & NETIF_F_VLAN_CHALLENGED))
2435 		slave_info(bond_dev, slave_dev, "last VLAN challenged slave left bond - VLAN blocking is removed\n");
2436 
2437 	vlan_vids_del_by_dev(slave_dev, bond_dev);
2438 
2439 	/* If the mode uses primary, then this case was handled above by
2440 	 * bond_change_active_slave(..., NULL)
2441 	 */
2442 	if (!bond_uses_primary(bond)) {
2443 		/* unset promiscuity level from slave
2444 		 * NOTE: The NETDEV_CHANGEADDR call above may change the value
2445 		 * of the IFF_PROMISC flag in the bond_dev, but we need the
2446 		 * value of that flag before that change, as that was the value
2447 		 * when this slave was attached, so we cache it at the start of
2448 		 * the function and use it here. The same goes for ALLMULTI below.
2449 		 */
2450 		if (old_flags & IFF_PROMISC)
2451 			dev_set_promiscuity(slave_dev, -1);
2452 
2453 		/* unset allmulti level from slave */
2454 		if (old_flags & IFF_ALLMULTI)
2455 			dev_set_allmulti(slave_dev, -1);
2456 
2457 		if (old_flags & IFF_UP)
2458 			bond_hw_addr_flush(bond_dev, slave_dev);
2459 	}
2460 
2461 	slave_disable_netpoll(slave);
2462 
2463 	/* close slave before restoring its mac address */
2464 	dev_close(slave_dev);
2465 
2466 	slave_dev->priv_flags &= ~IFF_NO_ADDRCONF;
2467 
2468 	if (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
2469 	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
2470 		/* restore original ("permanent") mac address */
2471 		bond_hw_addr_copy(ss.__data, slave->perm_hwaddr,
2472 				  slave->dev->addr_len);
2473 		ss.ss_family = slave_dev->type;
2474 		dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, NULL);
2475 	}
2476 
2477 	if (unregister)
2478 		__dev_set_mtu(slave_dev, slave->original_mtu);
2479 	else
2480 		dev_set_mtu(slave_dev, slave->original_mtu);
2481 
2482 	if (!netif_is_bond_master(slave_dev))
2483 		slave_dev->priv_flags &= ~IFF_BONDING;
2484 
2485 	kobject_put(&slave->kobj);
2486 
2487 	return 0;
2488 }
2489 
2490 /* A wrapper used because of ndo_del_slave */
2491 int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
2492 {
2493 	return __bond_release_one(bond_dev, slave_dev, false, false);
2494 }
2495 
2496 /* First release a slave and then destroy the bond if no more slaves are left.
2497  * Must be called under rtnl_lock.
2498  */
2499 static int bond_release_and_destroy(struct net_device *bond_dev,
2500 				    struct net_device *slave_dev)
2501 {
2502 	struct bonding *bond = netdev_priv(bond_dev);
2503 	int ret;
2504 
2505 	ret = __bond_release_one(bond_dev, slave_dev, false, true);
2506 	if (ret == 0 && !bond_has_slaves(bond) &&
2507 	    bond_dev->reg_state != NETREG_UNREGISTERING) {
2508 		bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
2509 		netdev_info(bond_dev, "Destroying bond\n");
2510 		bond_remove_proc_entry(bond);
2511 		unregister_netdevice(bond_dev);
2512 	}
2513 	return ret;
2514 }
2515 
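/* Handlers for the bond info / slave info queries: fill the ifbond summary
 * for the whole bond, or the ifslave entry selected by info->slave_id.
 */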
2516 static void bond_info_query(struct net_device *bond_dev, struct ifbond *info)
2517 {
2518 	struct bonding *bond = netdev_priv(bond_dev);
2519 
2520 	bond_fill_ifbond(bond, info);
2521 }
2522 
2523 static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *info)
2524 {
2525 	struct bonding *bond = netdev_priv(bond_dev);
2526 	struct list_head *iter;
2527 	int i = 0, res = -ENODEV;
2528 	struct slave *slave;
2529 
2530 	bond_for_each_slave(bond, slave, iter) {
2531 		if (i++ == (int)info->slave_id) {
2532 			res = 0;
2533 			bond_fill_ifslave(slave, info);
2534 			break;
2535 		}
2536 	}
2537 
2538 	return res;
2539 }
2540 
2541 /*-------------------------------- Monitoring -------------------------------*/
2542 
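/* First, lockless (RCU) phase of the MII monitor: poll each slave's link and
 * propose state transitions (UP -> FAIL -> DOWN and DOWN -> BACK -> UP,
 * honouring downdelay/updelay).  Returns the number of proposed changes;
 * bond_miimon_commit() applies them under RTNL.
 */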
2543 /* called with rcu_read_lock() */
2544 static int bond_miimon_inspect(struct bonding *bond)
2545 {
2546 	bool ignore_updelay = false;
2547 	int link_state, commit = 0;
2548 	struct list_head *iter;
2549 	struct slave *slave;
2550 
2551 	if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
2552 		ignore_updelay = !rcu_dereference(bond->curr_active_slave);
2553 	} else {
2554 		struct bond_up_slave *usable_slaves;
2555 
2556 		usable_slaves = rcu_dereference(bond->usable_slaves);
2557 
2558 		if (usable_slaves && usable_slaves->count == 0)
2559 			ignore_updelay = true;
2560 	}
2561 
2562 	bond_for_each_slave_rcu(bond, slave, iter) {
2563 		bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
2564 
2565 		link_state = bond_check_dev_link(bond, slave->dev, 0);
2566 
2567 		switch (slave->link) {
2568 		case BOND_LINK_UP:
2569 			if (link_state)
2570 				continue;
2571 
2572 			bond_propose_link_state(slave, BOND_LINK_FAIL);
2573 			commit++;
2574 			slave->delay = bond->params.downdelay;
2575 			if (slave->delay) {
2576 				slave_info(bond->dev, slave->dev, "link status down for %sinterface, disabling it in %d ms\n",
2577 					   (BOND_MODE(bond) ==
2578 					    BOND_MODE_ACTIVEBACKUP) ?
2579 					    (bond_is_active_slave(slave) ?
2580 					     "active " : "backup ") : "",
2581 					   bond->params.downdelay * bond->params.miimon);
2582 			}
2583 			fallthrough;
2584 		case BOND_LINK_FAIL:
2585 			if (link_state) {
2586 				/* recovered before downdelay expired */
2587 				bond_propose_link_state(slave, BOND_LINK_UP);
2588 				slave->last_link_up = jiffies;
2589 				slave_info(bond->dev, slave->dev, "link status up again after %d ms\n",
2590 					   (bond->params.downdelay - slave->delay) *
2591 					   bond->params.miimon);
2592 				commit++;
2593 				continue;
2594 			}
2595 
2596 			if (slave->delay <= 0) {
2597 				bond_propose_link_state(slave, BOND_LINK_DOWN);
2598 				commit++;
2599 				continue;
2600 			}
2601 
2602 			slave->delay--;
2603 			break;
2604 
2605 		case BOND_LINK_DOWN:
2606 			if (!link_state)
2607 				continue;
2608 
2609 			bond_propose_link_state(slave, BOND_LINK_BACK);
2610 			commit++;
2611 			slave->delay = bond->params.updelay;
2612 
2613 			if (slave->delay) {
2614 				slave_info(bond->dev, slave->dev, "link status up, enabling it in %d ms\n",
2615 					   ignore_updelay ? 0 :
2616 					   bond->params.updelay *
2617 					   bond->params.miimon);
2618 			}
2619 			fallthrough;
2620 		case BOND_LINK_BACK:
2621 			if (!link_state) {
2622 				bond_propose_link_state(slave, BOND_LINK_DOWN);
2623 				slave_info(bond->dev, slave->dev, "link status down again after %d ms\n",
2624 					   (bond->params.updelay - slave->delay) *
2625 					   bond->params.miimon);
2626 				commit++;
2627 				continue;
2628 			}
2629 
2630 			if (ignore_updelay)
2631 				slave->delay = 0;
2632 
2633 			if (slave->delay <= 0) {
2634 				bond_propose_link_state(slave, BOND_LINK_UP);
2635 				commit++;
2636 				ignore_updelay = false;
2637 				continue;
2638 			}
2639 
2640 			slave->delay--;
2641 			break;
2642 		}
2643 	}
2644 
2645 	return commit;
2646 }
2647 
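/* Dispatch a committed link change to the mode-specific handler: 802.3ad,
 * TLB/ALB, or (for XOR) a rebuild of the transmit slave array.
 */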
2648 static void bond_miimon_link_change(struct bonding *bond,
2649 				    struct slave *slave,
2650 				    char link)
2651 {
2652 	switch (BOND_MODE(bond)) {
2653 	case BOND_MODE_8023AD:
2654 		bond_3ad_handle_link_change(slave, link);
2655 		break;
2656 	case BOND_MODE_TLB:
2657 	case BOND_MODE_ALB:
2658 		bond_alb_handle_link_change(bond, slave, link);
2659 		break;
2660 	case BOND_MODE_XOR:
2661 		bond_update_slave_arr(bond, NULL);
2662 		break;
2663 	}
2664 }
2665 
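/* Second phase of the MII monitor, called under RTNL: apply the link states
 * proposed by bond_miimon_inspect(), run the mode-specific handlers, and
 * trigger a failover when the active slave went down or a primary/higher
 * priority slave came up.
 */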
2666 static void bond_miimon_commit(struct bonding *bond)
2667 {
2668 	struct slave *slave, *primary, *active;
2669 	bool do_failover = false;
2670 	struct list_head *iter;
2671 
2672 	ASSERT_RTNL();
2673 
2674 	bond_for_each_slave(bond, slave, iter) {
2675 		switch (slave->link_new_state) {
2676 		case BOND_LINK_NOCHANGE:
2677 			/* For 802.3ad mode, check current slave speed and
2678 			 * duplex again in case its port was disabled after
2679 			 * invalid speed/duplex reporting but recovered before
2680 			 * link monitoring could make a decision on the actual
2681 			 * link status
2682 			 */
2683 			if (BOND_MODE(bond) == BOND_MODE_8023AD &&
2684 			    slave->link == BOND_LINK_UP)
2685 				bond_3ad_adapter_speed_duplex_changed(slave);
2686 			continue;
2687 
2688 		case BOND_LINK_UP:
2689 			if (bond_update_speed_duplex(slave) &&
2690 			    bond_needs_speed_duplex(bond)) {
2691 				slave->link = BOND_LINK_DOWN;
2692 				if (net_ratelimit())
2693 					slave_warn(bond->dev, slave->dev,
2694 						   "failed to get link speed/duplex\n");
2695 				continue;
2696 			}
2697 			bond_set_slave_link_state(slave, BOND_LINK_UP,
2698 						  BOND_SLAVE_NOTIFY_NOW);
2699 			slave->last_link_up = jiffies;
2700 
2701 			primary = rtnl_dereference(bond->primary_slave);
2702 			if (BOND_MODE(bond) == BOND_MODE_8023AD) {
2703 				/* prevent it from being the active one */
2704 				bond_set_backup_slave(slave);
2705 			} else if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
2706 				/* make it immediately active */
2707 				bond_set_active_slave(slave);
2708 			}
2709 
2710 			slave_info(bond->dev, slave->dev, "link status definitely up, %u Mbps %s duplex\n",
2711 				   slave->speed == SPEED_UNKNOWN ? 0 : slave->speed,
2712 				   slave->duplex ? "full" : "half");
2713 
2714 			bond_miimon_link_change(bond, slave, BOND_LINK_UP);
2715 
2716 			active = rtnl_dereference(bond->curr_active_slave);
2717 			if (!active || slave == primary || slave->prio > active->prio)
2718 				do_failover = true;
2719 
2720 			continue;
2721 
2722 		case BOND_LINK_DOWN:
2723 			if (slave->link_failure_count < UINT_MAX)
2724 				slave->link_failure_count++;
2725 
2726 			bond_set_slave_link_state(slave, BOND_LINK_DOWN,
2727 						  BOND_SLAVE_NOTIFY_NOW);
2728 
2729 			if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP ||
2730 			    BOND_MODE(bond) == BOND_MODE_8023AD)
2731 				bond_set_slave_inactive_flags(slave,
2732 							      BOND_SLAVE_NOTIFY_NOW);
2733 
2734 			slave_info(bond->dev, slave->dev, "link status definitely down, disabling slave\n");
2735 
2736 			bond_miimon_link_change(bond, slave, BOND_LINK_DOWN);
2737 
2738 			if (slave == rcu_access_pointer(bond->curr_active_slave))
2739 				do_failover = true;
2740 
2741 			continue;
2742 
2743 		default:
2744 			slave_err(bond->dev, slave->dev, "invalid new link %d on slave\n",
2745 				  slave->link_new_state);
2746 			bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
2747 
2748 			continue;
2749 		}
2750 	}
2751 
2752 	if (do_failover) {
2753 		block_netpoll_tx();
2754 		bond_select_active_slave(bond);
2755 		unblock_netpoll_tx();
2756 	}
2757 
2758 	bond_set_carrier(bond);
2759 }
2760 
2761 /* bond_mii_monitor
2762  *
2763  * Really a wrapper that splits the mii monitor into two phases: an
2764  * inspection, then (if inspection indicates something needs to be done)
2765  * an acquisition of appropriate locks followed by a commit phase to
2766  * implement whatever link state changes are indicated.
2767  */
2768 static void bond_mii_monitor(struct work_struct *work)
2769 {
2770 	struct bonding *bond = container_of(work, struct bonding,
2771 					    mii_work.work);
2772 	bool should_notify_peers = false;
2773 	bool commit;
2774 	unsigned long delay;
2775 	struct slave *slave;
2776 	struct list_head *iter;
2777 
2778 	delay = msecs_to_jiffies(bond->params.miimon);
2779 
2780 	if (!bond_has_slaves(bond))
2781 		goto re_arm;
2782 
2783 	rcu_read_lock();
2784 	should_notify_peers = bond_should_notify_peers(bond);
2785 	commit = !!bond_miimon_inspect(bond);
2786 	if (bond->send_peer_notif) {
2787 		rcu_read_unlock();
2788 		if (rtnl_trylock()) {
2789 			bond->send_peer_notif--;
2790 			rtnl_unlock();
2791 		}
2792 	} else {
2793 		rcu_read_unlock();
2794 	}
2795 
2796 	if (commit) {
2797 		/* Race avoidance with bond_close cancel of workqueue */
2798 		if (!rtnl_trylock()) {
2799 			delay = 1;
2800 			should_notify_peers = false;
2801 			goto re_arm;
2802 		}
2803 
2804 		bond_for_each_slave(bond, slave, iter) {
2805 			bond_commit_link_state(slave, BOND_SLAVE_NOTIFY_LATER);
2806 		}
2807 		bond_miimon_commit(bond);
2808 
2809 		rtnl_unlock();	/* might sleep, hold no other locks */
2810 	}
2811 
2812 re_arm:
2813 	if (bond->params.miimon)
2814 		queue_delayed_work(bond->wq, &bond->mii_work, delay);
2815 
2816 	if (should_notify_peers) {
2817 		if (!rtnl_trylock())
2818 			return;
2819 		call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
2820 		rtnl_unlock();
2821 	}
2822 }
2823 
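/* arp_validate helpers: return true if @ip is configured on the bond itself
 * or on any device stacked on top of it (walked under RCU).
 */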
2824 static int bond_upper_dev_walk(struct net_device *upper,
2825 			       struct netdev_nested_priv *priv)
2826 {
2827 	__be32 ip = *(__be32 *)priv->data;
2828 
2829 	return ip == bond_confirm_addr(upper, 0, ip);
2830 }
2831 
2832 static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
2833 {
2834 	struct netdev_nested_priv priv = {
2835 		.data = (void *)&ip,
2836 	};
2837 	bool ret = false;
2838 
2839 	if (ip == bond_confirm_addr(bond->dev, 0, ip))
2840 		return true;
2841 
2842 	rcu_read_lock();
2843 	if (netdev_walk_all_upper_dev_rcu(bond->dev, bond_upper_dev_walk, &priv))
2844 		ret = true;
2845 	rcu_read_unlock();
2846 
2847 	return ret;
2848 }
2849 
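/* Apply the VLAN tags collected by bond_verify_device_path() to @skb: inner
 * tags are inserted into the frame, the outermost tag is set as the hwaccel
 * tag.  The tag array is terminated by an entry whose vlan_proto is
 * VLAN_N_VID.  Returns false if tag insertion failed (the skb has been freed).
 */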
2850 static bool bond_handle_vlan(struct slave *slave, struct bond_vlan_tag *tags,
2851 			     struct sk_buff *skb)
2852 {
2853 	struct net_device *bond_dev = slave->bond->dev;
2854 	struct net_device *slave_dev = slave->dev;
2855 	struct bond_vlan_tag *outer_tag = tags;
2856 
2857 	if (!tags || tags->vlan_proto == VLAN_N_VID)
2858 		return true;
2859 
2860 	tags++;
2861 
2862 	/* Go through all the tags backwards and add them to the packet */
2863 	while (tags->vlan_proto != VLAN_N_VID) {
2864 		if (!tags->vlan_id) {
2865 			tags++;
2866 			continue;
2867 		}
2868 
2869 		slave_dbg(bond_dev, slave_dev, "inner tag: proto %X vid %X\n",
2870 			  ntohs(outer_tag->vlan_proto), tags->vlan_id);
2871 		skb = vlan_insert_tag_set_proto(skb, tags->vlan_proto,
2872 						tags->vlan_id);
2873 		if (!skb) {
2874 			net_err_ratelimited("failed to insert inner VLAN tag\n");
2875 			return false;
2876 		}
2877 
2878 		tags++;
2879 	}
2880 	/* Set the outer tag */
2881 	if (outer_tag->vlan_id) {
2882 		slave_dbg(bond_dev, slave_dev, "outer tag: proto %X vid %X\n",
2883 			  ntohs(outer_tag->vlan_proto), outer_tag->vlan_id);
2884 		__vlan_hwaccel_put_tag(skb, outer_tag->vlan_proto,
2885 				       outer_tag->vlan_id);
2886 	}
2887 
2888 	return true;
2889 }
2890 
2891 /* We go to the (large) trouble of VLAN tagging ARP frames because
2892  * switches in VLAN mode (especially if ports are configured as
2893  * "native" to a VLAN) might not pass non-tagged frames.
2894  */
2895 static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip,
2896 			  __be32 src_ip, struct bond_vlan_tag *tags)
2897 {
2898 	struct net_device *bond_dev = slave->bond->dev;
2899 	struct net_device *slave_dev = slave->dev;
2900 	struct sk_buff *skb;
2901 
2902 	slave_dbg(bond_dev, slave_dev, "arp %d on slave: dst %pI4 src %pI4\n",
2903 		  arp_op, &dest_ip, &src_ip);
2904 
2905 	skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
2906 			 NULL, slave_dev->dev_addr, NULL);
2907 
2908 	if (!skb) {
2909 		net_err_ratelimited("ARP packet allocation failed\n");
2910 		return;
2911 	}
2912 
2913 	if (bond_handle_vlan(slave, tags, skb)) {
2914 		slave_update_last_tx(slave);
2915 		arp_xmit(skb);
2916 	}
2917 
2920 
2921 /* Validate the device path between the @start_dev and the @end_dev.
2922  * The path is valid if the @end_dev is reachable through device
2923  * stacking.
2924  * When the path is validated, collect any vlan information in the
2925  * path.
2926  */
2927 struct bond_vlan_tag *bond_verify_device_path(struct net_device *start_dev,
2928 					      struct net_device *end_dev,
2929 					      int level)
2930 {
2931 	struct bond_vlan_tag *tags;
2932 	struct net_device *upper;
2933 	struct list_head  *iter;
2934 
2935 	if (start_dev == end_dev) {
2936 		tags = kcalloc(level + 1, sizeof(*tags), GFP_ATOMIC);
2937 		if (!tags)
2938 			return ERR_PTR(-ENOMEM);
2939 		tags[level].vlan_proto = VLAN_N_VID;
2940 		return tags;
2941 	}
2942 
2943 	netdev_for_each_upper_dev_rcu(start_dev, upper, iter) {
2944 		tags = bond_verify_device_path(upper, end_dev, level + 1);
2945 		if (IS_ERR_OR_NULL(tags)) {
2946 			if (IS_ERR(tags))
2947 				return tags;
2948 			continue;
2949 		}
2950 		if (is_vlan_dev(upper)) {
2951 			tags[level].vlan_proto = vlan_dev_vlan_proto(upper);
2952 			tags[level].vlan_id = vlan_dev_vlan_id(upper);
2953 		}
2954 
2955 		return tags;
2956 	}
2957 
2958 	return NULL;
2959 }
2960 
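/* Send an ARP probe to every configured arp_ip_target through @slave.  Each
 * target is routed first so the VLAN path between the bond and the outgoing
 * device can be collected and the right source address chosen.
 */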
2961 static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2962 {
2963 	struct rtable *rt;
2964 	struct bond_vlan_tag *tags;
2965 	__be32 *targets = bond->params.arp_targets, addr;
2966 	int i;
2967 
2968 	for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
2969 		slave_dbg(bond->dev, slave->dev, "%s: target %pI4\n",
2970 			  __func__, &targets[i]);
2971 		tags = NULL;
2972 
2973 		/* Find out through which dev the packet should go */
2974 		rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
2975 				     RTO_ONLINK, 0);
2976 		if (IS_ERR(rt)) {
2977 			/* there's no route to target - try to send arp
2978 			 * probe to generate any traffic (arp_validate=0)
2979 			 */
2980 			if (bond->params.arp_validate)
2981 				pr_warn_once("%s: no route to arp_ip_target %pI4 and arp_validate is set\n",
2982 					     bond->dev->name,
2983 					     &targets[i]);
2984 			bond_arp_send(slave, ARPOP_REQUEST, targets[i],
2985 				      0, tags);
2986 			continue;
2987 		}
2988 
2989 		/* bond device itself */
2990 		if (rt->dst.dev == bond->dev)
2991 			goto found;
2992 
2993 		rcu_read_lock();
2994 		tags = bond_verify_device_path(bond->dev, rt->dst.dev, 0);
2995 		rcu_read_unlock();
2996 
2997 		if (!IS_ERR_OR_NULL(tags))
2998 			goto found;
2999 
3000 		/* Not our device - skip */
3001 		slave_dbg(bond->dev, slave->dev, "no path to arp_ip_target %pI4 via rt.dev %s\n",
3002 			   &targets[i], rt->dst.dev ? rt->dst.dev->name : "NULL");
3003 
3004 		ip_rt_put(rt);
3005 		continue;
3006 
3007 found:
3008 		addr = bond_confirm_addr(rt->dst.dev, targets[i], 0);
3009 		ip_rt_put(rt);
3010 		bond_arp_send(slave, ARPOP_REQUEST, targets[i], addr, tags);
3011 		kfree(tags);
3012 	}
3013 }
3014 
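/* Record a validated ARP on @slave: the source IP must be one of our
 * arp_ip_targets and the target IP must belong to the bond (or a device
 * stacked above it); last_rx and the per-target timestamp are refreshed.
 */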
3015 static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 sip, __be32 tip)
3016 {
3017 	int i;
3018 
3019 	if (!sip || !bond_has_this_ip(bond, tip)) {
3020 		slave_dbg(bond->dev, slave->dev, "%s: sip %pI4 tip %pI4 not found\n",
3021 			   __func__, &sip, &tip);
3022 		return;
3023 	}
3024 
3025 	i = bond_get_targets_ip(bond->params.arp_targets, sip);
3026 	if (i == -1) {
3027 		slave_dbg(bond->dev, slave->dev, "%s: sip %pI4 not found in targets\n",
3028 			   __func__, &sip);
3029 		return;
3030 	}
3031 	slave->last_rx = jiffies;
3032 	slave->target_last_arp_rx[i] = jiffies;
3033 }
3034 
3035 static int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
3036 			struct slave *slave)
3037 {
3038 	struct arphdr *arp = (struct arphdr *)skb->data;
3039 	struct slave *curr_active_slave, *curr_arp_slave;
3040 	unsigned char *arp_ptr;
3041 	__be32 sip, tip;
3042 	unsigned int alen;
3043 
3044 	alen = arp_hdr_len(bond->dev);
3045 
3046 	if (alen > skb_headlen(skb)) {
3047 		arp = kmalloc(alen, GFP_ATOMIC);
3048 		if (!arp)
3049 			goto out_unlock;
3050 		if (skb_copy_bits(skb, 0, arp, alen) < 0)
3051 			goto out_unlock;
3052 	}
3053 
3054 	if (arp->ar_hln != bond->dev->addr_len ||
3055 	    skb->pkt_type == PACKET_OTHERHOST ||
3056 	    skb->pkt_type == PACKET_LOOPBACK ||
3057 	    arp->ar_hrd != htons(ARPHRD_ETHER) ||
3058 	    arp->ar_pro != htons(ETH_P_IP) ||
3059 	    arp->ar_pln != 4)
3060 		goto out_unlock;
3061 
3062 	arp_ptr = (unsigned char *)(arp + 1);
3063 	arp_ptr += bond->dev->addr_len;
3064 	memcpy(&sip, arp_ptr, 4);
3065 	arp_ptr += 4 + bond->dev->addr_len;
3066 	memcpy(&tip, arp_ptr, 4);
3067 
3068 	slave_dbg(bond->dev, slave->dev, "%s: %s/%d av %d sv %d sip %pI4 tip %pI4\n",
3069 		  __func__, slave->dev->name, bond_slave_state(slave),
3070 		  bond->params.arp_validate, slave_do_arp_validate(bond, slave),
3071 		  &sip, &tip);
3072 
3073 	curr_active_slave = rcu_dereference(bond->curr_active_slave);
3074 	curr_arp_slave = rcu_dereference(bond->current_arp_slave);
3075 
3076 	/* We 'trust' the received ARP enough to validate it if:
3077 	 *
3078 	 * (a) the slave receiving the ARP is active (which includes the
3079 	 * current ARP slave, if any), or
3080 	 *
3081 	 * (b) the receiving slave isn't active, but there is a currently
3082 	 * active slave and it received valid arp reply(s) after it became
3083 	 * the currently active slave, or
3084 	 *
3085 	 * (c) there is an ARP slave that sent an ARP during the prior ARP
3086 	 * interval, and we receive an ARP reply on any slave.  We accept
3087 	 * these because switch FDB update delays may deliver the ARP
3088 	 * reply to a slave other than the sender of the ARP request.
3089 	 *
3090 	 * Note: for (b), backup slaves are receiving the broadcast ARP
3091 	 * request, not a reply.  This request passes from the sending
3092 	 * slave through the L2 switch(es) to the receiving slave.  Since
3093 	 * this is checking the request, sip/tip are swapped for
3094 	 * validation.
3095 	 *
3096 	 * This is done to avoid endless looping when we can't reach the
3097 	 * arp_ip_target and fool ourselves with our own arp requests.
3098 	 */
3099 	if (bond_is_active_slave(slave))
3100 		bond_validate_arp(bond, slave, sip, tip);
3101 	else if (curr_active_slave &&
3102 		 time_after(slave_last_rx(bond, curr_active_slave),
3103 			    curr_active_slave->last_link_up))
3104 		bond_validate_arp(bond, slave, tip, sip);
3105 	else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) &&
3106 		 bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1))
3107 		bond_validate_arp(bond, slave, sip, tip);
3108 
3109 out_unlock:
3110 	if (arp != (struct arphdr *)skb->data)
3111 		kfree(arp);
3112 	return RX_HANDLER_ANOTHER;
3113 }
3114 
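/* IPv6 counterpart of the ARP monitor: bond_ns_send()/bond_ns_send_all()
 * transmit Neighbour Solicitations to the ns_ip6_target addresses, and
 * bond_na_rcv()/bond_validate_na() validate received Neighbour
 * Advertisements, mirroring the IPv4 logic above.
 */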
3115 #if IS_ENABLED(CONFIG_IPV6)
3116 static void bond_ns_send(struct slave *slave, const struct in6_addr *daddr,
3117 			 const struct in6_addr *saddr, struct bond_vlan_tag *tags)
3118 {
3119 	struct net_device *bond_dev = slave->bond->dev;
3120 	struct net_device *slave_dev = slave->dev;
3121 	struct in6_addr mcaddr;
3122 	struct sk_buff *skb;
3123 
3124 	slave_dbg(bond_dev, slave_dev, "NS on slave: dst %pI6c src %pI6c\n",
3125 		  daddr, saddr);
3126 
3127 	skb = ndisc_ns_create(slave_dev, daddr, saddr, 0);
3128 	if (!skb) {
3129 		net_err_ratelimited("NS packet allocation failed\n");
3130 		return;
3131 	}
3132 
3133 	addrconf_addr_solict_mult(daddr, &mcaddr);
3134 	if (bond_handle_vlan(slave, tags, skb)) {
3135 		slave_update_last_tx(slave);
3136 		ndisc_send_skb(skb, &mcaddr, saddr);
3137 	}
3138 }
3139 
3140 static void bond_ns_send_all(struct bonding *bond, struct slave *slave)
3141 {
3142 	struct in6_addr *targets = bond->params.ns_targets;
3143 	struct bond_vlan_tag *tags;
3144 	struct dst_entry *dst;
3145 	struct in6_addr saddr;
3146 	struct flowi6 fl6;
3147 	int i;
3148 
3149 	for (i = 0; i < BOND_MAX_NS_TARGETS && !ipv6_addr_any(&targets[i]); i++) {
3150 		slave_dbg(bond->dev, slave->dev, "%s: target %pI6c\n",
3151 			  __func__, &targets[i]);
3152 		tags = NULL;
3153 
3154 		/* Find out through which dev the packet should go */
3155 		memset(&fl6, 0, sizeof(struct flowi6));
3156 		fl6.daddr = targets[i];
3157 		fl6.flowi6_oif = bond->dev->ifindex;
3158 
3159 		dst = ip6_route_output(dev_net(bond->dev), NULL, &fl6);
3160 		if (dst->error) {
3161 			dst_release(dst);
3162 			/* there's no route to target - try to send an NS
3163 			 * probe to generate any traffic (arp_validate=0)
3164 			 */
3165 			if (bond->params.arp_validate)
3166 				pr_warn_once("%s: no route to ns_ip6_target %pI6c and arp_validate is set\n",
3167 					     bond->dev->name,
3168 					     &targets[i]);
3169 			bond_ns_send(slave, &targets[i], &in6addr_any, tags);
3170 			continue;
3171 		}
3172 
3173 		/* bond device itself */
3174 		if (dst->dev == bond->dev)
3175 			goto found;
3176 
3177 		rcu_read_lock();
3178 		tags = bond_verify_device_path(bond->dev, dst->dev, 0);
3179 		rcu_read_unlock();
3180 
3181 		if (!IS_ERR_OR_NULL(tags))
3182 			goto found;
3183 
3184 		/* Not our device - skip */
3185 		slave_dbg(bond->dev, slave->dev, "no path to ns_ip6_target %pI6c via dst->dev %s\n",
3186 			  &targets[i], dst->dev ? dst->dev->name : "NULL");
3187 
3188 		dst_release(dst);
3189 		continue;
3190 
3191 found:
3192 		if (!ipv6_dev_get_saddr(dev_net(dst->dev), dst->dev, &targets[i], 0, &saddr))
3193 			bond_ns_send(slave, &targets[i], &saddr, tags);
3194 		else
3195 			bond_ns_send(slave, &targets[i], &in6addr_any, tags);
3196 
3197 		dst_release(dst);
3198 		kfree(tags);
3199 	}
3200 }
3201 
3202 static int bond_confirm_addr6(struct net_device *dev,
3203 			      struct netdev_nested_priv *priv)
3204 {
3205 	struct in6_addr *addr = (struct in6_addr *)priv->data;
3206 
3207 	return ipv6_chk_addr(dev_net(dev), addr, dev, 0);
3208 }
3209 
3210 static bool bond_has_this_ip6(struct bonding *bond, struct in6_addr *addr)
3211 {
3212 	struct netdev_nested_priv priv = {
3213 		.data = addr,
3214 	};
3215 	bool ret = false;
3216 
3217 	if (bond_confirm_addr6(bond->dev, &priv))
3218 		return true;
3219 
3220 	rcu_read_lock();
3221 	if (netdev_walk_all_upper_dev_rcu(bond->dev, bond_confirm_addr6, &priv))
3222 		ret = true;
3223 	rcu_read_unlock();
3224 
3225 	return ret;
3226 }
3227 
3228 static void bond_validate_na(struct bonding *bond, struct slave *slave,
3229 			     struct in6_addr *saddr, struct in6_addr *daddr)
3230 {
3231 	int i;
3232 
3233 	/* Ignore NAs that:
3234 	 * 1. have the unspecified address as source address, or
3235 	 * 2. have a destination address that is neither the all-nodes
3236 	 *    multicast address nor configured on the bond interface.
3237 	 */
3238 	if (ipv6_addr_any(saddr) ||
3239 	    (!ipv6_addr_equal(daddr, &in6addr_linklocal_allnodes) &&
3240 	     !bond_has_this_ip6(bond, daddr))) {
3241 		slave_dbg(bond->dev, slave->dev, "%s: sip %pI6c tip %pI6c not found\n",
3242 			  __func__, saddr, daddr);
3243 		return;
3244 	}
3245 
3246 	i = bond_get_targets_ip6(bond->params.ns_targets, saddr);
3247 	if (i == -1) {
3248 		slave_dbg(bond->dev, slave->dev, "%s: sip %pI6c not found in targets\n",
3249 			  __func__, saddr);
3250 		return;
3251 	}
3252 	slave->last_rx = jiffies;
3253 	slave->target_last_arp_rx[i] = jiffies;
3254 }
3255 
3256 static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond,
3257 		       struct slave *slave)
3258 {
3259 	struct slave *curr_active_slave, *curr_arp_slave;
3260 	struct in6_addr *saddr, *daddr;
3261 	struct {
3262 		struct ipv6hdr ip6;
3263 		struct icmp6hdr icmp6;
3264 	} *combined, _combined;
3265 
3266 	if (skb->pkt_type == PACKET_OTHERHOST ||
3267 	    skb->pkt_type == PACKET_LOOPBACK)
3268 		goto out;
3269 
3270 	combined = skb_header_pointer(skb, 0, sizeof(_combined), &_combined);
3271 	if (!combined || combined->ip6.nexthdr != NEXTHDR_ICMP ||
3272 	    combined->icmp6.icmp6_type != NDISC_NEIGHBOUR_ADVERTISEMENT)
3273 		goto out;
3274 
3275 	saddr = &combined->ip6.saddr;
3276 	daddr = &combined->ip6.daddr;
3277 
3278 	slave_dbg(bond->dev, slave->dev, "%s: %s/%d av %d sv %d sip %pI6c tip %pI6c\n",
3279 		  __func__, slave->dev->name, bond_slave_state(slave),
3280 		  bond->params.arp_validate, slave_do_arp_validate(bond, slave),
3281 		  saddr, daddr);
3282 
3283 	curr_active_slave = rcu_dereference(bond->curr_active_slave);
3284 	curr_arp_slave = rcu_dereference(bond->current_arp_slave);
3285 
3286 	/* We 'trust' the received NA enough to validate it under the same
3287 	 * conditions as the ARP case; see bond_arp_rcv().
3288 	 */
3289 	if (bond_is_active_slave(slave))
3290 		bond_validate_na(bond, slave, saddr, daddr);
3291 	else if (curr_active_slave &&
3292 		 time_after(slave_last_rx(bond, curr_active_slave),
3293 			    curr_active_slave->last_link_up))
3294 		bond_validate_na(bond, slave, saddr, daddr);
3295 	else if (curr_arp_slave &&
3296 		 bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1))
3297 		bond_validate_na(bond, slave, saddr, daddr);
3298 
3299 out:
3300 	return RX_HANDLER_ANOTHER;
3301 }
3302 #endif
3303 
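/* Validate an incoming ARP or IPv6 NA for the ARP monitor.  Without
 * arp_validate only last_rx is refreshed (subject to the "validate only"
 * filter); with arp_validate, ARP frames go through bond_arp_rcv() and IPv6
 * frames through bond_na_rcv().
 */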
3304 int bond_rcv_validate(const struct sk_buff *skb, struct bonding *bond,
3305 		      struct slave *slave)
3306 {
3307 #if IS_ENABLED(CONFIG_IPV6)
3308 	bool is_ipv6 = skb->protocol == __cpu_to_be16(ETH_P_IPV6);
3309 #endif
3310 	bool is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
3311 
3312 	slave_dbg(bond->dev, slave->dev, "%s: skb->dev %s\n",
3313 		  __func__, skb->dev->name);
3314 
3315 	/* Use arp validate logic for both ARP and NS */
3316 	if (!slave_do_arp_validate(bond, slave)) {
3317 		if ((slave_do_arp_validate_only(bond) && is_arp) ||
3318 #if IS_ENABLED(CONFIG_IPV6)
3319 		    (slave_do_arp_validate_only(bond) && is_ipv6) ||
3320 #endif
3321 		    !slave_do_arp_validate_only(bond))
3322 			slave->last_rx = jiffies;
3323 		return RX_HANDLER_ANOTHER;
3324 	} else if (is_arp) {
3325 		return bond_arp_rcv(skb, bond, slave);
3326 #if IS_ENABLED(CONFIG_IPV6)
3327 	} else if (is_ipv6) {
3328 		return bond_na_rcv(skb, bond, slave);
3329 #endif
3330 	} else {
3331 		return RX_HANDLER_ANOTHER;
3332 	}
3333 }
3334 
3335 static void bond_send_validate(struct bonding *bond, struct slave *slave)
3336 {
3337 	bond_arp_send_all(bond, slave);
3338 #if IS_ENABLED(CONFIG_IPV6)
3339 	bond_ns_send_all(bond, slave);
3340 #endif
3341 }
3342 
3343 /* Verify whether we're in the arp_interval timeslice; returns true if
3344  * (last_act - arp_interval) <= jiffies <= (last_act + mod * arp_interval +
3345  * arp_interval/2).  The extra arp_interval/2 is needed for really fast networks.
3346  */
3347 static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
3348 				  int mod)
3349 {
3350 	int delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
3351 
3352 	return time_in_range(jiffies,
3353 			     last_act - delta_in_ticks,
3354 			     last_act + mod * delta_in_ticks + delta_in_ticks/2);
3355 }
3356 
3357 /* This function is called regularly to monitor each slave's link,
3358  * ensuring that traffic is being sent and received when ARP monitoring
3359  * is used in load-balancing mode.  If the adapter has been dormant, an
3360  * ARP is transmitted to generate traffic.  See activebackup_arp_monitor for
3361  * ARP monitoring in active backup mode.
3362  */
3363 static void bond_loadbalance_arp_mon(struct bonding *bond)
3364 {
3365 	struct slave *slave, *oldcurrent;
3366 	struct list_head *iter;
3367 	int do_failover = 0, slave_state_changed = 0;
3368 
3369 	if (!bond_has_slaves(bond))
3370 		goto re_arm;
3371 
3372 	rcu_read_lock();
3373 
3374 	oldcurrent = rcu_dereference(bond->curr_active_slave);
3375 	/* See if any of the previous devices are up now (i.e. they have
3376 	 * transmit and receive traffic).  The curr_active_slave does not come
3377 	 * into the picture unless it is null.  Also, slave->last_link_up is not
3378 	 * needed here because we send an ARP on each slave and give a slave
3379 	 * as long as it needs to get the tx/rx within the delta.
3380 	 * TODO: what about up/down delay in arp mode? it wasn't here before
3381 	 *       so it can wait
3382 	 */
3383 	bond_for_each_slave_rcu(bond, slave, iter) {
3384 		unsigned long last_tx = slave_last_tx(slave);
3385 
3386 		bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
3387 
3388 		if (slave->link != BOND_LINK_UP) {
3389 			if (bond_time_in_interval(bond, last_tx, 1) &&
3390 			    bond_time_in_interval(bond, slave->last_rx, 1)) {
3391 
3392 				bond_propose_link_state(slave, BOND_LINK_UP);
3393 				slave_state_changed = 1;
3394 
3395 				/* primary_slave has no meaning in round-robin
3396 				 * mode. the window of a slave being up and
3397 				 * curr_active_slave being null after enslaving
3398 				 * is closed.
3399 				 */
3400 				if (!oldcurrent) {
3401 					slave_info(bond->dev, slave->dev, "link status definitely up\n");
3402 					do_failover = 1;
3403 				} else {
3404 					slave_info(bond->dev, slave->dev, "interface is now up\n");
3405 				}
3406 			}
3407 		} else {
3408 			/* slave->link == BOND_LINK_UP */
3409 
3410 			/* not all switches will respond to an arp request
3411 			 * when the source ip is 0, so don't take the link down
3412 			 * if we don't know our ip yet
3413 			 */
3414 			if (!bond_time_in_interval(bond, last_tx, bond->params.missed_max) ||
3415 			    !bond_time_in_interval(bond, slave->last_rx, bond->params.missed_max)) {
3416 
3417 				bond_propose_link_state(slave, BOND_LINK_DOWN);
3418 				slave_state_changed = 1;
3419 
3420 				if (slave->link_failure_count < UINT_MAX)
3421 					slave->link_failure_count++;
3422 
3423 				slave_info(bond->dev, slave->dev, "interface is now down\n");
3424 
3425 				if (slave == oldcurrent)
3426 					do_failover = 1;
3427 			}
3428 		}
3429 
3430 		/* note: if switch is in round-robin mode, all links
3431 		 * must tx arp to ensure all links rx an arp - otherwise
3432 		 * links may oscillate or not come up at all; if switch is
3433 		 * in something like xor mode, there is nothing we can
3434 		 * do - all replies will be rx'ed on same link causing slaves
3435 		 * to be unstable during low/no traffic periods
3436 		 */
3437 		if (bond_slave_is_up(slave))
3438 			bond_send_validate(bond, slave);
3439 	}
3440 
3441 	rcu_read_unlock();
3442 
3443 	if (do_failover || slave_state_changed) {
3444 		if (!rtnl_trylock())
3445 			goto re_arm;
3446 
3447 		bond_for_each_slave(bond, slave, iter) {
3448 			if (slave->link_new_state != BOND_LINK_NOCHANGE)
3449 				slave->link = slave->link_new_state;
3450 		}
3451 
3452 		if (slave_state_changed) {
3453 			bond_slave_state_change(bond);
3454 			if (BOND_MODE(bond) == BOND_MODE_XOR)
3455 				bond_update_slave_arr(bond, NULL);
3456 		}
3457 		if (do_failover) {
3458 			block_netpoll_tx();
3459 			bond_select_active_slave(bond);
3460 			unblock_netpoll_tx();
3461 		}
3462 		rtnl_unlock();
3463 	}
3464 
3465 re_arm:
3466 	if (bond->params.arp_interval)
3467 		queue_delayed_work(bond->wq, &bond->arp_work,
3468 				   msecs_to_jiffies(bond->params.arp_interval));
3469 }
3470 
3471 /* Called to inspect slaves for active-backup mode ARP monitor link state
3472  * changes.  Sets proposed link state in slaves to specify what action
3473  * should take place for the slave.  Returns 0 if no changes are found, >0
3474  * if changes to link states must be committed.
3475  *
3476  * Called with rcu_read_lock held.
3477  */
3478 static int bond_ab_arp_inspect(struct bonding *bond)
3479 {
3480 	unsigned long last_tx, last_rx;
3481 	struct list_head *iter;
3482 	struct slave *slave;
3483 	int commit = 0;
3484 
3485 	bond_for_each_slave_rcu(bond, slave, iter) {
3486 		bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
3487 		last_rx = slave_last_rx(bond, slave);
3488 
3489 		if (slave->link != BOND_LINK_UP) {
3490 			if (bond_time_in_interval(bond, last_rx, 1)) {
3491 				bond_propose_link_state(slave, BOND_LINK_UP);
3492 				commit++;
3493 			} else if (slave->link == BOND_LINK_BACK) {
3494 				bond_propose_link_state(slave, BOND_LINK_FAIL);
3495 				commit++;
3496 			}
3497 			continue;
3498 		}
3499 
3500 		/* Give slaves 2*delta after being enslaved or made
3501 		 * active.  This avoids bouncing, as the last receive
3502 		 * times need a full ARP monitor cycle to be updated.
3503 		 */
3504 		if (bond_time_in_interval(bond, slave->last_link_up, 2))
3505 			continue;
3506 
3507 		/* Backup slave is down if:
3508 		 * - No current_arp_slave AND
3509 		 * - more than (missed_max+1)*delta since last receive AND
3510 		 * - the bond has an IP address
3511 		 *
3512 		 * Note: a non-null current_arp_slave indicates
3513 		 * the curr_active_slave went down and we are
3514 		 * searching for a new one; under this condition
3515 		 * we only take the curr_active_slave down - this
3516 		 * gives each slave a chance to tx/rx traffic
3517 		 * before being taken out
3518 		 */
3519 		if (!bond_is_active_slave(slave) &&
3520 		    !rcu_access_pointer(bond->current_arp_slave) &&
3521 		    !bond_time_in_interval(bond, last_rx, bond->params.missed_max + 1)) {
3522 			bond_propose_link_state(slave, BOND_LINK_DOWN);
3523 			commit++;
3524 		}
3525 
3526 		/* Active slave is down if:
3527 		 * - more than missed_max*delta since transmitting OR
3528 		 * - (more than missed_max*delta since receive AND
3529 		 *    the bond has an IP address)
3530 		 */
3531 		last_tx = slave_last_tx(slave);
3532 		if (bond_is_active_slave(slave) &&
3533 		    (!bond_time_in_interval(bond, last_tx, bond->params.missed_max) ||
3534 		     !bond_time_in_interval(bond, last_rx, bond->params.missed_max))) {
3535 			bond_propose_link_state(slave, BOND_LINK_DOWN);
3536 			commit++;
3537 		}
3538 	}
3539 
3540 	return commit;
3541 }
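/* Worked example of the thresholds above (assuming arp_interval = 1000ms and
 * missed_max = 2): a backup slave is proposed down only after roughly
 * (missed_max + 1) * arp_interval = 3s without receive traffic, while the
 * active slave is proposed down after roughly missed_max * arp_interval = 2s
 * without either tx or rx, plus the arp_interval/2 slack built into
 * bond_time_in_interval().
 */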
3542 
3543 /* Called to commit link state changes noted by inspection step of
3544  * active-backup mode ARP monitor.
3545  *
3546  * Called with RTNL held.
3547  */
3548 static void bond_ab_arp_commit(struct bonding *bond)
3549 {
3550 	bool do_failover = false;
3551 	struct list_head *iter;
3552 	unsigned long last_tx;
3553 	struct slave *slave;
3554 
3555 	bond_for_each_slave(bond, slave, iter) {
3556 		switch (slave->link_new_state) {
3557 		case BOND_LINK_NOCHANGE:
3558 			continue;
3559 
3560 		case BOND_LINK_UP:
3561 			last_tx = slave_last_tx(slave);
3562 			if (rtnl_dereference(bond->curr_active_slave) != slave ||
3563 			    (!rtnl_dereference(bond->curr_active_slave) &&
3564 			     bond_time_in_interval(bond, last_tx, 1))) {
3565 				struct slave *current_arp_slave;
3566 
3567 				current_arp_slave = rtnl_dereference(bond->current_arp_slave);
3568 				bond_set_slave_link_state(slave, BOND_LINK_UP,
3569 							  BOND_SLAVE_NOTIFY_NOW);
3570 				if (current_arp_slave) {
3571 					bond_set_slave_inactive_flags(
3572 						current_arp_slave,
3573 						BOND_SLAVE_NOTIFY_NOW);
3574 					RCU_INIT_POINTER(bond->current_arp_slave, NULL);
3575 				}
3576 
3577 				slave_info(bond->dev, slave->dev, "link status definitely up\n");
3578 
3579 				if (!rtnl_dereference(bond->curr_active_slave) ||
3580 				    slave == rtnl_dereference(bond->primary_slave) ||
3581 				    slave->prio > rtnl_dereference(bond->curr_active_slave)->prio)
3582 					do_failover = true;
3583 
3584 			}
3585 
3586 			continue;
3587 
3588 		case BOND_LINK_DOWN:
3589 			if (slave->link_failure_count < UINT_MAX)
3590 				slave->link_failure_count++;
3591 
3592 			bond_set_slave_link_state(slave, BOND_LINK_DOWN,
3593 						  BOND_SLAVE_NOTIFY_NOW);
3594 			bond_set_slave_inactive_flags(slave,
3595 						      BOND_SLAVE_NOTIFY_NOW);
3596 
3597 			slave_info(bond->dev, slave->dev, "link status definitely down, disabling slave\n");
3598 
3599 			if (slave == rtnl_dereference(bond->curr_active_slave)) {
3600 				RCU_INIT_POINTER(bond->current_arp_slave, NULL);
3601 				do_failover = true;
3602 			}
3603 
3604 			continue;
3605 
3606 		case BOND_LINK_FAIL:
3607 			bond_set_slave_link_state(slave, BOND_LINK_FAIL,
3608 						  BOND_SLAVE_NOTIFY_NOW);
3609 			bond_set_slave_inactive_flags(slave,
3610 						      BOND_SLAVE_NOTIFY_NOW);
3611 
3612 			/* A slave has just been enslaved and has become
3613 			 * the current active slave.
3614 			 */
3615 			if (rtnl_dereference(bond->curr_active_slave))
3616 				RCU_INIT_POINTER(bond->current_arp_slave, NULL);
3617 			continue;
3618 
3619 		default:
3620 			slave_err(bond->dev, slave->dev,
3621 				  "impossible: link_new_state %d on slave\n",
3622 				  slave->link_new_state);
3623 			continue;
3624 		}
3625 	}
3626 
3627 	if (do_failover) {
3628 		block_netpoll_tx();
3629 		bond_select_active_slave(bond);
3630 		unblock_netpoll_tx();
3631 	}
3632 
3633 	bond_set_carrier(bond);
3634 }
3635 
3636 /* Send ARP probes for active-backup mode ARP monitor.
3637  *
3638  * Called with rcu_read_lock held.
3639  */
3640 static bool bond_ab_arp_probe(struct bonding *bond)
3641 {
3642 	struct slave *slave, *before = NULL, *new_slave = NULL,
3643 		     *curr_arp_slave = rcu_dereference(bond->current_arp_slave),
3644 		     *curr_active_slave = rcu_dereference(bond->curr_active_slave);
3645 	struct list_head *iter;
3646 	bool found = false;
3647 	bool should_notify_rtnl = BOND_SLAVE_NOTIFY_LATER;
3648 
3649 	if (curr_arp_slave && curr_active_slave)
3650 		netdev_info(bond->dev, "PROBE: c_arp %s && cas %s BAD\n",
3651 			    curr_arp_slave->dev->name,
3652 			    curr_active_slave->dev->name);
3653 
3654 	if (curr_active_slave) {
3655 		bond_send_validate(bond, curr_active_slave);
3656 		return should_notify_rtnl;
3657 	}
3658 
3659 	/* if we don't have a curr_active_slave, search for the next available
3660 	 * backup slave from the current_arp_slave and make it the candidate
3661 	 * for becoming the curr_active_slave
3662 	 */
3663 
3664 	if (!curr_arp_slave) {
3665 		curr_arp_slave = bond_first_slave_rcu(bond);
3666 		if (!curr_arp_slave)
3667 			return should_notify_rtnl;
3668 	}
3669 
3670 	bond_for_each_slave_rcu(bond, slave, iter) {
3671 		if (!found && !before && bond_slave_is_up(slave))
3672 			before = slave;
3673 
3674 		if (found && !new_slave && bond_slave_is_up(slave))
3675 			new_slave = slave;
3676 		/* If the link state is up at this point, we
3677 		 * mark it down - this can happen if we have
3678 		 * simultaneous link failures and
3679 		 * reselect_active_interface doesn't make this
3680 		 * one the current slave, so it is still marked
3681 		 * up when it is actually down.
3682 		 */
3683 		if (!bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
3684 			bond_set_slave_link_state(slave, BOND_LINK_DOWN,
3685 						  BOND_SLAVE_NOTIFY_LATER);
3686 			if (slave->link_failure_count < UINT_MAX)
3687 				slave->link_failure_count++;
3688 
3689 			bond_set_slave_inactive_flags(slave,
3690 						      BOND_SLAVE_NOTIFY_LATER);
3691 
3692 			slave_info(bond->dev, slave->dev, "backup interface is now down\n");
3693 		}
3694 		if (slave == curr_arp_slave)
3695 			found = true;
3696 	}
3697 
3698 	if (!new_slave && before)
3699 		new_slave = before;
3700 
3701 	if (!new_slave)
3702 		goto check_state;
3703 
3704 	bond_set_slave_link_state(new_slave, BOND_LINK_BACK,
3705 				  BOND_SLAVE_NOTIFY_LATER);
3706 	bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER);
3707 	bond_send_validate(bond, new_slave);
3708 	new_slave->last_link_up = jiffies;
3709 	rcu_assign_pointer(bond->current_arp_slave, new_slave);
3710 
3711 check_state:
3712 	bond_for_each_slave_rcu(bond, slave, iter) {
3713 		if (slave->should_notify || slave->should_notify_link) {
3714 			should_notify_rtnl = BOND_SLAVE_NOTIFY_NOW;
3715 			break;
3716 		}
3717 	}
3718 	return should_notify_rtnl;
3719 }
3720 
3721 static void bond_activebackup_arp_mon(struct bonding *bond)
3722 {
3723 	bool should_notify_peers = false;
3724 	bool should_notify_rtnl = false;
3725 	int delta_in_ticks;
3726 
3727 	delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
3728 
3729 	if (!bond_has_slaves(bond))
3730 		goto re_arm;
3731 
3732 	rcu_read_lock();
3733 
3734 	should_notify_peers = bond_should_notify_peers(bond);
3735 
3736 	if (bond_ab_arp_inspect(bond)) {
3737 		rcu_read_unlock();
3738 
3739 		/* Race avoidance with bond_close flush of workqueue */
3740 		if (!rtnl_trylock()) {
3741 			delta_in_ticks = 1;
3742 			should_notify_peers = false;
3743 			goto re_arm;
3744 		}
3745 
3746 		bond_ab_arp_commit(bond);
3747 
3748 		rtnl_unlock();
3749 		rcu_read_lock();
3750 	}
3751 
3752 	should_notify_rtnl = bond_ab_arp_probe(bond);
3753 	rcu_read_unlock();
3754 
3755 re_arm:
3756 	if (bond->params.arp_interval)
3757 		queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
3758 
3759 	if (should_notify_peers || should_notify_rtnl) {
3760 		if (!rtnl_trylock())
3761 			return;
3762 
3763 		if (should_notify_peers) {
3764 			bond->send_peer_notif--;
3765 			call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
3766 						 bond->dev);
3767 		}
3768 		if (should_notify_rtnl) {
3769 			bond_slave_state_notify(bond);
3770 			bond_slave_link_notify(bond);
3771 		}
3772 
3773 		rtnl_unlock();
3774 	}
3775 }
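/* Note on the rtnl_trylock() failure path above: when RTNL cannot be taken
 * the inspected link changes are not committed; instead delta_in_ticks is
 * forced to 1 so the work item is re-queued almost immediately and the
 * commit is retried on the next run.
 */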
3776 
3777 static void bond_arp_monitor(struct work_struct *work)
3778 {
3779 	struct bonding *bond = container_of(work, struct bonding,
3780 					    arp_work.work);
3781 
3782 	if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
3783 		bond_activebackup_arp_mon(bond);
3784 	else
3785 		bond_loadbalance_arp_mon(bond);
3786 }
3787 
3788 /*-------------------------- netdev event handling --------------------------*/
3789 
3790 /* Change device name */
3791 static int bond_event_changename(struct bonding *bond)
3792 {
3793 	bond_remove_proc_entry(bond);
3794 	bond_create_proc_entry(bond);
3795 
3796 	bond_debug_reregister(bond);
3797 
3798 	return NOTIFY_DONE;
3799 }
3800 
3801 static int bond_master_netdev_event(unsigned long event,
3802 				    struct net_device *bond_dev)
3803 {
3804 	struct bonding *event_bond = netdev_priv(bond_dev);
3805 
3806 	netdev_dbg(bond_dev, "%s called\n", __func__);
3807 
3808 	switch (event) {
3809 	case NETDEV_CHANGENAME:
3810 		return bond_event_changename(event_bond);
3811 	case NETDEV_UNREGISTER:
3812 		bond_remove_proc_entry(event_bond);
3813 #ifdef CONFIG_XFRM_OFFLOAD
3814 		xfrm_dev_state_flush(dev_net(bond_dev), bond_dev, true);
3815 #endif /* CONFIG_XFRM_OFFLOAD */
3816 		break;
3817 	case NETDEV_REGISTER:
3818 		bond_create_proc_entry(event_bond);
3819 		break;
3820 	default:
3821 		break;
3822 	}
3823 
3824 	return NOTIFY_DONE;
3825 }
3826 
3827 static int bond_slave_netdev_event(unsigned long event,
3828 				   struct net_device *slave_dev)
3829 {
3830 	struct slave *slave = bond_slave_get_rtnl(slave_dev), *primary;
3831 	struct bonding *bond;
3832 	struct net_device *bond_dev;
3833 
3834 	/* A netdev event can be generated while enslaving a device
3835 	 * before netdev_rx_handler_register is called, in which case
3836 	 * slave will be NULL.
3837 	 */
3838 	if (!slave) {
3839 		netdev_dbg(slave_dev, "%s called on NULL slave\n", __func__);
3840 		return NOTIFY_DONE;
3841 	}
3842 
3843 	bond_dev = slave->bond->dev;
3844 	bond = slave->bond;
3845 	primary = rtnl_dereference(bond->primary_slave);
3846 
3847 	slave_dbg(bond_dev, slave_dev, "%s called\n", __func__);
3848 
3849 	switch (event) {
3850 	case NETDEV_UNREGISTER:
3851 		if (bond_dev->type != ARPHRD_ETHER)
3852 			bond_release_and_destroy(bond_dev, slave_dev);
3853 		else
3854 			__bond_release_one(bond_dev, slave_dev, false, true);
3855 		break;
3856 	case NETDEV_UP:
3857 	case NETDEV_CHANGE:
3858 		/* For 802.3ad mode only:
3859 		 * Getting invalid Speed/Duplex values here will put slave
3860 		 * in weird state. Mark it as link-fail if the link was
3861 		 * previously up or link-down if it hasn't yet come up, and
3862 		 * let link-monitoring (miimon) set it right when correct
3863 		 * speeds/duplex are available.
3864 		 */
3865 		if (bond_update_speed_duplex(slave) &&
3866 		    BOND_MODE(bond) == BOND_MODE_8023AD) {
3867 			if (slave->last_link_up)
3868 				slave->link = BOND_LINK_FAIL;
3869 			else
3870 				slave->link = BOND_LINK_DOWN;
3871 		}
3872 
3873 		if (BOND_MODE(bond) == BOND_MODE_8023AD)
3874 			bond_3ad_adapter_speed_duplex_changed(slave);
3875 		fallthrough;
3876 	case NETDEV_DOWN:
3877 		/* Refresh slave-array if applicable!
3878 		 * If the setup does not use miimon or arpmon (mode-specific!),
3879 		 * then these events will not cause the slave-array to be
3880 		 * refreshed. This will cause xmit to use a slave that is not
3881 		 * usable. Avoid such a situation by refreshing the array at these
3882 		 * events. If these (miimon/arpmon) parameters are configured,
3883 		 * then the array gets refreshed twice and that should be fine!
3884 		 */
3885 		if (bond_mode_can_use_xmit_hash(bond))
3886 			bond_update_slave_arr(bond, NULL);
3887 		break;
3888 	case NETDEV_CHANGEMTU:
3889 		/* TODO: Should slaves be allowed to
3890 		 * independently alter their MTU?  For
3891 		 * an active-backup bond, slaves need
3892 		 * not be the same type of device, so
3893 		 * MTUs may vary.  For other modes,
3894 		 * slaves arguably should have the
3895 		 * same MTUs. To do this, we'd need to
3896 		 * take over the slave's change_mtu
3897 		 * function for the duration of their
3898 		 * servitude.
3899 		 */
3900 		break;
3901 	case NETDEV_CHANGENAME:
3902 		/* we don't care if we don't have primary set */
3903 		if (!bond_uses_primary(bond) ||
3904 		    !bond->params.primary[0])
3905 			break;
3906 
3907 		if (slave == primary) {
3908 			/* slave's name changed - he's no longer primary */
3909 			RCU_INIT_POINTER(bond->primary_slave, NULL);
3910 		} else if (!strcmp(slave_dev->name, bond->params.primary)) {
3911 			/* we have a new primary slave */
3912 			rcu_assign_pointer(bond->primary_slave, slave);
3913 		} else { /* we didn't change primary - exit */
3914 			break;
3915 		}
3916 
3917 		netdev_info(bond->dev, "Primary slave changed to %s, reselecting active slave\n",
3918 			    primary ? slave_dev->name : "none");
3919 
3920 		block_netpoll_tx();
3921 		bond_select_active_slave(bond);
3922 		unblock_netpoll_tx();
3923 		break;
3924 	case NETDEV_FEAT_CHANGE:
3925 		bond_compute_features(bond);
3926 		break;
3927 	case NETDEV_RESEND_IGMP:
3928 		/* Propagate to master device */
3929 		call_netdevice_notifiers(event, slave->bond->dev);
3930 		break;
3931 	default:
3932 		break;
3933 	}
3934 
3935 	return NOTIFY_DONE;
3936 }
3937 
3938 /* bond_netdev_event: handle netdev notifier chain events.
3939  *
3940  * This function receives events for the netdev chain.  The caller (an
3941  * ioctl handler calling blocking_notifier_call_chain) holds the necessary
3942  * locks for us to safely manipulate the slave devices (RTNL lock,
3943  * dev_probe_lock).
3944  */
3945 static int bond_netdev_event(struct notifier_block *this,
3946 			     unsigned long event, void *ptr)
3947 {
3948 	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
3949 
3950 	netdev_dbg(event_dev, "%s received %s\n",
3951 		   __func__, netdev_cmd_to_name(event));
3952 
3953 	if (!(event_dev->priv_flags & IFF_BONDING))
3954 		return NOTIFY_DONE;
3955 
3956 	if (event_dev->flags & IFF_MASTER) {
3957 		int ret;
3958 
3959 		ret = bond_master_netdev_event(event, event_dev);
3960 		if (ret != NOTIFY_DONE)
3961 			return ret;
3962 	}
3963 
3964 	if (event_dev->flags & IFF_SLAVE)
3965 		return bond_slave_netdev_event(event, event_dev);
3966 
3967 	return NOTIFY_DONE;
3968 }
3969 
3970 static struct notifier_block bond_netdev_notifier = {
3971 	.notifier_call = bond_netdev_event,
3972 };
3973 
3974 /*---------------------------- Hashing Policies -----------------------------*/
3975 
3976 /* Helper to access data in a packet, with or without a backing skb.
3977  * If skb is given the data is linearized if necessary via pskb_may_pull.
3978  */
3979 static inline const void *bond_pull_data(struct sk_buff *skb,
3980 					 const void *data, int hlen, int n)
3981 {
3982 	if (likely(n <= hlen))
3983 		return data;
3984 	else if (skb && likely(pskb_may_pull(skb, n)))
3985 		return skb->head;
3986 
3987 	return NULL;
3988 }
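/* Note: pskb_may_pull() may reallocate the skb's linear area, so the helper
 * above returns skb->head (the possibly new buffer) instead of the caller's
 * original data pointer, which could be stale after the pull.
 */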
3989 
3990 /* L2 hash helper */
3991 static inline u32 bond_eth_hash(struct sk_buff *skb, const void *data, int mhoff, int hlen)
3992 {
3993 	struct ethhdr *ep;
3994 
3995 	data = bond_pull_data(skb, data, hlen, mhoff + sizeof(struct ethhdr));
3996 	if (!data)
3997 		return 0;
3998 
3999 	ep = (struct ethhdr *)(data + mhoff);
4000 	return ep->h_dest[5] ^ ep->h_source[5] ^ be16_to_cpu(ep->h_proto);
4001 }
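/* Illustrative example (addresses assumed): for a frame sent from
 * 52:54:00:12:34:56 to 52:54:00:65:43:21 carrying IPv4 (h_proto 0x0800),
 * the layer2 hash above is 0x21 ^ 0x56 ^ 0x0800 = 0x0877 - only the last
 * octet of each MAC and the ethertype influence slave selection.
 */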
4002 
4003 static bool bond_flow_ip(struct sk_buff *skb, struct flow_keys *fk, const void *data,
4004 			 int hlen, __be16 l2_proto, int *nhoff, int *ip_proto, bool l34)
4005 {
4006 	const struct ipv6hdr *iph6;
4007 	const struct iphdr *iph;
4008 
4009 	if (l2_proto == htons(ETH_P_IP)) {
4010 		data = bond_pull_data(skb, data, hlen, *nhoff + sizeof(*iph));
4011 		if (!data)
4012 			return false;
4013 
4014 		iph = (const struct iphdr *)(data + *nhoff);
4015 		iph_to_flow_copy_v4addrs(fk, iph);
4016 		*nhoff += iph->ihl << 2;
4017 		if (!ip_is_fragment(iph))
4018 			*ip_proto = iph->protocol;
4019 	} else if (l2_proto == htons(ETH_P_IPV6)) {
4020 		data = bond_pull_data(skb, data, hlen, *nhoff + sizeof(*iph6));
4021 		if (!data)
4022 			return false;
4023 
4024 		iph6 = (const struct ipv6hdr *)(data + *nhoff);
4025 		iph_to_flow_copy_v6addrs(fk, iph6);
4026 		*nhoff += sizeof(*iph6);
4027 		*ip_proto = iph6->nexthdr;
4028 	} else {
4029 		return false;
4030 	}
4031 
4032 	if (l34 && *ip_proto >= 0)
4033 		fk->ports.ports = __skb_flow_get_ports(skb, *nhoff, *ip_proto, data, hlen);
4034 
4035 	return true;
4036 }
4037 
4038 static u32 bond_vlan_srcmac_hash(struct sk_buff *skb, const void *data, int mhoff, int hlen)
4039 {
4040 	u32 srcmac_vendor = 0, srcmac_dev = 0;
4041 	struct ethhdr *mac_hdr;
4042 	u16 vlan = 0;
4043 	int i;
4044 
4045 	data = bond_pull_data(skb, data, hlen, mhoff + sizeof(struct ethhdr));
4046 	if (!data)
4047 		return 0;
4048 	mac_hdr = (struct ethhdr *)(data + mhoff);
4049 
4050 	for (i = 0; i < 3; i++)
4051 		srcmac_vendor = (srcmac_vendor << 8) | mac_hdr->h_source[i];
4052 
4053 	for (i = 3; i < ETH_ALEN; i++)
4054 		srcmac_dev = (srcmac_dev << 8) | mac_hdr->h_source[i];
4055 
4056 	if (skb && skb_vlan_tag_present(skb))
4057 		vlan = skb_vlan_tag_get(skb);
4058 
4059 	return vlan ^ srcmac_vendor ^ srcmac_dev;
4060 }
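/* Illustrative example (addresses assumed): for source MAC 52:54:00:12:34:56
 * the loops above yield srcmac_vendor = 0x525400 and srcmac_dev = 0x123456;
 * with VLAN tag 100 the hash is 100 ^ 0x525400 ^ 0x123456, so this policy
 * spreads traffic by source MAC (and VLAN) rather than by IP or port.
 */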
4061 
4062 /* Extract the appropriate headers based on bond's xmit policy */
4063 static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb, const void *data,
4064 			      __be16 l2_proto, int nhoff, int hlen, struct flow_keys *fk)
4065 {
4066 	bool l34 = bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34;
4067 	int ip_proto = -1;
4068 
4069 	switch (bond->params.xmit_policy) {
4070 	case BOND_XMIT_POLICY_ENCAP23:
4071 	case BOND_XMIT_POLICY_ENCAP34:
4072 		memset(fk, 0, sizeof(*fk));
4073 		return __skb_flow_dissect(NULL, skb, &flow_keys_bonding,
4074 					  fk, data, l2_proto, nhoff, hlen, 0);
4075 	default:
4076 		break;
4077 	}
4078 
4079 	fk->ports.ports = 0;
4080 	memset(&fk->icmp, 0, sizeof(fk->icmp));
4081 	if (!bond_flow_ip(skb, fk, data, hlen, l2_proto, &nhoff, &ip_proto, l34))
4082 		return false;
4083 
4084 	/* ICMP error packets contain at least 8 bytes of the header
4085 	 * of the packet which generated the error. Use this information
4086 	 * to correlate ICMP error packets within the same flow which
4087 	 * generated the error.
4088 	 */
4089 	if (ip_proto == IPPROTO_ICMP || ip_proto == IPPROTO_ICMPV6) {
4090 		skb_flow_get_icmp_tci(skb, &fk->icmp, data, nhoff, hlen);
4091 		if (ip_proto == IPPROTO_ICMP) {
4092 			if (!icmp_is_err(fk->icmp.type))
4093 				return true;
4094 
4095 			nhoff += sizeof(struct icmphdr);
4096 		} else if (ip_proto == IPPROTO_ICMPV6) {
4097 			if (!icmpv6_is_err(fk->icmp.type))
4098 				return true;
4099 
4100 			nhoff += sizeof(struct icmp6hdr);
4101 		}
4102 		return bond_flow_ip(skb, fk, data, hlen, l2_proto, &nhoff, &ip_proto, l34);
4103 	}
4104 
4105 	return true;
4106 }
4107 
4108 static u32 bond_ip_hash(u32 hash, struct flow_keys *flow, int xmit_policy)
4109 {
4110 	hash ^= (__force u32)flow_get_u32_dst(flow) ^
4111 		(__force u32)flow_get_u32_src(flow);
4112 	hash ^= (hash >> 16);
4113 	hash ^= (hash >> 8);
4114 
4115 	/* discard lowest hash bit to deal with the common even ports pattern */
4116 	if (xmit_policy == BOND_XMIT_POLICY_LAYER34 ||
4117 		xmit_policy == BOND_XMIT_POLICY_ENCAP34)
4118 		return hash >> 1;
4119 
4120 	return hash;
4121 }
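/* Sketch of the fold above: the source and destination addresses are XORed
 * into the ports/ICMP based hash and then folded (hash >> 16, hash >> 8) so
 * that high-order address bits also influence the low byte used by
 * "hash % slave count".  For the layer3+4 policies the lowest bit is then
 * dropped to counter the common even-port pattern noted in the comment above.
 */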
4122 
4123 /* Generate hash based on xmit policy. If @skb is given it is used to linearize
4124  * the data as required, but this function can be used without it if the data is
4125  * known to be linear (e.g. with xdp_buff).
4126  */
4127 static u32 __bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, const void *data,
4128 			    __be16 l2_proto, int mhoff, int nhoff, int hlen)
4129 {
4130 	struct flow_keys flow;
4131 	u32 hash;
4132 
4133 	if (bond->params.xmit_policy == BOND_XMIT_POLICY_VLAN_SRCMAC)
4134 		return bond_vlan_srcmac_hash(skb, data, mhoff, hlen);
4135 
4136 	if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 ||
4137 	    !bond_flow_dissect(bond, skb, data, l2_proto, nhoff, hlen, &flow))
4138 		return bond_eth_hash(skb, data, mhoff, hlen);
4139 
4140 	if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 ||
4141 	    bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23) {
4142 		hash = bond_eth_hash(skb, data, mhoff, hlen);
4143 	} else {
4144 		if (flow.icmp.id)
4145 			memcpy(&hash, &flow.icmp, sizeof(hash));
4146 		else
4147 			memcpy(&hash, &flow.ports.ports, sizeof(hash));
4148 	}
4149 
4150 	return bond_ip_hash(hash, &flow, bond->params.xmit_policy);
4151 }
4152 
4153 /**
4154  * bond_xmit_hash - generate a hash value based on the xmit policy
4155  * @bond: bonding device
4156  * @skb: buffer to use for headers
4157  *
4158  * This function will extract the necessary headers from the skb buffer and use
4159  * them to generate a hash based on the xmit_policy set in the bonding device
4160  */
4161 u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
4162 {
4163 	if (bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP34 &&
4164 	    skb->l4_hash)
4165 		return skb->hash;
4166 
4167 	return __bond_xmit_hash(bond, skb, skb->data, skb->protocol,
4168 				skb_mac_offset(skb), skb_network_offset(skb),
4169 				skb_headlen(skb));
4170 }
4171 
4172 /**
4173  * bond_xmit_hash_xdp - generate a hash value based on the xmit policy
4174  * @bond: bonding device
4175  * @xdp: buffer to use for headers
4176  *
4177  * The XDP variant of bond_xmit_hash.
4178  */
4179 static u32 bond_xmit_hash_xdp(struct bonding *bond, struct xdp_buff *xdp)
4180 {
4181 	struct ethhdr *eth;
4182 
4183 	if (xdp->data + sizeof(struct ethhdr) > xdp->data_end)
4184 		return 0;
4185 
4186 	eth = (struct ethhdr *)xdp->data;
4187 
4188 	return __bond_xmit_hash(bond, NULL, xdp->data, eth->h_proto, 0,
4189 				sizeof(struct ethhdr), xdp->data_end - xdp->data);
4190 }
4191 
4192 /*-------------------------- Device entry points ----------------------------*/
4193 
4194 void bond_work_init_all(struct bonding *bond)
4195 {
4196 	INIT_DELAYED_WORK(&bond->mcast_work,
4197 			  bond_resend_igmp_join_requests_delayed);
4198 	INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
4199 	INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
4200 	INIT_DELAYED_WORK(&bond->arp_work, bond_arp_monitor);
4201 	INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
4202 	INIT_DELAYED_WORK(&bond->slave_arr_work, bond_slave_arr_handler);
4203 }
4204 
4205 static void bond_work_cancel_all(struct bonding *bond)
4206 {
4207 	cancel_delayed_work_sync(&bond->mii_work);
4208 	cancel_delayed_work_sync(&bond->arp_work);
4209 	cancel_delayed_work_sync(&bond->alb_work);
4210 	cancel_delayed_work_sync(&bond->ad_work);
4211 	cancel_delayed_work_sync(&bond->mcast_work);
4212 	cancel_delayed_work_sync(&bond->slave_arr_work);
4213 }
4214 
4215 static int bond_open(struct net_device *bond_dev)
4216 {
4217 	struct bonding *bond = netdev_priv(bond_dev);
4218 	struct list_head *iter;
4219 	struct slave *slave;
4220 
4221 	if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN && !bond->rr_tx_counter) {
4222 		bond->rr_tx_counter = alloc_percpu(u32);
4223 		if (!bond->rr_tx_counter)
4224 			return -ENOMEM;
4225 	}
4226 
4227 	/* reset slave->backup and slave->inactive */
4228 	if (bond_has_slaves(bond)) {
4229 		bond_for_each_slave(bond, slave, iter) {
4230 			if (bond_uses_primary(bond) &&
4231 			    slave != rcu_access_pointer(bond->curr_active_slave)) {
4232 				bond_set_slave_inactive_flags(slave,
4233 							      BOND_SLAVE_NOTIFY_NOW);
4234 			} else if (BOND_MODE(bond) != BOND_MODE_8023AD) {
4235 				bond_set_slave_active_flags(slave,
4236 							    BOND_SLAVE_NOTIFY_NOW);
4237 			}
4238 		}
4239 	}
4240 
4241 	if (bond_is_lb(bond)) {
4242 		/* bond_alb_initialize must be called before the timer
4243 		 * is started.
4244 		 */
4245 		if (bond_alb_initialize(bond, (BOND_MODE(bond) == BOND_MODE_ALB)))
4246 			return -ENOMEM;
4247 		if (bond->params.tlb_dynamic_lb || BOND_MODE(bond) == BOND_MODE_ALB)
4248 			queue_delayed_work(bond->wq, &bond->alb_work, 0);
4249 	}
4250 
4251 	if (bond->params.miimon)  /* link check interval, in milliseconds. */
4252 		queue_delayed_work(bond->wq, &bond->mii_work, 0);
4253 
4254 	if (bond->params.arp_interval) {  /* arp interval, in milliseconds. */
4255 		queue_delayed_work(bond->wq, &bond->arp_work, 0);
4256 		bond->recv_probe = bond_rcv_validate;
4257 	}
4258 
4259 	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
4260 		queue_delayed_work(bond->wq, &bond->ad_work, 0);
4261 		/* register to receive LACPDUs */
4262 		bond->recv_probe = bond_3ad_lacpdu_recv;
4263 		bond_3ad_initiate_agg_selection(bond, 1);
4264 
4265 		bond_for_each_slave(bond, slave, iter)
4266 			dev_mc_add(slave->dev, lacpdu_mcast_addr);
4267 	}
4268 
4269 	if (bond_mode_can_use_xmit_hash(bond))
4270 		bond_update_slave_arr(bond, NULL);
4271 
4272 	return 0;
4273 }
4274 
4275 static int bond_close(struct net_device *bond_dev)
4276 {
4277 	struct bonding *bond = netdev_priv(bond_dev);
4278 	struct slave *slave;
4279 
4280 	bond_work_cancel_all(bond);
4281 	bond->send_peer_notif = 0;
4282 	if (bond_is_lb(bond))
4283 		bond_alb_deinitialize(bond);
4284 	bond->recv_probe = NULL;
4285 
4286 	if (bond_uses_primary(bond)) {
4287 		rcu_read_lock();
4288 		slave = rcu_dereference(bond->curr_active_slave);
4289 		if (slave)
4290 			bond_hw_addr_flush(bond_dev, slave->dev);
4291 		rcu_read_unlock();
4292 	} else {
4293 		struct list_head *iter;
4294 
4295 		bond_for_each_slave(bond, slave, iter)
4296 			bond_hw_addr_flush(bond_dev, slave->dev);
4297 	}
4298 
4299 	return 0;
4300 }
4301 
4302 /* fold stats, assuming all rtnl_link_stats64 fields are u64, while
4303  * allowing for drivers that provide 32bit values only.
4304  */
4305 static void bond_fold_stats(struct rtnl_link_stats64 *_res,
4306 			    const struct rtnl_link_stats64 *_new,
4307 			    const struct rtnl_link_stats64 *_old)
4308 {
4309 	const u64 *new = (const u64 *)_new;
4310 	const u64 *old = (const u64 *)_old;
4311 	u64 *res = (u64 *)_res;
4312 	int i;
4313 
4314 	for (i = 0; i < sizeof(*_res) / sizeof(u64); i++) {
4315 		u64 nv = new[i];
4316 		u64 ov = old[i];
4317 		s64 delta = nv - ov;
4318 
4319 		/* detects if this particular field is 32bit only */
4320 		if (((nv | ov) >> 32) == 0)
4321 			delta = (s64)(s32)((u32)nv - (u32)ov);
4322 
4323 		/* filter anomalies, some drivers reset their stats
4324 		 * at down/up events.
4325 		 */
4326 		if (delta > 0)
4327 			res[i] += delta;
4328 	}
4329 }
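/* Worked example of the 32bit detection above (values assumed): if a driver
 * only fills the low 32 bits, both the old and new values have clear upper
 * halves, so ((nv | ov) >> 32) == 0 and the delta is recomputed as a signed
 * 32bit difference.  A wrap such as old 0xfffffff0 -> new 0x00000010 then
 * yields
 *
 *	delta = (s64)(s32)((u32)0x00000010 - (u32)0xfffffff0);	== 0x20
 *
 * instead of a huge negative delta that the "delta > 0" check would discard.
 */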
4330 
4331 #ifdef CONFIG_LOCKDEP
4332 static int bond_get_lowest_level_rcu(struct net_device *dev)
4333 {
4334 	struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
4335 	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
4336 	int cur = 0, max = 0;
4337 
4338 	now = dev;
4339 	iter = &dev->adj_list.lower;
4340 
4341 	while (1) {
4342 		next = NULL;
4343 		while (1) {
4344 			ldev = netdev_next_lower_dev_rcu(now, &iter);
4345 			if (!ldev)
4346 				break;
4347 
4348 			next = ldev;
4349 			niter = &ldev->adj_list.lower;
4350 			dev_stack[cur] = now;
4351 			iter_stack[cur++] = iter;
4352 			if (max <= cur)
4353 				max = cur;
4354 			break;
4355 		}
4356 
4357 		if (!next) {
4358 			if (!cur)
4359 				return max;
4360 			next = dev_stack[--cur];
4361 			niter = iter_stack[cur];
4362 		}
4363 
4364 		now = next;
4365 		iter = niter;
4366 	}
4367 
4368 	return max;
4369 }
4370 #endif
4371 
4372 static void bond_get_stats(struct net_device *bond_dev,
4373 			   struct rtnl_link_stats64 *stats)
4374 {
4375 	struct bonding *bond = netdev_priv(bond_dev);
4376 	struct rtnl_link_stats64 temp;
4377 	struct list_head *iter;
4378 	struct slave *slave;
4379 	int nest_level = 0;
4380 
4381 
4382 	rcu_read_lock();
4383 #ifdef CONFIG_LOCKDEP
4384 	nest_level = bond_get_lowest_level_rcu(bond_dev);
4385 #endif
4386 
4387 	spin_lock_nested(&bond->stats_lock, nest_level);
4388 	memcpy(stats, &bond->bond_stats, sizeof(*stats));
4389 
4390 	bond_for_each_slave_rcu(bond, slave, iter) {
4391 		const struct rtnl_link_stats64 *new =
4392 			dev_get_stats(slave->dev, &temp);
4393 
4394 		bond_fold_stats(stats, new, &slave->slave_stats);
4395 
4396 		/* save off the slave stats for the next run */
4397 		memcpy(&slave->slave_stats, new, sizeof(*new));
4398 	}
4399 
4400 	memcpy(&bond->bond_stats, stats, sizeof(*stats));
4401 	spin_unlock(&bond->stats_lock);
4402 	rcu_read_unlock();
4403 }
4404 
4405 static int bond_eth_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
4406 {
4407 	struct bonding *bond = netdev_priv(bond_dev);
4408 	struct mii_ioctl_data *mii = NULL;
4409 	const struct net_device_ops *ops;
4410 	struct net_device *real_dev;
4411 	struct hwtstamp_config cfg;
4412 	struct ifreq ifrr;
4413 	int res = 0;
4414 
4415 	netdev_dbg(bond_dev, "bond_eth_ioctl: cmd=%d\n", cmd);
4416 
4417 	switch (cmd) {
4418 	case SIOCGMIIPHY:
4419 		mii = if_mii(ifr);
4420 		if (!mii)
4421 			return -EINVAL;
4422 
4423 		mii->phy_id = 0;
4424 		fallthrough;
4425 	case SIOCGMIIREG:
4426 		/* We do this again just in case we were called by SIOCGMIIREG
4427 		 * instead of SIOCGMIIPHY.
4428 		 */
4429 		mii = if_mii(ifr);
4430 		if (!mii)
4431 			return -EINVAL;
4432 
4433 		if (mii->reg_num == 1) {
4434 			mii->val_out = 0;
4435 			if (netif_carrier_ok(bond->dev))
4436 				mii->val_out = BMSR_LSTATUS;
4437 		}
4438 
4439 		break;
4440 	case SIOCSHWTSTAMP:
4441 		if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
4442 			return -EFAULT;
4443 
4444 		if (!(cfg.flags & HWTSTAMP_FLAG_BONDED_PHC_INDEX))
4445 			return -EOPNOTSUPP;
4446 
4447 		fallthrough;
4448 	case SIOCGHWTSTAMP:
4449 		real_dev = bond_option_active_slave_get_rcu(bond);
4450 		if (!real_dev)
4451 			return -EOPNOTSUPP;
4452 
4453 		strscpy_pad(ifrr.ifr_name, real_dev->name, IFNAMSIZ);
4454 		ifrr.ifr_ifru = ifr->ifr_ifru;
4455 
4456 		ops = real_dev->netdev_ops;
4457 		if (netif_device_present(real_dev) && ops->ndo_eth_ioctl) {
4458 			res = ops->ndo_eth_ioctl(real_dev, &ifrr, cmd);
4459 			if (res)
4460 				return res;
4461 
4462 			ifr->ifr_ifru = ifrr.ifr_ifru;
4463 			if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
4464 				return -EFAULT;
4465 
4466 			/* Set the BOND_PHC_INDEX flag to notify user space */
4467 			cfg.flags |= HWTSTAMP_FLAG_BONDED_PHC_INDEX;
4468 
4469 			return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ?
4470 				-EFAULT : 0;
4471 		}
4472 		fallthrough;
4473 	default:
4474 		res = -EOPNOTSUPP;
4475 	}
4476 
4477 	return res;
4478 }
4479 
4480 static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
4481 {
4482 	struct bonding *bond = netdev_priv(bond_dev);
4483 	struct net_device *slave_dev = NULL;
4484 	struct ifbond k_binfo;
4485 	struct ifbond __user *u_binfo = NULL;
4486 	struct ifslave k_sinfo;
4487 	struct ifslave __user *u_sinfo = NULL;
4488 	struct bond_opt_value newval;
4489 	struct net *net;
4490 	int res = 0;
4491 
4492 	netdev_dbg(bond_dev, "bond_ioctl: cmd=%d\n", cmd);
4493 
4494 	switch (cmd) {
4495 	case SIOCBONDINFOQUERY:
4496 		u_binfo = (struct ifbond __user *)ifr->ifr_data;
4497 
4498 		if (copy_from_user(&k_binfo, u_binfo, sizeof(ifbond)))
4499 			return -EFAULT;
4500 
4501 		bond_info_query(bond_dev, &k_binfo);
4502 		if (copy_to_user(u_binfo, &k_binfo, sizeof(ifbond)))
4503 			return -EFAULT;
4504 
4505 		return 0;
4506 	case SIOCBONDSLAVEINFOQUERY:
4507 		u_sinfo = (struct ifslave __user *)ifr->ifr_data;
4508 
4509 		if (copy_from_user(&k_sinfo, u_sinfo, sizeof(ifslave)))
4510 			return -EFAULT;
4511 
4512 		res = bond_slave_info_query(bond_dev, &k_sinfo);
4513 		if (res == 0 &&
4514 		    copy_to_user(u_sinfo, &k_sinfo, sizeof(ifslave)))
4515 			return -EFAULT;
4516 
4517 		return res;
4518 	default:
4519 		break;
4520 	}
4521 
4522 	net = dev_net(bond_dev);
4523 
4524 	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
4525 		return -EPERM;
4526 
4527 	slave_dev = __dev_get_by_name(net, ifr->ifr_slave);
4528 
4529 	slave_dbg(bond_dev, slave_dev, "slave_dev=%p:\n", slave_dev);
4530 
4531 	if (!slave_dev)
4532 		return -ENODEV;
4533 
4534 	switch (cmd) {
4535 	case SIOCBONDENSLAVE:
4536 		res = bond_enslave(bond_dev, slave_dev, NULL);
4537 		break;
4538 	case SIOCBONDRELEASE:
4539 		res = bond_release(bond_dev, slave_dev);
4540 		break;
4541 	case SIOCBONDSETHWADDR:
4542 		res = bond_set_dev_addr(bond_dev, slave_dev);
4543 		break;
4544 	case SIOCBONDCHANGEACTIVE:
4545 		bond_opt_initstr(&newval, slave_dev->name);
4546 		res = __bond_opt_set_notify(bond, BOND_OPT_ACTIVE_SLAVE,
4547 					    &newval);
4548 		break;
4549 	default:
4550 		res = -EOPNOTSUPP;
4551 	}
4552 
4553 	return res;
4554 }
4555 
4556 static int bond_siocdevprivate(struct net_device *bond_dev, struct ifreq *ifr,
4557 			       void __user *data, int cmd)
4558 {
4559 	struct ifreq ifrdata = { .ifr_data = data };
4560 
4561 	switch (cmd) {
4562 	case BOND_INFO_QUERY_OLD:
4563 		return bond_do_ioctl(bond_dev, &ifrdata, SIOCBONDINFOQUERY);
4564 	case BOND_SLAVE_INFO_QUERY_OLD:
4565 		return bond_do_ioctl(bond_dev, &ifrdata, SIOCBONDSLAVEINFOQUERY);
4566 	case BOND_ENSLAVE_OLD:
4567 		return bond_do_ioctl(bond_dev, ifr, SIOCBONDENSLAVE);
4568 	case BOND_RELEASE_OLD:
4569 		return bond_do_ioctl(bond_dev, ifr, SIOCBONDRELEASE);
4570 	case BOND_SETHWADDR_OLD:
4571 		return bond_do_ioctl(bond_dev, ifr, SIOCBONDSETHWADDR);
4572 	case BOND_CHANGE_ACTIVE_OLD:
4573 		return bond_do_ioctl(bond_dev, ifr, SIOCBONDCHANGEACTIVE);
4574 	}
4575 
4576 	return -EOPNOTSUPP;
4577 }
4578 
4579 static void bond_change_rx_flags(struct net_device *bond_dev, int change)
4580 {
4581 	struct bonding *bond = netdev_priv(bond_dev);
4582 
4583 	if (change & IFF_PROMISC)
4584 		bond_set_promiscuity(bond,
4585 				     bond_dev->flags & IFF_PROMISC ? 1 : -1);
4586 
4587 	if (change & IFF_ALLMULTI)
4588 		bond_set_allmulti(bond,
4589 				  bond_dev->flags & IFF_ALLMULTI ? 1 : -1);
4590 }
4591 
4592 static void bond_set_rx_mode(struct net_device *bond_dev)
4593 {
4594 	struct bonding *bond = netdev_priv(bond_dev);
4595 	struct list_head *iter;
4596 	struct slave *slave;
4597 
4598 	rcu_read_lock();
4599 	if (bond_uses_primary(bond)) {
4600 		slave = rcu_dereference(bond->curr_active_slave);
4601 		if (slave) {
4602 			dev_uc_sync(slave->dev, bond_dev);
4603 			dev_mc_sync(slave->dev, bond_dev);
4604 		}
4605 	} else {
4606 		bond_for_each_slave_rcu(bond, slave, iter) {
4607 			dev_uc_sync_multiple(slave->dev, bond_dev);
4608 			dev_mc_sync_multiple(slave->dev, bond_dev);
4609 		}
4610 	}
4611 	rcu_read_unlock();
4612 }
4613 
4614 static int bond_neigh_init(struct neighbour *n)
4615 {
4616 	struct bonding *bond = netdev_priv(n->dev);
4617 	const struct net_device_ops *slave_ops;
4618 	struct neigh_parms parms;
4619 	struct slave *slave;
4620 	int ret = 0;
4621 
4622 	rcu_read_lock();
4623 	slave = bond_first_slave_rcu(bond);
4624 	if (!slave)
4625 		goto out;
4626 	slave_ops = slave->dev->netdev_ops;
4627 	if (!slave_ops->ndo_neigh_setup)
4628 		goto out;
4629 
4630 	/* TODO: find another way [1] to implement this.
4631 	 * Passing a zeroed structure is fragile,
4632 	 * but at least we do not pass garbage.
4633 	 *
4634 	 * [1] One way would be that ndo_neigh_setup() never touch
4635 	 *     struct neigh_parms, but propagate the new neigh_setup()
4636 	 *     back to ___neigh_create() / neigh_parms_alloc()
4637 	 */
4638 	memset(&parms, 0, sizeof(parms));
4639 	ret = slave_ops->ndo_neigh_setup(slave->dev, &parms);
4640 
4641 	if (ret)
4642 		goto out;
4643 
4644 	if (parms.neigh_setup)
4645 		ret = parms.neigh_setup(n);
4646 out:
4647 	rcu_read_unlock();
4648 	return ret;
4649 }
4650 
4651 /* The bonding ndo_neigh_setup is called at init time before any
4652  * slave exists. So we must declare a proxy setup function which will
4653  * be used at run time to resolve the actual slave neigh param setup.
4654  *
4655  * It's also called by master devices (such as vlans) to setup their
4656  * underlying devices. In that case - do nothing, we're already set up from
4657  * our init.
4658  */
4659 static int bond_neigh_setup(struct net_device *dev,
4660 			    struct neigh_parms *parms)
4661 {
4662 	/* modify only our neigh_parms */
4663 	if (parms->dev == dev)
4664 		parms->neigh_setup = bond_neigh_init;
4665 
4666 	return 0;
4667 }
4668 
4669 /* Change the MTU of all of a master's slaves to match the master */
4670 static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
4671 {
4672 	struct bonding *bond = netdev_priv(bond_dev);
4673 	struct slave *slave, *rollback_slave;
4674 	struct list_head *iter;
4675 	int res = 0;
4676 
4677 	netdev_dbg(bond_dev, "bond=%p, new_mtu=%d\n", bond, new_mtu);
4678 
4679 	bond_for_each_slave(bond, slave, iter) {
4680 		slave_dbg(bond_dev, slave->dev, "s %p c_m %p\n",
4681 			   slave, slave->dev->netdev_ops->ndo_change_mtu);
4682 
4683 		res = dev_set_mtu(slave->dev, new_mtu);
4684 
4685 		if (res) {
4686 			/* If we failed to set the slave's mtu to the new value
4687 			 * we must abort the operation even in ACTIVE_BACKUP
4688 			 * mode, because if we allow the backup slaves to have
4689 			 * different mtu values than the active slave we'll
4690 			 * need to change their mtu when doing a failover. That
4691 			 * means changing their mtu from timer context, which
4692 			 * is probably not a good idea.
4693 			 */
4694 			slave_dbg(bond_dev, slave->dev, "err %d setting mtu to %d\n",
4695 				  res, new_mtu);
4696 			goto unwind;
4697 		}
4698 	}
4699 
4700 	bond_dev->mtu = new_mtu;
4701 
4702 	return 0;
4703 
4704 unwind:
4705 	/* unwind from head to the slave that failed */
4706 	bond_for_each_slave(bond, rollback_slave, iter) {
4707 		int tmp_res;
4708 
4709 		if (rollback_slave == slave)
4710 			break;
4711 
4712 		tmp_res = dev_set_mtu(rollback_slave->dev, bond_dev->mtu);
4713 		if (tmp_res)
4714 			slave_dbg(bond_dev, rollback_slave->dev, "unwind err %d\n",
4715 				  tmp_res);
4716 	}
4717 
4718 	return res;
4719 }
4720 
4721 /* Change HW address
4722  *
4723  * Note that many devices must be down to change the HW address, and
4724  * downing the master releases all slaves.  We can make bonds full of
4725  * bonding devices to test this, however.
4726  */
4727 static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
4728 {
4729 	struct bonding *bond = netdev_priv(bond_dev);
4730 	struct slave *slave, *rollback_slave;
4731 	struct sockaddr_storage *ss = addr, tmp_ss;
4732 	struct list_head *iter;
4733 	int res = 0;
4734 
4735 	if (BOND_MODE(bond) == BOND_MODE_ALB)
4736 		return bond_alb_set_mac_address(bond_dev, addr);
4737 
4738 
4739 	netdev_dbg(bond_dev, "%s: bond=%p\n", __func__, bond);
4740 
4741 	/* If fail_over_mac is enabled, do nothing and return success.
4742 	 * Returning an error causes ifenslave to fail.
4743 	 */
4744 	if (bond->params.fail_over_mac &&
4745 	    BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
4746 		return 0;
4747 
4748 	if (!is_valid_ether_addr(ss->__data))
4749 		return -EADDRNOTAVAIL;
4750 
4751 	bond_for_each_slave(bond, slave, iter) {
4752 		slave_dbg(bond_dev, slave->dev, "%s: slave=%p\n",
4753 			  __func__, slave);
4754 		res = dev_set_mac_address(slave->dev, addr, NULL);
4755 		if (res) {
4756 			/* TODO: consider downing the slave
4757 			 * and retrying?
4758 			 * User should expect communications
4759 			 * breakage anyway until ARP finishes
4760 			 * updating, so...
4761 			 */
4762 			slave_dbg(bond_dev, slave->dev, "%s: err %d\n",
4763 				  __func__, res);
4764 			goto unwind;
4765 		}
4766 	}
4767 
4768 	/* success */
4769 	dev_addr_set(bond_dev, ss->__data);
4770 	return 0;
4771 
4772 unwind:
4773 	memcpy(tmp_ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
4774 	tmp_ss.ss_family = bond_dev->type;
4775 
4776 	/* unwind from head to the slave that failed */
4777 	bond_for_each_slave(bond, rollback_slave, iter) {
4778 		int tmp_res;
4779 
4780 		if (rollback_slave == slave)
4781 			break;
4782 
4783 		tmp_res = dev_set_mac_address(rollback_slave->dev,
4784 					      (struct sockaddr *)&tmp_ss, NULL);
4785 		if (tmp_res) {
4786 			slave_dbg(bond_dev, rollback_slave->dev, "%s: unwind err %d\n",
4787 				   __func__, tmp_res);
4788 		}
4789 	}
4790 
4791 	return res;
4792 }
4793 
4794 /**
4795  * bond_get_slave_by_id - get xmit slave with slave_id
4796  * @bond: bonding device that is transmitting
4797  * @slave_id: slave id up to slave_cnt-1 through which to transmit
4798  *
4799  * This function tries to get slave with slave_id but in case
4800  * it fails, it tries to find the first available slave for transmission.
4801  */
4802 static struct slave *bond_get_slave_by_id(struct bonding *bond,
4803 					  int slave_id)
4804 {
4805 	struct list_head *iter;
4806 	struct slave *slave;
4807 	int i = slave_id;
4808 
4809 	/* Here we start from the slave with slave_id */
4810 	bond_for_each_slave_rcu(bond, slave, iter) {
4811 		if (--i < 0) {
4812 			if (bond_slave_can_tx(slave))
4813 				return slave;
4814 		}
4815 	}
4816 
4817 	/* Here we start from the first slave up to slave_id */
4818 	i = slave_id;
4819 	bond_for_each_slave_rcu(bond, slave, iter) {
4820 		if (--i < 0)
4821 			break;
4822 		if (bond_slave_can_tx(slave))
4823 			return slave;
4824 	}
4825 	/* no slave that can tx has been found */
4826 	return NULL;
4827 }
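/* Illustrative example (slaves assumed): with three slaves A, B, C and
 * slave_id == 1, the first loop above starts checking at B, then C; if
 * neither can transmit, the second loop wraps around and tries A, so any
 * usable slave is eventually found regardless of where slave_id points.
 */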
4828 
4829 /**
4830  * bond_rr_gen_slave_id - generate slave id based on packets_per_slave
4831  * @bond: bonding device to use
4832  *
4833  * Based on the value of the bonding device's packets_per_slave parameter
4834  * this function generates a slave id, which is usually used as the next
4835  * slave to transmit through.
4836  */
4837 static u32 bond_rr_gen_slave_id(struct bonding *bond)
4838 {
4839 	u32 slave_id;
4840 	struct reciprocal_value reciprocal_packets_per_slave;
4841 	int packets_per_slave = bond->params.packets_per_slave;
4842 
4843 	switch (packets_per_slave) {
4844 	case 0:
4845 		slave_id = get_random_u32();
4846 		break;
4847 	case 1:
4848 		slave_id = this_cpu_inc_return(*bond->rr_tx_counter);
4849 		break;
4850 	default:
4851 		reciprocal_packets_per_slave =
4852 			bond->params.reciprocal_packets_per_slave;
4853 		slave_id = this_cpu_inc_return(*bond->rr_tx_counter);
4854 		slave_id = reciprocal_divide(slave_id,
4855 					     reciprocal_packets_per_slave);
4856 		break;
4857 	}
4858 
4859 	return slave_id;
4860 }
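/* Worked example (packets_per_slave = 3 assumed): the per-cpu counter
 * increments on every packet and reciprocal_divide() maps it to roughly
 * counter / 3, so counter values 1..6 yield slave ids 0, 0, 1, 1, 1, 2 -
 * about three consecutive packets stay on one interface before the
 * round-robin moves on (callers apply "% slave_cnt" to the result).
 */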
4861 
4862 static struct slave *bond_xmit_roundrobin_slave_get(struct bonding *bond,
4863 						    struct sk_buff *skb)
4864 {
4865 	struct slave *slave;
4866 	int slave_cnt;
4867 	u32 slave_id;
4868 
4869 	/* Start with the curr_active_slave that joined the bond as the
4870 	 * default for sending IGMP traffic.  For failover purposes one
4871 	 * needs to maintain some consistency for the interface that will
4872 	 * send the join/membership reports.  The curr_active_slave found
4873 	 * will send all of this type of traffic.
4874 	 */
4875 	if (skb->protocol == htons(ETH_P_IP)) {
4876 		int noff = skb_network_offset(skb);
4877 		struct iphdr *iph;
4878 
4879 		if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph))))
4880 			goto non_igmp;
4881 
4882 		iph = ip_hdr(skb);
4883 		if (iph->protocol == IPPROTO_IGMP) {
4884 			slave = rcu_dereference(bond->curr_active_slave);
4885 			if (slave)
4886 				return slave;
4887 			return bond_get_slave_by_id(bond, 0);
4888 		}
4889 	}
4890 
4891 non_igmp:
4892 	slave_cnt = READ_ONCE(bond->slave_cnt);
4893 	if (likely(slave_cnt)) {
4894 		slave_id = bond_rr_gen_slave_id(bond) % slave_cnt;
4895 		return bond_get_slave_by_id(bond, slave_id);
4896 	}
4897 	return NULL;
4898 }
4899 
4900 static struct slave *bond_xdp_xmit_roundrobin_slave_get(struct bonding *bond,
4901 							struct xdp_buff *xdp)
4902 {
4903 	struct slave *slave;
4904 	int slave_cnt;
4905 	u32 slave_id;
4906 	const struct ethhdr *eth;
4907 	void *data = xdp->data;
4908 
4909 	if (data + sizeof(struct ethhdr) > xdp->data_end)
4910 		goto non_igmp;
4911 
4912 	eth = (struct ethhdr *)data;
4913 	data += sizeof(struct ethhdr);
4914 
4915 	/* See comment on IGMP in bond_xmit_roundrobin_slave_get() */
4916 	if (eth->h_proto == htons(ETH_P_IP)) {
4917 		const struct iphdr *iph;
4918 
4919 		if (data + sizeof(struct iphdr) > xdp->data_end)
4920 			goto non_igmp;
4921 
4922 		iph = (struct iphdr *)data;
4923 
4924 		if (iph->protocol == IPPROTO_IGMP) {
4925 			slave = rcu_dereference(bond->curr_active_slave);
4926 			if (slave)
4927 				return slave;
4928 			return bond_get_slave_by_id(bond, 0);
4929 		}
4930 	}
4931 
4932 non_igmp:
4933 	slave_cnt = READ_ONCE(bond->slave_cnt);
4934 	if (likely(slave_cnt)) {
4935 		slave_id = bond_rr_gen_slave_id(bond) % slave_cnt;
4936 		return bond_get_slave_by_id(bond, slave_id);
4937 	}
4938 	return NULL;
4939 }
4940 
4941 static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb,
4942 					struct net_device *bond_dev)
4943 {
4944 	struct bonding *bond = netdev_priv(bond_dev);
4945 	struct slave *slave;
4946 
4947 	slave = bond_xmit_roundrobin_slave_get(bond, skb);
4948 	if (likely(slave))
4949 		return bond_dev_queue_xmit(bond, skb, slave->dev);
4950 
4951 	return bond_tx_drop(bond_dev, skb);
4952 }
4953 
4954 static struct slave *bond_xmit_activebackup_slave_get(struct bonding *bond)
4955 {
4956 	return rcu_dereference(bond->curr_active_slave);
4957 }
4958 
4959 /* In active-backup mode, we know that bond->curr_active_slave is always valid if
4960  * the bond has a usable interface.
4961  */
4962 static netdev_tx_t bond_xmit_activebackup(struct sk_buff *skb,
4963 					  struct net_device *bond_dev)
4964 {
4965 	struct bonding *bond = netdev_priv(bond_dev);
4966 	struct slave *slave;
4967 
4968 	slave = bond_xmit_activebackup_slave_get(bond);
4969 	if (slave)
4970 		return bond_dev_queue_xmit(bond, skb, slave->dev);
4971 
4972 	return bond_tx_drop(bond_dev, skb);
4973 }
4974 
4975 /* Use this to update slave_array when (a) it's not appropriate to update
4976  * slave_array right away (note that bond_update_slave_arr() may sleep)
4977  * and/or (b) RTNL is not held.
4978  */
4979 void bond_slave_arr_work_rearm(struct bonding *bond, unsigned long delay)
4980 {
4981 	queue_delayed_work(bond->wq, &bond->slave_arr_work, delay);
4982 }
4983 
4984 /* Slave array work handler. Holds only RTNL */
4985 static void bond_slave_arr_handler(struct work_struct *work)
4986 {
4987 	struct bonding *bond = container_of(work, struct bonding,
4988 					    slave_arr_work.work);
4989 	int ret;
4990 
4991 	if (!rtnl_trylock())
4992 		goto err;
4993 
4994 	ret = bond_update_slave_arr(bond, NULL);
4995 	rtnl_unlock();
4996 	if (ret) {
4997 		pr_warn_ratelimited("Failed to update slave array from WT\n");
4998 		goto err;
4999 	}
5000 	return;
5001 
5002 err:
5003 	bond_slave_arr_work_rearm(bond, 1);
5004 }
5005 
5006 static void bond_skip_slave(struct bond_up_slave *slaves,
5007 			    struct slave *skipslave)
5008 {
5009 	int idx;
5010 
5011 	/* Rare situation where caller has asked to skip a specific
5012 	 * slave but allocation failed (most likely!). BTW this is
5013 	 * only possible when the call is initiated from
5014 	 * __bond_release_one(). In this situation, overwrite the
5015 	 * skipslave entry in the array with the last entry from the
5016 	 * array to avoid a situation where the xmit path may choose
5017 	 * this to-be-skipped slave to send a packet out.
5018 	 */
5019 	for (idx = 0; slaves && idx < slaves->count; idx++) {
5020 		if (skipslave == slaves->arr[idx]) {
5021 			slaves->arr[idx] =
5022 				slaves->arr[slaves->count - 1];
5023 			slaves->count--;
5024 			break;
5025 		}
5026 	}
5027 }
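/* Illustrative example (array assumed): skipping slave B from [A, B, C, D]
 * above overwrites B with the last entry and shrinks the count, leaving
 * [A, D, C].  Ordering is not preserved, which is fine because the xmit
 * path only indexes the array by "hash % count".
 */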
5028 
5029 static void bond_set_slave_arr(struct bonding *bond,
5030 			       struct bond_up_slave *usable_slaves,
5031 			       struct bond_up_slave *all_slaves)
5032 {
5033 	struct bond_up_slave *usable, *all;
5034 
5035 	usable = rtnl_dereference(bond->usable_slaves);
5036 	rcu_assign_pointer(bond->usable_slaves, usable_slaves);
5037 	kfree_rcu(usable, rcu);
5038 
5039 	all = rtnl_dereference(bond->all_slaves);
5040 	rcu_assign_pointer(bond->all_slaves, all_slaves);
5041 	kfree_rcu(all, rcu);
5042 }
5043 
5044 static void bond_reset_slave_arr(struct bonding *bond)
5045 {
5046 	struct bond_up_slave *usable, *all;
5047 
5048 	usable = rtnl_dereference(bond->usable_slaves);
5049 	if (usable) {
5050 		RCU_INIT_POINTER(bond->usable_slaves, NULL);
5051 		kfree_rcu(usable, rcu);
5052 	}
5053 
5054 	all = rtnl_dereference(bond->all_slaves);
5055 	if (all) {
5056 		RCU_INIT_POINTER(bond->all_slaves, NULL);
5057 		kfree_rcu(all, rcu);
5058 	}
5059 }
5060 
5061 /* Build the usable slaves array in control path for modes that use xmit-hash
5062  * to determine the slave interface -
5063  * (a) BOND_MODE_8023AD
5064  * (b) BOND_MODE_XOR
5065  * (c) (BOND_MODE_TLB || BOND_MODE_ALB) && tlb_dynamic_lb == 0
5066  *
5067  * The caller is expected to hold RTNL only and NO other lock!
5068  */
5069 int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
5070 {
5071 	struct bond_up_slave *usable_slaves = NULL, *all_slaves = NULL;
5072 	struct slave *slave;
5073 	struct list_head *iter;
5074 	int agg_id = 0;
5075 	int ret = 0;
5076 
5077 	might_sleep();
5078 
5079 	usable_slaves = kzalloc(struct_size(usable_slaves, arr,
5080 					    bond->slave_cnt), GFP_KERNEL);
5081 	all_slaves = kzalloc(struct_size(all_slaves, arr,
5082 					 bond->slave_cnt), GFP_KERNEL);
5083 	if (!usable_slaves || !all_slaves) {
5084 		ret = -ENOMEM;
5085 		goto out;
5086 	}
5087 	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
5088 		struct ad_info ad_info;
5089 
5090 		spin_lock_bh(&bond->mode_lock);
5091 		if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
5092 			spin_unlock_bh(&bond->mode_lock);
5093 			pr_debug("bond_3ad_get_active_agg_info failed\n");
5094 			/* No active aggregator means it's not safe to use
5095 			 * the previous array.
5096 			 */
5097 			bond_reset_slave_arr(bond);
5098 			goto out;
5099 		}
5100 		spin_unlock_bh(&bond->mode_lock);
5101 		agg_id = ad_info.aggregator_id;
5102 	}
5103 	bond_for_each_slave(bond, slave, iter) {
5104 		if (skipslave == slave)
5105 			continue;
5106 
5107 		all_slaves->arr[all_slaves->count++] = slave;
5108 		if (BOND_MODE(bond) == BOND_MODE_8023AD) {
5109 			struct aggregator *agg;
5110 
5111 			agg = SLAVE_AD_INFO(slave)->port.aggregator;
5112 			if (!agg || agg->aggregator_identifier != agg_id)
5113 				continue;
5114 		}
5115 		if (!bond_slave_can_tx(slave))
5116 			continue;
5117 
5118 		slave_dbg(bond->dev, slave->dev, "Adding slave to tx hash array[%d]\n",
5119 			  usable_slaves->count);
5120 
5121 		usable_slaves->arr[usable_slaves->count++] = slave;
5122 	}
5123 
5124 	bond_set_slave_arr(bond, usable_slaves, all_slaves);
5125 	return ret;
5126 out:
5127 	if (ret != 0 && skipslave) {
5128 		bond_skip_slave(rtnl_dereference(bond->all_slaves),
5129 				skipslave);
5130 		bond_skip_slave(rtnl_dereference(bond->usable_slaves),
5131 				skipslave);
5132 	}
5133 	kfree_rcu(all_slaves, rcu);
5134 	kfree_rcu(usable_slaves, rcu);
5135 
5136 	return ret;
5137 }
5138 
5139 static struct slave *bond_xmit_3ad_xor_slave_get(struct bonding *bond,
5140 						 struct sk_buff *skb,
5141 						 struct bond_up_slave *slaves)
5142 {
5143 	struct slave *slave;
5144 	unsigned int count;
5145 	u32 hash;
5146 
5147 	hash = bond_xmit_hash(bond, skb);
5148 	count = slaves ? READ_ONCE(slaves->count) : 0;
5149 	if (unlikely(!count))
5150 		return NULL;
5151 
5152 	slave = slaves->arr[hash % count];
5153 	return slave;
5154 }
5155 
5156 static struct slave *bond_xdp_xmit_3ad_xor_slave_get(struct bonding *bond,
5157 						     struct xdp_buff *xdp)
5158 {
5159 	struct bond_up_slave *slaves;
5160 	unsigned int count;
5161 	u32 hash;
5162 
5163 	hash = bond_xmit_hash_xdp(bond, xdp);
5164 	slaves = rcu_dereference(bond->usable_slaves);
5165 	count = slaves ? READ_ONCE(slaves->count) : 0;
5166 	if (unlikely(!count))
5167 		return NULL;
5168 
5169 	return slaves->arr[hash % count];
5170 }
5171 
5172 /* Use this Xmit function for 3AD as well as XOR modes. The current
5173  * usable slave array is formed in the control path. The xmit function
5174  * just calculates hash and sends the packet out.
5175  */
5176 static netdev_tx_t bond_3ad_xor_xmit(struct sk_buff *skb,
5177 				     struct net_device *dev)
5178 {
5179 	struct bonding *bond = netdev_priv(dev);
5180 	struct bond_up_slave *slaves;
5181 	struct slave *slave;
5182 
5183 	slaves = rcu_dereference(bond->usable_slaves);
5184 	slave = bond_xmit_3ad_xor_slave_get(bond, skb, slaves);
5185 	if (likely(slave))
5186 		return bond_dev_queue_xmit(bond, skb, slave->dev);
5187 
5188 	return bond_tx_drop(dev, skb);
5189 }
5190 
5191 /* in broadcast mode, we send everything to all usable interfaces. */
5192 static netdev_tx_t bond_xmit_broadcast(struct sk_buff *skb,
5193 				       struct net_device *bond_dev)
5194 {
5195 	struct bonding *bond = netdev_priv(bond_dev);
5196 	struct slave *slave = NULL;
5197 	struct list_head *iter;
5198 	bool xmit_suc = false;
5199 	bool skb_used = false;
5200 
5201 	bond_for_each_slave_rcu(bond, slave, iter) {
5202 		struct sk_buff *skb2;
5203 
5204 		if (!(bond_slave_is_up(slave) && slave->link == BOND_LINK_UP))
5205 			continue;
5206 
5207 		if (bond_is_last_slave(bond, slave)) {
5208 			skb2 = skb;
5209 			skb_used = true;
5210 		} else {
5211 			skb2 = skb_clone(skb, GFP_ATOMIC);
5212 			if (!skb2) {
5213 				net_err_ratelimited("%s: Error: %s: skb_clone() failed\n",
5214 						    bond_dev->name, __func__);
5215 				continue;
5216 			}
5217 		}
5218 
5219 		if (bond_dev_queue_xmit(bond, skb2, slave->dev) == NETDEV_TX_OK)
5220 			xmit_suc = true;
5221 	}
5222 
5223 	if (!skb_used)
5224 		dev_kfree_skb_any(skb);
5225 
5226 	if (xmit_suc)
5227 		return NETDEV_TX_OK;
5228 
5229 	dev_core_stats_tx_dropped_inc(bond_dev);
5230 	return NET_XMIT_DROP;
5231 }
5232 
5233 /*------------------------- Device initialization ---------------------------*/
5234 
5235 /* Look up the slave that corresponds to a qid */
5236 static inline int bond_slave_override(struct bonding *bond,
5237 				      struct sk_buff *skb)
5238 {
5239 	struct slave *slave = NULL;
5240 	struct list_head *iter;
5241 
5242 	if (!skb_rx_queue_recorded(skb))
5243 		return 1;
5244 
5245 	/* Find out if any slaves have the same mapping as this skb. */
5246 	bond_for_each_slave_rcu(bond, slave, iter) {
5247 		if (slave->queue_id == skb_get_queue_mapping(skb)) {
5248 			if (bond_slave_is_up(slave) &&
5249 			    slave->link == BOND_LINK_UP) {
5250 				bond_dev_queue_xmit(bond, skb, slave->dev);
5251 				return 0;
5252 			}
5253 			/* If the slave isn't UP, use default transmit policy. */
5254 			break;
5255 		}
5256 	}
5257 
5258 	return 1;
5259 }
5260 
5261 
5262 static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
5263 			     struct net_device *sb_dev)
5264 {
5265 	/* This helper function exists to help dev_pick_tx get the correct
5266 	 * destination queue.  Using a helper function skips a call to
5267 	 * skb_tx_hash and will put the skbs in the queue we expect on their
5268 	 * way down to the bonding driver.
5269 	 */
5270 	u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
5271 
5272 	/* Save the original txq to restore before passing to the driver */
5273 	qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb_get_queue_mapping(skb);
5274 
5275 	if (unlikely(txq >= dev->real_num_tx_queues)) {
5276 		do {
5277 			txq -= dev->real_num_tx_queues;
5278 		} while (txq >= dev->real_num_tx_queues);
5279 	}
5280 	return txq;
5281 }
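
/* Illustrative example (not part of the driver): the loop above is a modulo
 * reduction done by repeated subtraction.  With the hypothetical values
 * real_num_tx_queues == 16 and a recorded rx queue of 18:
 *
 *	txq = 18;
 *	txq -= 16;	// 2, now below 16, so the loop exits
 *
 * i.e. txq ends up as 18 % 16 == 2.  The original queue mapping is stashed in
 * qdisc_skb_cb(skb)->slave_dev_queue_mapping beforehand so it can be restored
 * before the skb reaches the slave's own driver.
 */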
5282 
5283 static struct net_device *bond_xmit_get_slave(struct net_device *master_dev,
5284 					      struct sk_buff *skb,
5285 					      bool all_slaves)
5286 {
5287 	struct bonding *bond = netdev_priv(master_dev);
5288 	struct bond_up_slave *slaves;
5289 	struct slave *slave = NULL;
5290 
5291 	switch (BOND_MODE(bond)) {
5292 	case BOND_MODE_ROUNDROBIN:
5293 		slave = bond_xmit_roundrobin_slave_get(bond, skb);
5294 		break;
5295 	case BOND_MODE_ACTIVEBACKUP:
5296 		slave = bond_xmit_activebackup_slave_get(bond);
5297 		break;
5298 	case BOND_MODE_8023AD:
5299 	case BOND_MODE_XOR:
5300 		if (all_slaves)
5301 			slaves = rcu_dereference(bond->all_slaves);
5302 		else
5303 			slaves = rcu_dereference(bond->usable_slaves);
5304 		slave = bond_xmit_3ad_xor_slave_get(bond, skb, slaves);
5305 		break;
5306 	case BOND_MODE_BROADCAST:
5307 		break;
5308 	case BOND_MODE_ALB:
5309 		slave = bond_xmit_alb_slave_get(bond, skb);
5310 		break;
5311 	case BOND_MODE_TLB:
5312 		slave = bond_xmit_tlb_slave_get(bond, skb);
5313 		break;
5314 	default:
5315 		/* Should never happen, mode already checked */
5316 		WARN_ONCE(true, "Unknown bonding mode");
5317 		break;
5318 	}
5319 
5320 	if (slave)
5321 		return slave->dev;
5322 	return NULL;
5323 }
5324 
5325 static void bond_sk_to_flow(struct sock *sk, struct flow_keys *flow)
5326 {
5327 	switch (sk->sk_family) {
5328 #if IS_ENABLED(CONFIG_IPV6)
5329 	case AF_INET6:
5330 		if (ipv6_only_sock(sk) ||
5331 		    ipv6_addr_type(&sk->sk_v6_daddr) != IPV6_ADDR_MAPPED) {
5332 			flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
5333 			flow->addrs.v6addrs.src = inet6_sk(sk)->saddr;
5334 			flow->addrs.v6addrs.dst = sk->sk_v6_daddr;
5335 			break;
5336 		}
5337 		fallthrough;
5338 #endif
5339 	default: /* AF_INET */
5340 		flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
5341 		flow->addrs.v4addrs.src = inet_sk(sk)->inet_rcv_saddr;
5342 		flow->addrs.v4addrs.dst = inet_sk(sk)->inet_daddr;
5343 		break;
5344 	}
5345 
5346 	flow->ports.src = inet_sk(sk)->inet_sport;
5347 	flow->ports.dst = inet_sk(sk)->inet_dport;
5348 }
5349 
5350 /**
5351  * bond_sk_hash_l34 - generate a hash value based on the socket's L3 and L4 fields
5352  * @sk: socket to use for headers
5353  *
5354  * This function will extract the necessary fields from the socket and use
5355  * them to generate a hash based on the LAYER34 xmit_policy.
5356  * Assumes that sk is a TCP or UDP socket.
5357  */
5358 static u32 bond_sk_hash_l34(struct sock *sk)
5359 {
5360 	struct flow_keys flow;
5361 	u32 hash;
5362 
5363 	bond_sk_to_flow(sk, &flow);
5364 
5365 	/* L4 */
5366 	memcpy(&hash, &flow.ports.ports, sizeof(hash));
5367 	/* L3 */
5368 	return bond_ip_hash(hash, &flow, BOND_XMIT_POLICY_LAYER34);
5369 }
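
/* Illustrative sketch (not part of the driver): the two 16-bit ports are
 * copied verbatim into the initial 32-bit hash, and bond_ip_hash() (defined
 * earlier in this file, not shown in this excerpt) then folds in the L3
 * addresses.  Roughly, for an IPv4 TCP socket:
 *
 *	u32 hash;
 *	memcpy(&hash, &flow.ports.ports, sizeof(hash));	// sport and dport
 *	hash ^= saddr ^ daddr;				// approximate L3 fold only
 *
 * The exact mixing is whatever bond_ip_hash() does for
 * BOND_XMIT_POLICY_LAYER34; the point is that both L3 and L4 fields
 * contribute, so different flows between the same two hosts can land on
 * different slaves.
 */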
5370 
5371 static struct net_device *__bond_sk_get_lower_dev(struct bonding *bond,
5372 						  struct sock *sk)
5373 {
5374 	struct bond_up_slave *slaves;
5375 	struct slave *slave;
5376 	unsigned int count;
5377 	u32 hash;
5378 
5379 	slaves = rcu_dereference(bond->usable_slaves);
5380 	count = slaves ? READ_ONCE(slaves->count) : 0;
5381 	if (unlikely(!count))
5382 		return NULL;
5383 
5384 	hash = bond_sk_hash_l34(sk);
5385 	slave = slaves->arr[hash % count];
5386 
5387 	return slave->dev;
5388 }
5389 
5390 static struct net_device *bond_sk_get_lower_dev(struct net_device *dev,
5391 						struct sock *sk)
5392 {
5393 	struct bonding *bond = netdev_priv(dev);
5394 	struct net_device *lower = NULL;
5395 
5396 	rcu_read_lock();
5397 	if (bond_sk_check(bond))
5398 		lower = __bond_sk_get_lower_dev(bond, sk);
5399 	rcu_read_unlock();
5400 
5401 	return lower;
5402 }
5403 
5404 #if IS_ENABLED(CONFIG_TLS_DEVICE)
5405 static netdev_tx_t bond_tls_device_xmit(struct bonding *bond, struct sk_buff *skb,
5406 					struct net_device *dev)
5407 {
5408 	struct net_device *tls_netdev = rcu_dereference(tls_get_ctx(skb->sk)->netdev);
5409 
5410 	/* tls_netdev might become NULL, even if tls_is_sk_tx_device_offloaded()
5411 	 * returned true, if tls_device_down() is running in parallel; that's OK,
5412 	 * because bond_get_slave_by_dev() has a NULL check.
5413 	 */
5414 	if (likely(bond_get_slave_by_dev(bond, tls_netdev)))
5415 		return bond_dev_queue_xmit(bond, skb, tls_netdev);
5416 	return bond_tx_drop(dev, skb);
5417 }
5418 #endif
5419 
5420 static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
5421 {
5422 	struct bonding *bond = netdev_priv(dev);
5423 
5424 	if (bond_should_override_tx_queue(bond) &&
5425 	    !bond_slave_override(bond, skb))
5426 		return NETDEV_TX_OK;
5427 
5428 #if IS_ENABLED(CONFIG_TLS_DEVICE)
5429 	if (skb->sk && tls_is_sk_tx_device_offloaded(skb->sk))
5430 		return bond_tls_device_xmit(bond, skb, dev);
5431 #endif
5432 
5433 	switch (BOND_MODE(bond)) {
5434 	case BOND_MODE_ROUNDROBIN:
5435 		return bond_xmit_roundrobin(skb, dev);
5436 	case BOND_MODE_ACTIVEBACKUP:
5437 		return bond_xmit_activebackup(skb, dev);
5438 	case BOND_MODE_8023AD:
5439 	case BOND_MODE_XOR:
5440 		return bond_3ad_xor_xmit(skb, dev);
5441 	case BOND_MODE_BROADCAST:
5442 		return bond_xmit_broadcast(skb, dev);
5443 	case BOND_MODE_ALB:
5444 		return bond_alb_xmit(skb, dev);
5445 	case BOND_MODE_TLB:
5446 		return bond_tlb_xmit(skb, dev);
5447 	default:
5448 		/* Should never happen, mode already checked */
5449 		netdev_err(dev, "Unknown bonding mode %d\n", BOND_MODE(bond));
5450 		WARN_ON_ONCE(1);
5451 		return bond_tx_drop(dev, skb);
5452 	}
5453 }
5454 
5455 static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
5456 {
5457 	struct bonding *bond = netdev_priv(dev);
5458 	netdev_tx_t ret = NETDEV_TX_OK;
5459 
5460 	/* If we risk deadlock from transmitting this in the
5461 	 * netpoll path, tell netpoll to queue the frame for later tx
5462 	 */
5463 	if (unlikely(is_netpoll_tx_blocked(dev)))
5464 		return NETDEV_TX_BUSY;
5465 
5466 	rcu_read_lock();
5467 	if (bond_has_slaves(bond))
5468 		ret = __bond_start_xmit(skb, dev);
5469 	else
5470 		ret = bond_tx_drop(dev, skb);
5471 	rcu_read_unlock();
5472 
5473 	return ret;
5474 }
5475 
5476 static struct net_device *
5477 bond_xdp_get_xmit_slave(struct net_device *bond_dev, struct xdp_buff *xdp)
5478 {
5479 	struct bonding *bond = netdev_priv(bond_dev);
5480 	struct slave *slave;
5481 
5482 	/* Caller needs to hold rcu_read_lock() */
5483 
5484 	switch (BOND_MODE(bond)) {
5485 	case BOND_MODE_ROUNDROBIN:
5486 		slave = bond_xdp_xmit_roundrobin_slave_get(bond, xdp);
5487 		break;
5488 
5489 	case BOND_MODE_ACTIVEBACKUP:
5490 		slave = bond_xmit_activebackup_slave_get(bond);
5491 		break;
5492 
5493 	case BOND_MODE_8023AD:
5494 	case BOND_MODE_XOR:
5495 		slave = bond_xdp_xmit_3ad_xor_slave_get(bond, xdp);
5496 		break;
5497 
5498 	default:
5499 		/* Should never happen. Mode guarded by bond_xdp_check() */
5500 		netdev_err(bond_dev, "Unknown bonding mode %d for xdp xmit\n", BOND_MODE(bond));
5501 		WARN_ON_ONCE(1);
5502 		return NULL;
5503 	}
5504 
5505 	if (slave)
5506 		return slave->dev;
5507 
5508 	return NULL;
5509 }
5510 
5511 static int bond_xdp_xmit(struct net_device *bond_dev,
5512 			 int n, struct xdp_frame **frames, u32 flags)
5513 {
5514 	int nxmit, err = -ENXIO;
5515 
5516 	rcu_read_lock();
5517 
5518 	for (nxmit = 0; nxmit < n; nxmit++) {
5519 		struct xdp_frame *frame = frames[nxmit];
5520 		struct xdp_frame *frames1[] = {frame};
5521 		struct net_device *slave_dev;
5522 		struct xdp_buff xdp;
5523 
5524 		xdp_convert_frame_to_buff(frame, &xdp);
5525 
5526 		slave_dev = bond_xdp_get_xmit_slave(bond_dev, &xdp);
5527 		if (!slave_dev) {
5528 			err = -ENXIO;
5529 			break;
5530 		}
5531 
5532 		err = slave_dev->netdev_ops->ndo_xdp_xmit(slave_dev, 1, frames1, flags);
5533 		if (err < 1)
5534 			break;
5535 	}
5536 
5537 	rcu_read_unlock();
5538 
5539 	/* If an error happened on the first frame, pass the error up; otherwise
5540 	 * report the number of frames that were transmitted.
5541 	 */
5542 	if (err < 0)
5543 		return (nxmit == 0 ? err : nxmit);
5544 
5545 	return nxmit;
5546 }
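
/* Illustrative sketch (not part of the driver): how a caller would interpret
 * the return value of bond_xdp_xmit().  A negative value is only returned
 * when the very first frame fails; otherwise the number of frames actually
 * sent is returned and, per the ndo_xdp_xmit contract, the caller frees the
 * rest:
 *
 *	sent = bond_xdp_xmit(dev, n, frames, flags);	// hypothetical call site
 *	if (sent < 0)
 *		sent = 0;				// nothing went out
 *	for (i = sent; i < n; i++)
 *		xdp_return_frame(frames[i]);		// release unsent frames
 */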
5547 
5548 static int bond_xdp_set(struct net_device *dev, struct bpf_prog *prog,
5549 			struct netlink_ext_ack *extack)
5550 {
5551 	struct bonding *bond = netdev_priv(dev);
5552 	struct list_head *iter;
5553 	struct slave *slave, *rollback_slave;
5554 	struct bpf_prog *old_prog;
5555 	struct netdev_bpf xdp = {
5556 		.command = XDP_SETUP_PROG,
5557 		.flags   = 0,
5558 		.prog    = prog,
5559 		.extack  = extack,
5560 	};
5561 	int err;
5562 
5563 	ASSERT_RTNL();
5564 
5565 	if (!bond_xdp_check(bond))
5566 		return -EOPNOTSUPP;
5567 
5568 	old_prog = bond->xdp_prog;
5569 	bond->xdp_prog = prog;
5570 
5571 	bond_for_each_slave(bond, slave, iter) {
5572 		struct net_device *slave_dev = slave->dev;
5573 
5574 		if (!slave_dev->netdev_ops->ndo_bpf ||
5575 		    !slave_dev->netdev_ops->ndo_xdp_xmit) {
5576 			SLAVE_NL_ERR(dev, slave_dev, extack,
5577 				     "Slave device does not support XDP");
5578 			err = -EOPNOTSUPP;
5579 			goto err;
5580 		}
5581 
5582 		if (dev_xdp_prog_count(slave_dev) > 0) {
5583 			SLAVE_NL_ERR(dev, slave_dev, extack,
5584 				     "Slave has XDP program loaded, please unload before enslaving");
5585 			err = -EOPNOTSUPP;
5586 			goto err;
5587 		}
5588 
5589 		err = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
5590 		if (err < 0) {
5591 			/* ndo_bpf() sets extack error message */
5592 			slave_err(dev, slave_dev, "Error %d calling ndo_bpf\n", err);
5593 			goto err;
5594 		}
5595 		if (prog)
5596 			bpf_prog_inc(prog);
5597 	}
5598 
5599 	if (prog) {
5600 		static_branch_inc(&bpf_master_redirect_enabled_key);
5601 	} else if (old_prog) {
5602 		bpf_prog_put(old_prog);
5603 		static_branch_dec(&bpf_master_redirect_enabled_key);
5604 	}
5605 
5606 	return 0;
5607 
5608 err:
5609 	/* unwind the program changes */
5610 	bond->xdp_prog = old_prog;
5611 	xdp.prog = old_prog;
5612 	xdp.extack = NULL; /* do not overwrite original error */
5613 
5614 	bond_for_each_slave(bond, rollback_slave, iter) {
5615 		struct net_device *slave_dev = rollback_slave->dev;
5616 		int err_unwind;
5617 
5618 		if (slave == rollback_slave)
5619 			break;
5620 
5621 		err_unwind = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
5622 		if (err_unwind < 0)
5623 			slave_err(dev, slave_dev,
5624 				  "Error %d when unwinding XDP program change\n", err_unwind);
5625 		else if (xdp.prog)
5626 			bpf_prog_inc(xdp.prog);
5627 	}
5628 	return err;
5629 }
5630 
5631 static int bond_xdp(struct net_device *dev, struct netdev_bpf *xdp)
5632 {
5633 	switch (xdp->command) {
5634 	case XDP_SETUP_PROG:
5635 		return bond_xdp_set(dev, xdp->prog, xdp->extack);
5636 	default:
5637 		return -EINVAL;
5638 	}
5639 }
5640 
5641 static u32 bond_mode_bcast_speed(struct slave *slave, u32 speed)
5642 {
5643 	if (speed == 0 || speed == SPEED_UNKNOWN)
5644 		speed = slave->speed;
5645 	else
5646 		speed = min(speed, slave->speed);
5647 
5648 	return speed;
5649 }
5650 
5651 static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
5652 					   struct ethtool_link_ksettings *cmd)
5653 {
5654 	struct bonding *bond = netdev_priv(bond_dev);
5655 	struct list_head *iter;
5656 	struct slave *slave;
5657 	u32 speed = 0;
5658 
5659 	cmd->base.duplex = DUPLEX_UNKNOWN;
5660 	cmd->base.port = PORT_OTHER;
5661 
5662 	/* Since bond_slave_can_tx() returns false for all inactive or down slaves, we
5663 	 * do not need to check the mode.  Though the link speed might not represent
5664 	 * the true receive or transmit bandwidth (not all modes are symmetric),
5665 	 * this is an accurate maximum.
5666 	 */
5667 	bond_for_each_slave(bond, slave, iter) {
5668 		if (bond_slave_can_tx(slave)) {
5669 			if (slave->speed != SPEED_UNKNOWN) {
5670 				if (BOND_MODE(bond) == BOND_MODE_BROADCAST)
5671 					speed = bond_mode_bcast_speed(slave,
5672 								      speed);
5673 				else
5674 					speed += slave->speed;
5675 			}
5676 			if (cmd->base.duplex == DUPLEX_UNKNOWN &&
5677 			    slave->duplex != DUPLEX_UNKNOWN)
5678 				cmd->base.duplex = slave->duplex;
5679 		}
5680 	}
5681 	cmd->base.speed = speed ? : SPEED_UNKNOWN;
5682 
5683 	return 0;
5684 }
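
/* Illustrative example (not part of the driver): with two tx-capable slaves
 * reporting 1000 and 10000 Mb/s, the code above reports base.speed as
 *
 *	11000	in the aggregating modes (slave speeds are summed), and
 *	 1000	in broadcast mode (bond_mode_bcast_speed() keeps the minimum,
 *		since every packet must traverse the slowest link).
 *
 * If no slave reports a known speed, base.speed stays SPEED_UNKNOWN.
 */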
5685 
5686 static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
5687 				     struct ethtool_drvinfo *drvinfo)
5688 {
5689 	strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
5690 	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d",
5691 		 BOND_ABI_VERSION);
5692 }
5693 
5694 static int bond_ethtool_get_ts_info(struct net_device *bond_dev,
5695 				    struct ethtool_ts_info *info)
5696 {
5697 	struct bonding *bond = netdev_priv(bond_dev);
5698 	const struct ethtool_ops *ops;
5699 	struct net_device *real_dev;
5700 	struct phy_device *phydev;
5701 	int ret = 0;
5702 
5703 	rcu_read_lock();
5704 	real_dev = bond_option_active_slave_get_rcu(bond);
5705 	dev_hold(real_dev);
5706 	rcu_read_unlock();
5707 
5708 	if (real_dev) {
5709 		ops = real_dev->ethtool_ops;
5710 		phydev = real_dev->phydev;
5711 
5712 		if (phy_has_tsinfo(phydev)) {
5713 			ret = phy_ts_info(phydev, info);
5714 			goto out;
5715 		} else if (ops->get_ts_info) {
5716 			ret = ops->get_ts_info(real_dev, info);
5717 			goto out;
5718 		}
5719 	}
5720 
5721 	info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
5722 				SOF_TIMESTAMPING_SOFTWARE;
5723 	info->phc_index = -1;
5724 
5725 out:
5726 	dev_put(real_dev);
5727 	return ret;
5728 }
5729 
5730 static const struct ethtool_ops bond_ethtool_ops = {
5731 	.get_drvinfo		= bond_ethtool_get_drvinfo,
5732 	.get_link		= ethtool_op_get_link,
5733 	.get_link_ksettings	= bond_ethtool_get_link_ksettings,
5734 	.get_ts_info		= bond_ethtool_get_ts_info,
5735 };
5736 
5737 static const struct net_device_ops bond_netdev_ops = {
5738 	.ndo_init		= bond_init,
5739 	.ndo_uninit		= bond_uninit,
5740 	.ndo_open		= bond_open,
5741 	.ndo_stop		= bond_close,
5742 	.ndo_start_xmit		= bond_start_xmit,
5743 	.ndo_select_queue	= bond_select_queue,
5744 	.ndo_get_stats64	= bond_get_stats,
5745 	.ndo_eth_ioctl		= bond_eth_ioctl,
5746 	.ndo_siocbond		= bond_do_ioctl,
5747 	.ndo_siocdevprivate	= bond_siocdevprivate,
5748 	.ndo_change_rx_flags	= bond_change_rx_flags,
5749 	.ndo_set_rx_mode	= bond_set_rx_mode,
5750 	.ndo_change_mtu		= bond_change_mtu,
5751 	.ndo_set_mac_address	= bond_set_mac_address,
5752 	.ndo_neigh_setup	= bond_neigh_setup,
5753 	.ndo_vlan_rx_add_vid	= bond_vlan_rx_add_vid,
5754 	.ndo_vlan_rx_kill_vid	= bond_vlan_rx_kill_vid,
5755 #ifdef CONFIG_NET_POLL_CONTROLLER
5756 	.ndo_netpoll_setup	= bond_netpoll_setup,
5757 	.ndo_netpoll_cleanup	= bond_netpoll_cleanup,
5758 	.ndo_poll_controller	= bond_poll_controller,
5759 #endif
5760 	.ndo_add_slave		= bond_enslave,
5761 	.ndo_del_slave		= bond_release,
5762 	.ndo_fix_features	= bond_fix_features,
5763 	.ndo_features_check	= passthru_features_check,
5764 	.ndo_get_xmit_slave	= bond_xmit_get_slave,
5765 	.ndo_sk_get_lower_dev	= bond_sk_get_lower_dev,
5766 	.ndo_bpf		= bond_xdp,
5767 	.ndo_xdp_xmit           = bond_xdp_xmit,
5768 	.ndo_xdp_get_xmit_slave = bond_xdp_get_xmit_slave,
5769 };
5770 
5771 static const struct device_type bond_type = {
5772 	.name = "bond",
5773 };
5774 
5775 static void bond_destructor(struct net_device *bond_dev)
5776 {
5777 	struct bonding *bond = netdev_priv(bond_dev);
5778 
5779 	if (bond->wq)
5780 		destroy_workqueue(bond->wq);
5781 
5782 	if (bond->rr_tx_counter)
5783 		free_percpu(bond->rr_tx_counter);
5784 }
5785 
5786 void bond_setup(struct net_device *bond_dev)
5787 {
5788 	struct bonding *bond = netdev_priv(bond_dev);
5789 
5790 	spin_lock_init(&bond->mode_lock);
5791 	bond->params = bonding_defaults;
5792 
5793 	/* Initialize pointers */
5794 	bond->dev = bond_dev;
5795 
5796 	/* Initialize the device entry points */
5797 	ether_setup(bond_dev);
5798 	bond_dev->max_mtu = ETH_MAX_MTU;
5799 	bond_dev->netdev_ops = &bond_netdev_ops;
5800 	bond_dev->ethtool_ops = &bond_ethtool_ops;
5801 
5802 	bond_dev->needs_free_netdev = true;
5803 	bond_dev->priv_destructor = bond_destructor;
5804 
5805 	SET_NETDEV_DEVTYPE(bond_dev, &bond_type);
5806 
5807 	/* Initialize the device options */
5808 	bond_dev->flags |= IFF_MASTER;
5809 	bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT | IFF_NO_QUEUE;
5810 	bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
5811 
5812 #ifdef CONFIG_XFRM_OFFLOAD
5813 	/* set up xfrm device ops (only supported in active-backup right now) */
5814 	bond_dev->xfrmdev_ops = &bond_xfrmdev_ops;
5815 	INIT_LIST_HEAD(&bond->ipsec_list);
5816 	spin_lock_init(&bond->ipsec_lock);
5817 #endif /* CONFIG_XFRM_OFFLOAD */
5818 
5819 	/* don't acquire bond device's netif_tx_lock when transmitting */
5820 	bond_dev->features |= NETIF_F_LLTX;
5821 
5822 	/* By default, we declare the bond to be fully capable of VLAN
5823 	 * hardware acceleration.  Special care is taken in the various
5824 	 * xmit functions when there are slaves that are not hw accel
5825 	 * capable.
5826 	 */
5828 
5829 	/* Don't allow bond devices to change network namespaces. */
5830 	bond_dev->features |= NETIF_F_NETNS_LOCAL;
5831 
5832 	bond_dev->hw_features = BOND_VLAN_FEATURES |
5833 				NETIF_F_HW_VLAN_CTAG_RX |
5834 				NETIF_F_HW_VLAN_CTAG_FILTER;
5835 
5836 	bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
5837 	bond_dev->features |= bond_dev->hw_features;
5838 	bond_dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
5839 #ifdef CONFIG_XFRM_OFFLOAD
5840 	bond_dev->hw_features |= BOND_XFRM_FEATURES;
5841 	/* Only enable XFRM features if this is an active-backup config */
5842 	if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
5843 		bond_dev->features |= BOND_XFRM_FEATURES;
5844 #endif /* CONFIG_XFRM_OFFLOAD */
5845 }
5846 
5847 /* Destroy a bonding device.
5848  * Must be under rtnl_lock when this function is called.
5849  */
5850 static void bond_uninit(struct net_device *bond_dev)
5851 {
5852 	struct bonding *bond = netdev_priv(bond_dev);
5853 	struct bond_up_slave *usable, *all;
5854 	struct list_head *iter;
5855 	struct slave *slave;
5856 
5857 	bond_netpoll_cleanup(bond_dev);
5858 
5859 	/* Release the bonded slaves */
5860 	bond_for_each_slave(bond, slave, iter)
5861 		__bond_release_one(bond_dev, slave->dev, true, true);
5862 	netdev_info(bond_dev, "Released all slaves\n");
5863 
5864 	usable = rtnl_dereference(bond->usable_slaves);
5865 	if (usable) {
5866 		RCU_INIT_POINTER(bond->usable_slaves, NULL);
5867 		kfree_rcu(usable, rcu);
5868 	}
5869 
5870 	all = rtnl_dereference(bond->all_slaves);
5871 	if (all) {
5872 		RCU_INIT_POINTER(bond->all_slaves, NULL);
5873 		kfree_rcu(all, rcu);
5874 	}
5875 
5876 	list_del(&bond->bond_list);
5877 
5878 	bond_debug_unregister(bond);
5879 }
5880 
5881 /*------------------------- Module initialization ---------------------------*/
5882 
5883 static int bond_check_params(struct bond_params *params)
5884 {
5885 	int arp_validate_value, fail_over_mac_value, primary_reselect_value, i;
5886 	struct bond_opt_value newval;
5887 	const struct bond_opt_value *valptr;
5888 	int arp_all_targets_value = 0;
5889 	u16 ad_actor_sys_prio = 0;
5890 	u16 ad_user_port_key = 0;
5891 	__be32 arp_target[BOND_MAX_ARP_TARGETS] = { 0 };
5892 	int arp_ip_count;
5893 	int bond_mode	= BOND_MODE_ROUNDROBIN;
5894 	int xmit_hashtype = BOND_XMIT_POLICY_LAYER2;
5895 	int lacp_fast = 0;
5896 	int tlb_dynamic_lb;
5897 
5898 	/* Convert string parameters. */
5899 	if (mode) {
5900 		bond_opt_initstr(&newval, mode);
5901 		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_MODE), &newval);
5902 		if (!valptr) {
5903 			pr_err("Error: Invalid bonding mode \"%s\"\n", mode);
5904 			return -EINVAL;
5905 		}
5906 		bond_mode = valptr->value;
5907 	}
5908 
5909 	if (xmit_hash_policy) {
5910 		if (bond_mode == BOND_MODE_ROUNDROBIN ||
5911 		    bond_mode == BOND_MODE_ACTIVEBACKUP ||
5912 		    bond_mode == BOND_MODE_BROADCAST) {
5913 			pr_info("xmit_hash_policy param is irrelevant in mode %s\n",
5914 				bond_mode_name(bond_mode));
5915 		} else {
5916 			bond_opt_initstr(&newval, xmit_hash_policy);
5917 			valptr = bond_opt_parse(bond_opt_get(BOND_OPT_XMIT_HASH),
5918 						&newval);
5919 			if (!valptr) {
5920 				pr_err("Error: Invalid xmit_hash_policy \"%s\"\n",
5921 				       xmit_hash_policy);
5922 				return -EINVAL;
5923 			}
5924 			xmit_hashtype = valptr->value;
5925 		}
5926 	}
5927 
5928 	if (lacp_rate) {
5929 		if (bond_mode != BOND_MODE_8023AD) {
5930 			pr_info("lacp_rate param is irrelevant in mode %s\n",
5931 				bond_mode_name(bond_mode));
5932 		} else {
5933 			bond_opt_initstr(&newval, lacp_rate);
5934 			valptr = bond_opt_parse(bond_opt_get(BOND_OPT_LACP_RATE),
5935 						&newval);
5936 			if (!valptr) {
5937 				pr_err("Error: Invalid lacp rate \"%s\"\n",
5938 				       lacp_rate);
5939 				return -EINVAL;
5940 			}
5941 			lacp_fast = valptr->value;
5942 		}
5943 	}
5944 
5945 	if (ad_select) {
5946 		bond_opt_initstr(&newval, ad_select);
5947 		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_SELECT),
5948 					&newval);
5949 		if (!valptr) {
5950 			pr_err("Error: Invalid ad_select \"%s\"\n", ad_select);
5951 			return -EINVAL;
5952 		}
5953 		params->ad_select = valptr->value;
5954 		if (bond_mode != BOND_MODE_8023AD)
5955 			pr_warn("ad_select param only affects 802.3ad mode\n");
5956 	} else {
5957 		params->ad_select = BOND_AD_STABLE;
5958 	}
5959 
5960 	if (max_bonds < 0) {
5961 		pr_warn("Warning: max_bonds (%d) not in range %d-%d, so it was reset to BOND_DEFAULT_MAX_BONDS (%d)\n",
5962 			max_bonds, 0, INT_MAX, BOND_DEFAULT_MAX_BONDS);
5963 		max_bonds = BOND_DEFAULT_MAX_BONDS;
5964 	}
5965 
5966 	if (miimon < 0) {
5967 		pr_warn("Warning: miimon module parameter (%d), not in range 0-%d, so it was reset to 0\n",
5968 			miimon, INT_MAX);
5969 		miimon = 0;
5970 	}
5971 
5972 	if (updelay < 0) {
5973 		pr_warn("Warning: updelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
5974 			updelay, INT_MAX);
5975 		updelay = 0;
5976 	}
5977 
5978 	if (downdelay < 0) {
5979 		pr_warn("Warning: downdelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
5980 			downdelay, INT_MAX);
5981 		downdelay = 0;
5982 	}
5983 
5984 	if ((use_carrier != 0) && (use_carrier != 1)) {
5985 		pr_warn("Warning: use_carrier module parameter (%d), not of valid value (0/1), so it was set to 1\n",
5986 			use_carrier);
5987 		use_carrier = 1;
5988 	}
5989 
5990 	if (num_peer_notif < 0 || num_peer_notif > 255) {
5991 		pr_warn("Warning: num_grat_arp/num_unsol_na (%d) not in range 0-255 so it was reset to 1\n",
5992 			num_peer_notif);
5993 		num_peer_notif = 1;
5994 	}
5995 
5996 	/* reset values for 802.3ad/TLB/ALB */
5997 	if (!bond_mode_uses_arp(bond_mode)) {
5998 		if (!miimon) {
5999 			pr_warn("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex, which are essential for 802.3ad operation\n");
6000 			pr_warn("Forcing miimon to 100msec\n");
6001 			miimon = BOND_DEFAULT_MIIMON;
6002 		}
6003 	}
6004 
6005 	if (tx_queues < 1 || tx_queues > 255) {
6006 		pr_warn("Warning: tx_queues (%d) should be between 1 and 255, resetting to %d\n",
6007 			tx_queues, BOND_DEFAULT_TX_QUEUES);
6008 		tx_queues = BOND_DEFAULT_TX_QUEUES;
6009 	}
6010 
6011 	if ((all_slaves_active != 0) && (all_slaves_active != 1)) {
6012 		pr_warn("Warning: all_slaves_active module parameter (%d), not of valid value (0/1), so it was set to 0\n",
6013 			all_slaves_active);
6014 		all_slaves_active = 0;
6015 	}
6016 
6017 	if (resend_igmp < 0 || resend_igmp > 255) {
6018 		pr_warn("Warning: resend_igmp (%d) should be between 0 and 255, resetting to %d\n",
6019 			resend_igmp, BOND_DEFAULT_RESEND_IGMP);
6020 		resend_igmp = BOND_DEFAULT_RESEND_IGMP;
6021 	}
6022 
6023 	bond_opt_initval(&newval, packets_per_slave);
6024 	if (!bond_opt_parse(bond_opt_get(BOND_OPT_PACKETS_PER_SLAVE), &newval)) {
6025 		pr_warn("Warning: packets_per_slave (%d) should be between 0 and %u, resetting to 1\n",
6026 			packets_per_slave, USHRT_MAX);
6027 		packets_per_slave = 1;
6028 	}
6029 
6030 	if (bond_mode == BOND_MODE_ALB) {
6031 		pr_notice("In ALB mode you might experience client disconnections upon reconnection of a link if the bonding module updelay parameter (%d msec) is incompatible with the forwarding delay time of the switch\n",
6032 			  updelay);
6033 	}
6034 
6035 	if (!miimon) {
6036 		if (updelay || downdelay) {
6037 			/* just warn the user the up/down delay will have
6038 			 * no effect since miimon is zero...
6039 			 */
6040 			pr_warn("Warning: miimon module parameter not set and updelay (%d) or downdelay (%d) module parameter is set; updelay and downdelay have no effect unless miimon is set\n",
6041 				updelay, downdelay);
6042 		}
6043 	} else {
6044 		/* don't allow arp monitoring */
6045 		if (arp_interval) {
6046 			pr_warn("Warning: miimon (%d) and arp_interval (%d) can't be used simultaneously, disabling ARP monitoring\n",
6047 				miimon, arp_interval);
6048 			arp_interval = 0;
6049 		}
6050 
6051 		if ((updelay % miimon) != 0) {
6052 			pr_warn("Warning: updelay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
6053 				updelay, miimon, (updelay / miimon) * miimon);
6054 		}
6055 
6056 		updelay /= miimon;
6057 
6058 		if ((downdelay % miimon) != 0) {
6059 			pr_warn("Warning: downdelay (%d) is not a multiple of miimon (%d), downdelay rounded to %d ms\n",
6060 				downdelay, miimon,
6061 				(downdelay / miimon) * miimon);
6062 		}
6063 
6064 		downdelay /= miimon;
6065 	}
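
	/* Illustrative example (not part of the driver): with miimon=100 and
	 * updelay=250 the warning above fires and the delay is rounded down:
	 *
	 *	(updelay / miimon) * miimon == (250 / 100) * 100 == 200 ms
	 *
	 * and the value actually stored is updelay / miimon == 2, i.e. delays
	 * are kept internally as a count of miimon intervals, not milliseconds.
	 */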
6066 
6067 	if (arp_interval < 0) {
6068 		pr_warn("Warning: arp_interval module parameter (%d), not in range 0-%d, so it was reset to 0\n",
6069 			arp_interval, INT_MAX);
6070 		arp_interval = 0;
6071 	}
6072 
6073 	for (arp_ip_count = 0, i = 0;
6074 	     (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[i]; i++) {
6075 		__be32 ip;
6076 
6077 		/* not a complete check, but good enough to catch mistakes */
6078 		if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) ||
6079 		    !bond_is_ip_target_ok(ip)) {
6080 			pr_warn("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
6081 				arp_ip_target[i]);
6082 			arp_interval = 0;
6083 		} else {
6084 			if (bond_get_targets_ip(arp_target, ip) == -1)
6085 				arp_target[arp_ip_count++] = ip;
6086 			else
6087 				pr_warn("Warning: duplicate address %pI4 in arp_ip_target, skipping\n",
6088 					&ip);
6089 		}
6090 	}
6091 
6092 	if (arp_interval && !arp_ip_count) {
6093 		/* don't allow arping if no arp_ip_target given... */
6094 		pr_warn("Warning: arp_interval module parameter (%d) specified without providing an arp_ip_target parameter, arp_interval was reset to 0\n",
6095 			arp_interval);
6096 		arp_interval = 0;
6097 	}
6098 
6099 	if (arp_validate) {
6100 		if (!arp_interval) {
6101 			pr_err("arp_validate requires arp_interval\n");
6102 			return -EINVAL;
6103 		}
6104 
6105 		bond_opt_initstr(&newval, arp_validate);
6106 		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_VALIDATE),
6107 					&newval);
6108 		if (!valptr) {
6109 			pr_err("Error: invalid arp_validate \"%s\"\n",
6110 			       arp_validate);
6111 			return -EINVAL;
6112 		}
6113 		arp_validate_value = valptr->value;
6114 	} else {
6115 		arp_validate_value = 0;
6116 	}
6117 
6118 	if (arp_all_targets) {
6119 		bond_opt_initstr(&newval, arp_all_targets);
6120 		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_ALL_TARGETS),
6121 					&newval);
6122 		if (!valptr) {
6123 			pr_err("Error: invalid arp_all_targets_value \"%s\"\n",
6124 			       arp_all_targets);
6125 			arp_all_targets_value = 0;
6126 		} else {
6127 			arp_all_targets_value = valptr->value;
6128 		}
6129 	}
6130 
6131 	if (miimon) {
6132 		pr_info("MII link monitoring set to %d ms\n", miimon);
6133 	} else if (arp_interval) {
6134 		valptr = bond_opt_get_val(BOND_OPT_ARP_VALIDATE,
6135 					  arp_validate_value);
6136 		pr_info("ARP monitoring set to %d ms, validate %s, with %d target(s):",
6137 			arp_interval, valptr->string, arp_ip_count);
6138 
6139 		for (i = 0; i < arp_ip_count; i++)
6140 			pr_cont(" %s", arp_ip_target[i]);
6141 
6142 		pr_cont("\n");
6143 
6144 	} else if (max_bonds) {
6145 		/* miimon and arp_interval not set, we need one so things
6146 		 * work as expected, see bonding.txt for details
6147 		 */
6148 		pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details\n");
6149 	}
6150 
6151 	if (primary && !bond_mode_uses_primary(bond_mode)) {
6152 		/* currently, using a primary only makes sense
6153 		 * in active backup, TLB or ALB modes
6154 		 */
6155 		pr_warn("Warning: %s primary device specified but has no effect in %s mode\n",
6156 			primary, bond_mode_name(bond_mode));
6157 		primary = NULL;
6158 	}
6159 
6160 	if (primary && primary_reselect) {
6161 		bond_opt_initstr(&newval, primary_reselect);
6162 		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_PRIMARY_RESELECT),
6163 					&newval);
6164 		if (!valptr) {
6165 			pr_err("Error: Invalid primary_reselect \"%s\"\n",
6166 			       primary_reselect);
6167 			return -EINVAL;
6168 		}
6169 		primary_reselect_value = valptr->value;
6170 	} else {
6171 		primary_reselect_value = BOND_PRI_RESELECT_ALWAYS;
6172 	}
6173 
6174 	if (fail_over_mac) {
6175 		bond_opt_initstr(&newval, fail_over_mac);
6176 		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_FAIL_OVER_MAC),
6177 					&newval);
6178 		if (!valptr) {
6179 			pr_err("Error: invalid fail_over_mac \"%s\"\n",
6180 			       fail_over_mac);
6181 			return -EINVAL;
6182 		}
6183 		fail_over_mac_value = valptr->value;
6184 		if (bond_mode != BOND_MODE_ACTIVEBACKUP)
6185 			pr_warn("Warning: fail_over_mac only affects active-backup mode\n");
6186 	} else {
6187 		fail_over_mac_value = BOND_FOM_NONE;
6188 	}
6189 
6190 	bond_opt_initstr(&newval, "default");
6191 	valptr = bond_opt_parse(
6192 			bond_opt_get(BOND_OPT_AD_ACTOR_SYS_PRIO),
6193 			&newval);
6194 	if (!valptr) {
6195 		pr_err("Error: No ad_actor_sys_prio default value");
6196 		return -EINVAL;
6197 	}
6198 	ad_actor_sys_prio = valptr->value;
6199 
6200 	valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_USER_PORT_KEY),
6201 				&newval);
6202 	if (!valptr) {
6203 		pr_err("Error: No ad_user_port_key default value");
6204 		return -EINVAL;
6205 	}
6206 	ad_user_port_key = valptr->value;
6207 
6208 	bond_opt_initstr(&newval, "default");
6209 	valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB), &newval);
6210 	if (!valptr) {
6211 		pr_err("Error: No tlb_dynamic_lb default value");
6212 		return -EINVAL;
6213 	}
6214 	tlb_dynamic_lb = valptr->value;
6215 
6216 	if (lp_interval == 0) {
6217 		pr_warn("Warning: lp_interval must be between 1 and %d, so it was reset to %d\n",
6218 			INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL);
6219 		lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
6220 	}
6221 
6222 	/* fill params struct with the proper values */
6223 	params->mode = bond_mode;
6224 	params->xmit_policy = xmit_hashtype;
6225 	params->miimon = miimon;
6226 	params->num_peer_notif = num_peer_notif;
6227 	params->arp_interval = arp_interval;
6228 	params->arp_validate = arp_validate_value;
6229 	params->arp_all_targets = arp_all_targets_value;
6230 	params->missed_max = 2;
6231 	params->updelay = updelay;
6232 	params->downdelay = downdelay;
6233 	params->peer_notif_delay = 0;
6234 	params->use_carrier = use_carrier;
6235 	params->lacp_active = 1;
6236 	params->lacp_fast = lacp_fast;
6237 	params->primary[0] = 0;
6238 	params->primary_reselect = primary_reselect_value;
6239 	params->fail_over_mac = fail_over_mac_value;
6240 	params->tx_queues = tx_queues;
6241 	params->all_slaves_active = all_slaves_active;
6242 	params->resend_igmp = resend_igmp;
6243 	params->min_links = min_links;
6244 	params->lp_interval = lp_interval;
6245 	params->packets_per_slave = packets_per_slave;
6246 	params->tlb_dynamic_lb = tlb_dynamic_lb;
6247 	params->ad_actor_sys_prio = ad_actor_sys_prio;
6248 	eth_zero_addr(params->ad_actor_system);
6249 	params->ad_user_port_key = ad_user_port_key;
6250 	if (packets_per_slave > 0) {
6251 		params->reciprocal_packets_per_slave =
6252 			reciprocal_value(packets_per_slave);
6253 	} else {
6254 		/* reciprocal_packets_per_slave is unused if
6255 		 * packets_per_slave is 0 or 1, just initialize it
6256 		 */
6257 		params->reciprocal_packets_per_slave =
6258 			(struct reciprocal_value) { 0 };
6259 	}
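
	/* Illustrative sketch (not part of the driver): reciprocal_value() from
	 * <linux/reciprocal_div.h> precomputes multiplier/shift constants so a
	 * later reciprocal_divide() can replace a runtime division in the
	 * round-robin transmit path, e.g. (hypothetical values):
	 *
	 *	struct reciprocal_value r = reciprocal_value(packets_per_slave);
	 *	u32 q = reciprocal_divide(counter, r);	// ~ counter / packets_per_slave
	 *
	 * For packets_per_slave of 0 or 1 no division is ever needed, which is
	 * why the struct is simply zeroed in that branch.
	 */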
6260 
6261 	if (primary)
6262 		strscpy_pad(params->primary, primary, sizeof(params->primary));
6263 
6264 	memcpy(params->arp_targets, arp_target, sizeof(arp_target));
6265 #if IS_ENABLED(CONFIG_IPV6)
6266 	memset(params->ns_targets, 0, sizeof(struct in6_addr) * BOND_MAX_NS_TARGETS);
6267 #endif
6268 
6269 	return 0;
6270 }
6271 
6272 /* Called from registration process */
6273 static int bond_init(struct net_device *bond_dev)
6274 {
6275 	struct bonding *bond = netdev_priv(bond_dev);
6276 	struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
6277 
6278 	netdev_dbg(bond_dev, "Begin bond_init\n");
6279 
6280 	bond->wq = alloc_ordered_workqueue(bond_dev->name, WQ_MEM_RECLAIM);
6281 	if (!bond->wq)
6282 		return -ENOMEM;
6283 
6284 	spin_lock_init(&bond->stats_lock);
6285 	netdev_lockdep_set_classes(bond_dev);
6286 
6287 	list_add_tail(&bond->bond_list, &bn->dev_list);
6288 
6289 	bond_prepare_sysfs_group(bond);
6290 
6291 	bond_debug_register(bond);
6292 
6293 	/* Ensure valid dev_addr */
6294 	if (is_zero_ether_addr(bond_dev->dev_addr) &&
6295 	    bond_dev->addr_assign_type == NET_ADDR_PERM)
6296 		eth_hw_addr_random(bond_dev);
6297 
6298 	return 0;
6299 }
6300 
6301 unsigned int bond_get_num_tx_queues(void)
6302 {
6303 	return tx_queues;
6304 }
6305 
6306 /* Create a new bond based on the specified name and bonding parameters.
6307  * If name is NULL, obtain a suitable "bond%d" name for us.
6308  * Caller must NOT hold rtnl_lock; we need to release it here before we
6309  * set up our sysfs entries.
6310  */
6311 int bond_create(struct net *net, const char *name)
6312 {
6313 	struct net_device *bond_dev;
6314 	struct bonding *bond;
6315 	int res = -ENOMEM;
6316 
6317 	rtnl_lock();
6318 
6319 	bond_dev = alloc_netdev_mq(sizeof(struct bonding),
6320 				   name ? name : "bond%d", NET_NAME_UNKNOWN,
6321 				   bond_setup, tx_queues);
6322 	if (!bond_dev)
6323 		goto out;
6324 
6325 	bond = netdev_priv(bond_dev);
6326 	dev_net_set(bond_dev, net);
6327 	bond_dev->rtnl_link_ops = &bond_link_ops;
6328 
6329 	res = register_netdevice(bond_dev);
6330 	if (res < 0) {
6331 		free_netdev(bond_dev);
6332 		goto out;
6333 	}
6334 
6335 	netif_carrier_off(bond_dev);
6336 
6337 	bond_work_init_all(bond);
6338 
6339 out:
6340 	rtnl_unlock();
6341 	return res;
6342 }
6343 
6344 static int __net_init bond_net_init(struct net *net)
6345 {
6346 	struct bond_net *bn = net_generic(net, bond_net_id);
6347 
6348 	bn->net = net;
6349 	INIT_LIST_HEAD(&bn->dev_list);
6350 
6351 	bond_create_proc_dir(bn);
6352 	bond_create_sysfs(bn);
6353 
6354 	return 0;
6355 }
6356 
6357 static void __net_exit bond_net_exit_batch(struct list_head *net_list)
6358 {
6359 	struct bond_net *bn;
6360 	struct net *net;
6361 	LIST_HEAD(list);
6362 
6363 	list_for_each_entry(net, net_list, exit_list) {
6364 		bn = net_generic(net, bond_net_id);
6365 		bond_destroy_sysfs(bn);
6366 	}
6367 
6368 	/* Kill off any bonds created after unregistering bond rtnl ops */
6369 	rtnl_lock();
6370 	list_for_each_entry(net, net_list, exit_list) {
6371 		struct bonding *bond, *tmp_bond;
6372 
6373 		bn = net_generic(net, bond_net_id);
6374 		list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
6375 			unregister_netdevice_queue(bond->dev, &list);
6376 	}
6377 	unregister_netdevice_many(&list);
6378 	rtnl_unlock();
6379 
6380 	list_for_each_entry(net, net_list, exit_list) {
6381 		bn = net_generic(net, bond_net_id);
6382 		bond_destroy_proc_dir(bn);
6383 	}
6384 }
6385 
6386 static struct pernet_operations bond_net_ops = {
6387 	.init = bond_net_init,
6388 	.exit_batch = bond_net_exit_batch,
6389 	.id   = &bond_net_id,
6390 	.size = sizeof(struct bond_net),
6391 };
6392 
6393 static int __init bonding_init(void)
6394 {
6395 	int i;
6396 	int res;
6397 
6398 	res = bond_check_params(&bonding_defaults);
6399 	if (res)
6400 		goto out;
6401 
6402 	res = register_pernet_subsys(&bond_net_ops);
6403 	if (res)
6404 		goto out;
6405 
6406 	res = bond_netlink_init();
6407 	if (res)
6408 		goto err_link;
6409 
6410 	bond_create_debugfs();
6411 
6412 	for (i = 0; i < max_bonds; i++) {
6413 		res = bond_create(&init_net, NULL);
6414 		if (res)
6415 			goto err;
6416 	}
6417 
6418 	skb_flow_dissector_init(&flow_keys_bonding,
6419 				flow_keys_bonding_keys,
6420 				ARRAY_SIZE(flow_keys_bonding_keys));
6421 
6422 	register_netdevice_notifier(&bond_netdev_notifier);
6423 out:
6424 	return res;
6425 err:
6426 	bond_destroy_debugfs();
6427 	bond_netlink_fini();
6428 err_link:
6429 	unregister_pernet_subsys(&bond_net_ops);
6430 	goto out;
6431 
6432 }
6433 
6434 static void __exit bonding_exit(void)
6435 {
6436 	unregister_netdevice_notifier(&bond_netdev_notifier);
6437 
6438 	bond_destroy_debugfs();
6439 
6440 	bond_netlink_fini();
6441 	unregister_pernet_subsys(&bond_net_ops);
6442 
6443 #ifdef CONFIG_NET_POLL_CONTROLLER
6444 	/* Make sure we don't have an imbalance on our netpoll blocking */
6445 	WARN_ON(atomic_read(&netpoll_block_tx));
6446 #endif
6447 }
6448 
6449 module_init(bonding_init);
6450 module_exit(bonding_exit);
6451 MODULE_LICENSE("GPL");
6452 MODULE_DESCRIPTION(DRV_DESCRIPTION);
6453 MODULE_AUTHOR("Thomas Davis, tadavis@lbl.gov and many others");
6454