/*
 * Originally based on the dummy device.
 *
 * Copyright 1999, Thomas Davis, tadavis@lbl.gov.
 * Licensed under the GPL. Based on the dummy.c and eql.c devices.
 *
 * bonding.c: an Ethernet Bonding driver
 *
 * This is useful for talking to Cisco EtherChannel compatible equipment:
 *	Cisco 5500
 *	Sun Trunking (Solaris)
 *	Alteon AceDirector Trunks
 *	Linux Bonding
 *	and probably many L2 switches ...
 *
 * How it works:
 *    ifconfig bond0 ipaddress netmask up
 *	will set up a network device with an IP address.  No MAC address
 *	is assigned at this time.  The hw MAC address will come from
 *	the first slave bonded to the channel.  All slaves will then use
 *	this hw MAC address.
 *
 *    ifconfig bond0 down
 *	will release all slaves, marking them as down.
 *
 *    ifenslave bond0 eth0
 *	will attach eth0 to bond0 as a slave.  eth0's hw MAC address will
 *	either:
 *	a: be used as the bond's initial MAC address, or
 *	b: if bond0 already has a hw MAC address, be overwritten with
 *	   bond0's hw MAC address.
 */
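
/* A roughly equivalent modern iproute2 sequence, for illustration only
 * (option names may vary with the iproute2 version; the addresses are
 * examples):
 *
 *	ip link add bond0 type bond mode active-backup miimon 100
 *	ip link set eth0 down
 *	ip link set eth0 master bond0
 *	ip addr add 192.0.2.10/24 dev bond0
 *	ip link set bond0 up
 */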

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/filter.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <net/ip.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/socket.h>
#include <linux/ctype.h>
#include <linux/inet.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <asm/dma.h>
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/smp.h>
#include <linux/if_ether.h>
#include <net/arp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_bonding.h>
#include <linux/phy.h>
#include <linux/jiffies.h>
#include <linux/preempt.h>
#include <net/route.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/pkt_sched.h>
#include <linux/rculist.h>
#include <net/flow_dissector.h>
#include <net/xfrm.h>
#include <net/bonding.h>
#include <net/bond_3ad.h>
#include <net/bond_alb.h>
#if IS_ENABLED(CONFIG_TLS_DEVICE)
#include <net/tls.h>
#endif

#include "bonding_priv.h"

/*---------------------------- Module parameters ----------------------------*/

/* monitor all links that often (in milliseconds). <=0 disables monitoring */

static int max_bonds	= BOND_DEFAULT_MAX_BONDS;
static int tx_queues	= BOND_DEFAULT_TX_QUEUES;
static int num_peer_notif = 1;
static int miimon;
static int updelay;
static int downdelay;
static int use_carrier	= 1;
static char *mode;
static char *primary;
static char *primary_reselect;
static char *lacp_rate;
static int min_links;
static char *ad_select;
static char *xmit_hash_policy;
static int arp_interval;
static char *arp_ip_target[BOND_MAX_ARP_TARGETS];
static char *arp_validate;
static char *arp_all_targets;
static char *fail_over_mac;
static int all_slaves_active;
static struct bond_params bonding_defaults;
static int resend_igmp = BOND_DEFAULT_RESEND_IGMP;
static int packets_per_slave = 1;
static int lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;

module_param(max_bonds, int, 0);
MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
module_param(tx_queues, int, 0);
MODULE_PARM_DESC(tx_queues, "Max number of transmit queues (default = 16)");
module_param_named(num_grat_arp, num_peer_notif, int, 0644);
MODULE_PARM_DESC(num_grat_arp, "Number of peer notifications to send on "
			       "failover event (alias of num_unsol_na)");
module_param_named(num_unsol_na, num_peer_notif, int, 0644);
MODULE_PARM_DESC(num_unsol_na, "Number of peer notifications to send on "
			       "failover event (alias of num_grat_arp)");
module_param(miimon, int, 0);
MODULE_PARM_DESC(miimon, "Link check interval in milliseconds");
module_param(updelay, int, 0);
MODULE_PARM_DESC(updelay, "Delay before considering link up, in milliseconds");
module_param(downdelay, int, 0);
MODULE_PARM_DESC(downdelay, "Delay before considering link down, "
			    "in milliseconds");
module_param(use_carrier, int, 0);
MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; "
			      "0 for off, 1 for on (default)");
module_param(mode, charp, 0);
MODULE_PARM_DESC(mode, "Mode of operation; 0 for balance-rr, "
		       "1 for active-backup, 2 for balance-xor, "
		       "3 for broadcast, 4 for 802.3ad, 5 for balance-tlb, "
		       "6 for balance-alb");
module_param(primary, charp, 0);
MODULE_PARM_DESC(primary, "Primary network device to use");
module_param(primary_reselect, charp, 0);
MODULE_PARM_DESC(primary_reselect, "Reselect primary slave "
				   "once it comes up; "
				   "0 for always (default), "
				   "1 for only if speed of primary is "
				   "better, "
				   "2 for only on active slave "
				   "failure");
module_param(lacp_rate, charp, 0);
MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; "
			    "0 for slow, 1 for fast");
module_param(ad_select, charp, 0);
MODULE_PARM_DESC(ad_select, "802.3ad aggregation selection logic; "
			    "0 for stable (default), 1 for bandwidth, "
			    "2 for count");
module_param(min_links, int, 0);
MODULE_PARM_DESC(min_links, "Minimum number of available links before turning on carrier");

module_param(xmit_hash_policy, charp, 0);
MODULE_PARM_DESC(xmit_hash_policy, "balance-alb, balance-tlb, balance-xor, 802.3ad hashing method; "
				   "0 for layer 2 (default), 1 for layer 3+4, "
				   "2 for layer 2+3, 3 for encap layer 2+3, "
				   "4 for encap layer 3+4, 5 for vlan+srcmac");
module_param(arp_interval, int, 0);
MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds");
module_param_array(arp_ip_target, charp, NULL, 0);
MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form");
module_param(arp_validate, charp, 0);
MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes; "
			       "0 for none (default), 1 for active, "
			       "2 for backup, 3 for all");
module_param(arp_all_targets, charp, 0);
MODULE_PARM_DESC(arp_all_targets, "fail on any/all arp targets timeout; 0 for any (default), 1 for all");
module_param(fail_over_mac, charp, 0);
MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to "
				"the same MAC; 0 for none (default), "
				"1 for active, 2 for follow");
module_param(all_slaves_active, int, 0);
MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface "
				     "by setting active flag for all slaves; "
				     "0 for never (default), 1 for always.");
module_param(resend_igmp, int, 0);
MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on "
			      "link failure");
module_param(packets_per_slave, int, 0);
MODULE_PARM_DESC(packets_per_slave, "Packets to send per slave in balance-rr "
				    "mode; 0 for a random slave, 1 packet per "
				    "slave (default), >1 packets per slave.");
module_param(lp_interval, uint, 0);
MODULE_PARM_DESC(lp_interval, "The number of seconds between instances where "
			      "the bonding driver sends learning packets to "
			      "each slave's peer switch. The default is 1.");
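
/* Example (illustrative only): loading the driver in 802.3ad mode with
 * MII monitoring every 100 ms and the fast LACPDU rate:
 *
 *	modprobe bonding mode=4 miimon=100 lacp_rate=1
 */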

/*----------------------------- Global variables ----------------------------*/

#ifdef CONFIG_NET_POLL_CONTROLLER
atomic_t netpoll_block_tx = ATOMIC_INIT(0);
#endif

unsigned int bond_net_id __read_mostly;

static const struct flow_dissector_key flow_keys_bonding_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v4addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v6addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_TIPC,
		.offset = offsetof(struct flow_keys, addrs.tipckey),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_PORTS,
		.offset = offsetof(struct flow_keys, ports),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_ICMP,
		.offset = offsetof(struct flow_keys, icmp),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_VLAN,
		.offset = offsetof(struct flow_keys, vlan),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL,
		.offset = offsetof(struct flow_keys, tags),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
		.offset = offsetof(struct flow_keys, keyid),
	},
};

static struct flow_dissector flow_keys_bonding __read_mostly;
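
/* flow_keys_bonding is initialized from the key list above (via
 * skb_flow_dissector_init() in the module init path, outside this
 * excerpt) and backs the flow dissection done by the xmit hash policies.
 */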

/*-------------------------- Forward declarations ---------------------------*/

static int bond_init(struct net_device *bond_dev);
static void bond_uninit(struct net_device *bond_dev);
static void bond_get_stats(struct net_device *bond_dev,
			   struct rtnl_link_stats64 *stats);
static void bond_slave_arr_handler(struct work_struct *work);
static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
				  int mod);
static void bond_netdev_notify_work(struct work_struct *work);

/*---------------------------- General routines -----------------------------*/

const char *bond_mode_name(int mode)
{
	static const char *names[] = {
		[BOND_MODE_ROUNDROBIN] = "load balancing (round-robin)",
		[BOND_MODE_ACTIVEBACKUP] = "fault-tolerance (active-backup)",
		[BOND_MODE_XOR] = "load balancing (xor)",
		[BOND_MODE_BROADCAST] = "fault-tolerance (broadcast)",
		[BOND_MODE_8023AD] = "IEEE 802.3ad Dynamic link aggregation",
		[BOND_MODE_TLB] = "transmit load balancing",
		[BOND_MODE_ALB] = "adaptive load balancing",
	};

	if (mode < BOND_MODE_ROUNDROBIN || mode > BOND_MODE_ALB)
		return "unknown";

	return names[mode];
}

/**
 * bond_dev_queue_xmit - Prepare skb for xmit.
 *
 * @bond: bond device that got this skb for tx.
 * @skb: hw accel VLAN tagged skb to transmit
 * @slave_dev: slave that is supposed to xmit this skbuff
 */
netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
			struct net_device *slave_dev)
{
	skb->dev = slave_dev;

	BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
		     sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
	skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);

	if (unlikely(netpoll_tx_running(bond->dev)))
		return bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);

	return dev_queue_xmit(skb);
}

bool bond_sk_check(struct bonding *bond)
{
	switch (BOND_MODE(bond)) {
	case BOND_MODE_8023AD:
	case BOND_MODE_XOR:
		if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34)
			return true;
		fallthrough;
	default:
		return false;
	}
}

static bool bond_xdp_check(struct bonding *bond)
{
	switch (BOND_MODE(bond)) {
	case BOND_MODE_ROUNDROBIN:
	case BOND_MODE_ACTIVEBACKUP:
		return true;
	case BOND_MODE_8023AD:
	case BOND_MODE_XOR:
		/* vlan+srcmac is not supported with XDP as in most cases the 802.1q
		 * payload is not in the packet due to hardware offload.
		 */
		if (bond->params.xmit_policy != BOND_XMIT_POLICY_VLAN_SRCMAC)
			return true;
		fallthrough;
	default:
		return false;
	}
}
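
/* Illustration (iproute2 sketch; the object file name is hypothetical):
 * attaching an XDP program to a bond running one of the modes accepted
 * above:
 *
 *	ip link set dev bond0 xdp obj xdp_prog.o sec xdp
 */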

/*---------------------------------- VLAN -----------------------------------*/

/* In the following two functions, bond_vlan_rx_add_vid and
 * bond_vlan_rx_kill_vid, we don't protect the slave list iteration with a
 * lock because:
 * a. This operation is performed in IOCTL context,
 * b. The operation is protected by the RTNL semaphore in the 8021q code,
 * c. Holding a lock with BH disabled while directly calling a base driver
 *    entry point is generally a BAD idea.
 *
 * The design of synchronization/protection for this operation in the 8021q
 * module is good for one or more VLAN devices over a single physical device
 * and cannot be extended for a teaming solution like bonding, so there is a
 * potential race condition here where a net device from the vlan group might
 * be referenced (either by a base driver or the 8021q code) while it is being
 * removed from the system. However, it turns out we're not making matters
 * worse, and if it works for regular VLAN usage it will work here too.
 */

/**
 * bond_vlan_rx_add_vid - Propagates adding an id to slaves
 * @bond_dev: bonding net device that got called
 * @proto: network protocol ID
 * @vid: vlan id being added
 */
static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
				__be16 proto, u16 vid)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave, *rollback_slave;
	struct list_head *iter;
	int res;

	bond_for_each_slave(bond, slave, iter) {
		res = vlan_vid_add(slave->dev, proto, vid);
		if (res)
			goto unwind;
	}

	return 0;

unwind:
	/* unwind to the slave that failed */
	bond_for_each_slave(bond, rollback_slave, iter) {
		if (rollback_slave == slave)
			break;

		vlan_vid_del(rollback_slave->dev, proto, vid);
	}

	return res;
}

/**
 * bond_vlan_rx_kill_vid - Propagates deleting an id to slaves
 * @bond_dev: bonding net device that got called
 * @proto: network protocol ID
 * @vid: vlan id being removed
 */
static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
				 __be16 proto, u16 vid)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct list_head *iter;
	struct slave *slave;

	bond_for_each_slave(bond, slave, iter)
		vlan_vid_del(slave->dev, proto, vid);

	if (bond_is_lb(bond))
		bond_alb_clear_vlan(bond, vid);

	return 0;
}
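
/* Illustration: the 8021q code invokes the two handlers above when a
 * VLAN is stacked on top of the bond, e.g. (iproute2 sketch):
 *
 *	ip link add link bond0 name bond0.100 type vlan id 100
 */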

/*---------------------------------- XFRM -----------------------------------*/

#ifdef CONFIG_XFRM_OFFLOAD
/**
 * bond_ipsec_add_sa - program device with a security association
 * @xs: pointer to transformer state struct
 **/
static int bond_ipsec_add_sa(struct xfrm_state *xs)
{
	struct net_device *bond_dev = xs->xso.dev;
	struct bond_ipsec *ipsec;
	struct bonding *bond;
	struct slave *slave;
	int err;

	if (!bond_dev)
		return -EINVAL;

	rcu_read_lock();
	bond = netdev_priv(bond_dev);
	slave = rcu_dereference(bond->curr_active_slave);
	if (!slave) {
		rcu_read_unlock();
		return -ENODEV;
	}

	if (!slave->dev->xfrmdev_ops ||
	    !slave->dev->xfrmdev_ops->xdo_dev_state_add ||
	    netif_is_bond_master(slave->dev)) {
		slave_warn(bond_dev, slave->dev, "Slave does not support ipsec offload\n");
		rcu_read_unlock();
		return -EINVAL;
	}

	ipsec = kmalloc(sizeof(*ipsec), GFP_ATOMIC);
	if (!ipsec) {
		rcu_read_unlock();
		return -ENOMEM;
	}
	xs->xso.real_dev = slave->dev;

	err = slave->dev->xfrmdev_ops->xdo_dev_state_add(xs);
	if (!err) {
		ipsec->xs = xs;
		INIT_LIST_HEAD(&ipsec->list);
		spin_lock_bh(&bond->ipsec_lock);
		list_add(&ipsec->list, &bond->ipsec_list);
		spin_unlock_bh(&bond->ipsec_lock);
	} else {
		kfree(ipsec);
	}
	rcu_read_unlock();
	return err;
}

static void bond_ipsec_add_sa_all(struct bonding *bond)
{
	struct net_device *bond_dev = bond->dev;
	struct bond_ipsec *ipsec;
	struct slave *slave;

	rcu_read_lock();
	slave = rcu_dereference(bond->curr_active_slave);
	if (!slave)
		goto out;

	if (!slave->dev->xfrmdev_ops ||
	    !slave->dev->xfrmdev_ops->xdo_dev_state_add ||
	    netif_is_bond_master(slave->dev)) {
		spin_lock_bh(&bond->ipsec_lock);
		if (!list_empty(&bond->ipsec_list))
			slave_warn(bond_dev, slave->dev,
				   "%s: no slave xdo_dev_state_add\n",
				   __func__);
		spin_unlock_bh(&bond->ipsec_lock);
		goto out;
	}

	spin_lock_bh(&bond->ipsec_lock);
	list_for_each_entry(ipsec, &bond->ipsec_list, list) {
		ipsec->xs->xso.real_dev = slave->dev;
		if (slave->dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs)) {
			slave_warn(bond_dev, slave->dev, "%s: failed to add SA\n", __func__);
			ipsec->xs->xso.real_dev = NULL;
		}
	}
	spin_unlock_bh(&bond->ipsec_lock);
out:
	rcu_read_unlock();
}

/**
 * bond_ipsec_del_sa - clear out this specific SA
 * @xs: pointer to transformer state struct
 **/
static void bond_ipsec_del_sa(struct xfrm_state *xs)
{
	struct net_device *bond_dev = xs->xso.dev;
	struct bond_ipsec *ipsec;
	struct bonding *bond;
	struct slave *slave;

	if (!bond_dev)
		return;

	rcu_read_lock();
	bond = netdev_priv(bond_dev);
	slave = rcu_dereference(bond->curr_active_slave);

	if (!slave)
		goto out;

	if (!xs->xso.real_dev)
		goto out;

	WARN_ON(xs->xso.real_dev != slave->dev);

	if (!slave->dev->xfrmdev_ops ||
	    !slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
	    netif_is_bond_master(slave->dev)) {
		slave_warn(bond_dev, slave->dev, "%s: no slave xdo_dev_state_delete\n", __func__);
		goto out;
	}

	slave->dev->xfrmdev_ops->xdo_dev_state_delete(xs);
out:
	spin_lock_bh(&bond->ipsec_lock);
	list_for_each_entry(ipsec, &bond->ipsec_list, list) {
		if (ipsec->xs == xs) {
			list_del(&ipsec->list);
			kfree(ipsec);
			break;
		}
	}
	spin_unlock_bh(&bond->ipsec_lock);
	rcu_read_unlock();
}

static void bond_ipsec_del_sa_all(struct bonding *bond)
{
	struct net_device *bond_dev = bond->dev;
	struct bond_ipsec *ipsec;
	struct slave *slave;

	rcu_read_lock();
	slave = rcu_dereference(bond->curr_active_slave);
	if (!slave) {
		rcu_read_unlock();
		return;
	}

	spin_lock_bh(&bond->ipsec_lock);
	list_for_each_entry(ipsec, &bond->ipsec_list, list) {
		if (!ipsec->xs->xso.real_dev)
			continue;

		if (!slave->dev->xfrmdev_ops ||
		    !slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
		    netif_is_bond_master(slave->dev)) {
			slave_warn(bond_dev, slave->dev,
				   "%s: no slave xdo_dev_state_delete\n",
				   __func__);
		} else {
			slave->dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs);
		}
		ipsec->xs->xso.real_dev = NULL;
	}
	spin_unlock_bh(&bond->ipsec_lock);
	rcu_read_unlock();
}

/**
 * bond_ipsec_offload_ok - can this packet use the xfrm hw offload
 * @skb: current data packet
 * @xs: pointer to transformer state struct
 **/
static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
	struct net_device *bond_dev = xs->xso.dev;
	struct net_device *real_dev;
	struct slave *curr_active;
	struct bonding *bond;
	int err;

	bond = netdev_priv(bond_dev);
	rcu_read_lock();
	curr_active = rcu_dereference(bond->curr_active_slave);
	if (!curr_active) {
		/* no active slave means nothing can be offloaded */
		err = false;
		goto out;
	}
	real_dev = curr_active->dev;

	if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
		err = false;
		goto out;
	}

	if (!xs->xso.real_dev) {
		err = false;
		goto out;
	}

	if (!real_dev->xfrmdev_ops ||
	    !real_dev->xfrmdev_ops->xdo_dev_offload_ok ||
	    netif_is_bond_master(real_dev)) {
		err = false;
		goto out;
	}

	err = real_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs);
out:
	rcu_read_unlock();
	return err;
}

static const struct xfrmdev_ops bond_xfrmdev_ops = {
	.xdo_dev_state_add = bond_ipsec_add_sa,
	.xdo_dev_state_delete = bond_ipsec_del_sa,
	.xdo_dev_offload_ok = bond_ipsec_offload_ok,
};
#endif /* CONFIG_XFRM_OFFLOAD */
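
/* Illustration (iproute2 sketch; addresses, SPI and key are placeholders):
 * adding an ESP state whose crypto is offloaded through the bond's
 * active slave:
 *
 *	ip xfrm state add src 192.0.2.1 dst 192.0.2.2 proto esp spi 0x1 \
 *		reqid 1 mode transport aead 'rfc4106(gcm(aes))' \
 *		0x1111111111111111111111111111111111111111 128 \
 *		offload dev bond0 dir out
 */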

/*------------------------------- Link status -------------------------------*/

/* Set the carrier state for the master according to the state of its
 * slaves.  If any slaves are up, the master is up.  In 802.3ad mode,
 * do special 802.3ad magic.
 *
 * Returns zero if carrier state does not change, nonzero if it does.
 */
int bond_set_carrier(struct bonding *bond)
{
	struct list_head *iter;
	struct slave *slave;

	if (!bond_has_slaves(bond))
		goto down;

	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		return bond_3ad_set_carrier(bond);

	bond_for_each_slave(bond, slave, iter) {
		if (slave->link == BOND_LINK_UP) {
			if (!netif_carrier_ok(bond->dev)) {
				netif_carrier_on(bond->dev);
				return 1;
			}
			return 0;
		}
	}

down:
	if (netif_carrier_ok(bond->dev)) {
		netif_carrier_off(bond->dev);
		return 1;
	}
	return 0;
}

/* Get link speed and duplex from the slave's base driver using ethtool.
 * If the call fails or the values are invalid, set speed and duplex to
 * SPEED_UNKNOWN and DUPLEX_UNKNOWN and return 1; return 0 when valid
 * values were obtained.
 */
static int bond_update_speed_duplex(struct slave *slave)
{
	struct net_device *slave_dev = slave->dev;
	struct ethtool_link_ksettings ecmd;
	int res;

	slave->speed = SPEED_UNKNOWN;
	slave->duplex = DUPLEX_UNKNOWN;

	res = __ethtool_get_link_ksettings(slave_dev, &ecmd);
	if (res < 0)
		return 1;
	if (ecmd.base.speed == 0 || ecmd.base.speed == ((__u32)-1))
		return 1;
	switch (ecmd.base.duplex) {
	case DUPLEX_FULL:
	case DUPLEX_HALF:
		break;
	default:
		return 1;
	}

	slave->speed = ecmd.base.speed;
	slave->duplex = ecmd.base.duplex;

	return 0;
}

const char *bond_slave_link_status(s8 link)
{
	switch (link) {
	case BOND_LINK_UP:
		return "up";
	case BOND_LINK_FAIL:
		return "going down";
	case BOND_LINK_DOWN:
		return "down";
	case BOND_LINK_BACK:
		return "going back";
	default:
		return "unknown";
	}
}

/* If <dev> supports MII link status reporting, check its link status.
 *
 * We either do MII/ETHTOOL ioctls, or check netif_carrier_ok(),
 * depending upon the setting of the use_carrier parameter.
 *
 * Return either BMSR_LSTATUS, meaning that the link is up (or we
 * can't tell and just pretend it is), or 0, meaning that the link is
 * down.
 *
 * If reporting is non-zero, instead of faking link up, return -1 if
 * both ETHTOOL and MII ioctls fail (meaning the device does not
 * support them).  If use_carrier is set, return whatever it says.
 * It'd be nice if there was a good way to tell if a driver supports
 * netif_carrier, but there really isn't.
 */
static int bond_check_dev_link(struct bonding *bond,
			       struct net_device *slave_dev, int reporting)
{
	const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
	int (*ioctl)(struct net_device *, struct ifreq *, int);
	struct ifreq ifr;
	struct mii_ioctl_data *mii;

	if (!reporting && !netif_running(slave_dev))
		return 0;

	if (bond->params.use_carrier)
		return netif_carrier_ok(slave_dev) ? BMSR_LSTATUS : 0;

	/* Try to get link status using Ethtool first. */
	if (slave_dev->ethtool_ops->get_link)
		return slave_dev->ethtool_ops->get_link(slave_dev) ?
			BMSR_LSTATUS : 0;

	/* Ethtool can't be used, fall back to MII ioctls. */
	ioctl = slave_ops->ndo_eth_ioctl;
	if (ioctl) {
		/* TODO: set pointer to correct ioctl on a per team member
		 *       basis to make this more efficient. that is, once
		 *       we determine the correct ioctl, we will always
		 *       call it and not the others for that team
		 *       member.
		 */

		/* We cannot assume that SIOCGMIIPHY will also read a
		 * register; not all network drivers (e.g., e100)
		 * support that.
		 */

		/* Yes, the mii is overlaid on the ifreq.ifr_ifru */
		strscpy_pad(ifr.ifr_name, slave_dev->name, IFNAMSIZ);
		mii = if_mii(&ifr);
		if (ioctl(slave_dev, &ifr, SIOCGMIIPHY) == 0) {
			mii->reg_num = MII_BMSR;
			if (ioctl(slave_dev, &ifr, SIOCGMIIREG) == 0)
				return mii->val_out & BMSR_LSTATUS;
		}
	}

	/* If reporting, report that either there's no ndo_eth_ioctl,
	 * or both SIOCGMIIREG and get_link failed (meaning that we
	 * cannot report link status).  If not reporting, pretend
	 * we're ok.
	 */
	return reporting ? -1 : BMSR_LSTATUS;
}
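
/* For illustration only: roughly the same MII query issued from
 * userspace (a sketch, not part of the driver; error handling omitted):
 *
 *	struct ifreq ifr = { .ifr_name = "eth0" };
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	ioctl(fd, SIOCGMIIPHY, &ifr);	// fills mii->phy_id
 *	mii->reg_num = MII_BMSR;
 *	ioctl(fd, SIOCGMIIREG, &ifr);	// link bit in mii->val_out
 *	link_up = mii->val_out & BMSR_LSTATUS;
 */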

/*----------------------------- Multicast list ------------------------------*/

/* Push the promiscuity flag down to appropriate slaves */
static int bond_set_promiscuity(struct bonding *bond, int inc)
{
	struct list_head *iter;
	int err = 0;

	if (bond_uses_primary(bond)) {
		struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);

		if (curr_active)
			err = dev_set_promiscuity(curr_active->dev, inc);
	} else {
		struct slave *slave;

		bond_for_each_slave(bond, slave, iter) {
			err = dev_set_promiscuity(slave->dev, inc);
			if (err)
				return err;
		}
	}
	return err;
}

/* Push the allmulti flag down to all slaves */
static int bond_set_allmulti(struct bonding *bond, int inc)
{
	struct list_head *iter;
	int err = 0;

	if (bond_uses_primary(bond)) {
		struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);

		if (curr_active)
			err = dev_set_allmulti(curr_active->dev, inc);
	} else {
		struct slave *slave;

		bond_for_each_slave(bond, slave, iter) {
			err = dev_set_allmulti(slave->dev, inc);
			if (err)
				return err;
		}
	}
	return err;
}

/* Retrieve the list of registered multicast addresses for the bonding
 * device and retransmit an IGMP JOIN request to the current active
 * slave.
 */
static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
{
	struct bonding *bond = container_of(work, struct bonding,
					    mcast_work.work);

	if (!rtnl_trylock()) {
		queue_delayed_work(bond->wq, &bond->mcast_work, 1);
		return;
	}
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, bond->dev);

	if (bond->igmp_retrans > 1) {
		bond->igmp_retrans--;
		queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
	}
	rtnl_unlock();
}

/* Flush bond's hardware addresses from slave */
static void bond_hw_addr_flush(struct net_device *bond_dev,
			       struct net_device *slave_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);

	dev_uc_unsync(slave_dev, bond_dev);
	dev_mc_unsync(slave_dev, bond_dev);

	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
		/* del lacpdu mc addr from mc list */
		u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;

		dev_mc_del(slave_dev, lacpdu_multicast);
	}
}

/*--------------------------- Active slave change ---------------------------*/

/* Update the hardware address list and promisc/allmulti for the new and
 * old active slaves (if any).  Modes that are not using primary keep all
 * slaves up to date at all times; only the modes that use primary need to
 * call this function to swap these settings during a failover.
 */
static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
			      struct slave *old_active)
{
	if (old_active) {
		if (bond->dev->flags & IFF_PROMISC)
			dev_set_promiscuity(old_active->dev, -1);

		if (bond->dev->flags & IFF_ALLMULTI)
			dev_set_allmulti(old_active->dev, -1);

		bond_hw_addr_flush(bond->dev, old_active->dev);
	}

	if (new_active) {
		/* FIXME: Signal errors upstream. */
		if (bond->dev->flags & IFF_PROMISC)
			dev_set_promiscuity(new_active->dev, 1);

		if (bond->dev->flags & IFF_ALLMULTI)
			dev_set_allmulti(new_active->dev, 1);

		netif_addr_lock_bh(bond->dev);
		dev_uc_sync(new_active->dev, bond->dev);
		dev_mc_sync(new_active->dev, bond->dev);
		netif_addr_unlock_bh(bond->dev);
	}
}

/**
 * bond_set_dev_addr - clone slave's address to bond
 * @bond_dev: bond net device
 * @slave_dev: slave net device
 *
 * Should be called with RTNL held.
 */
static int bond_set_dev_addr(struct net_device *bond_dev,
			     struct net_device *slave_dev)
{
	int err;

	slave_dbg(bond_dev, slave_dev, "bond_dev=%p slave_dev=%p slave_dev->addr_len=%d\n",
		  bond_dev, slave_dev, slave_dev->addr_len);
	err = dev_pre_changeaddr_notify(bond_dev, slave_dev->dev_addr, NULL);
	if (err)
		return err;

	__dev_addr_set(bond_dev, slave_dev->dev_addr, slave_dev->addr_len);
	bond_dev->addr_assign_type = NET_ADDR_STOLEN;
	call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
	return 0;
}

static struct slave *bond_get_old_active(struct bonding *bond,
					 struct slave *new_active)
{
	struct slave *slave;
	struct list_head *iter;

	bond_for_each_slave(bond, slave, iter) {
		if (slave == new_active)
			continue;

		if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr))
			return slave;
	}

	return NULL;
}

/* bond_do_fail_over_mac
 *
 * Perform special MAC address swapping for fail_over_mac settings
 *
 * Called with RTNL
 */
static void bond_do_fail_over_mac(struct bonding *bond,
				  struct slave *new_active,
				  struct slave *old_active)
{
	u8 tmp_mac[MAX_ADDR_LEN];
	struct sockaddr_storage ss;
	int rv;

	switch (bond->params.fail_over_mac) {
	case BOND_FOM_ACTIVE:
		if (new_active) {
			rv = bond_set_dev_addr(bond->dev, new_active->dev);
			if (rv)
				slave_err(bond->dev, new_active->dev, "Error %d setting bond MAC from slave\n",
					  -rv);
		}
		break;
	case BOND_FOM_FOLLOW:
		/* if new_active && old_active, swap them
		 * if just old_active, do nothing (going to no active slave)
		 * if just new_active, set new_active to bond's MAC
		 */
		if (!new_active)
			return;

		if (!old_active)
			old_active = bond_get_old_active(bond, new_active);

		if (old_active) {
			bond_hw_addr_copy(tmp_mac, new_active->dev->dev_addr,
					  new_active->dev->addr_len);
			bond_hw_addr_copy(ss.__data,
					  old_active->dev->dev_addr,
					  old_active->dev->addr_len);
			ss.ss_family = new_active->dev->type;
		} else {
			bond_hw_addr_copy(ss.__data, bond->dev->dev_addr,
					  bond->dev->addr_len);
			ss.ss_family = bond->dev->type;
		}

		rv = dev_set_mac_address(new_active->dev,
					 (struct sockaddr *)&ss, NULL);
		if (rv) {
			slave_err(bond->dev, new_active->dev, "Error %d setting MAC of new active slave\n",
				  -rv);
			goto out;
		}

		if (!old_active)
			goto out;

		bond_hw_addr_copy(ss.__data, tmp_mac,
				  new_active->dev->addr_len);
		ss.ss_family = old_active->dev->type;

		rv = dev_set_mac_address(old_active->dev,
					 (struct sockaddr *)&ss, NULL);
		if (rv)
			slave_err(bond->dev, old_active->dev, "Error %d setting MAC of old active slave\n",
				  -rv);
out:
		break;
	default:
		netdev_err(bond->dev, "bond_do_fail_over_mac impossible: bad policy %d\n",
			   bond->params.fail_over_mac);
		break;
	}

}
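
/* For reference (illustrative): the policy handled above is normally
 * selected via the fail_over_mac option, e.g. through sysfs while the
 * bond has no slaves yet:
 *
 *	echo follow > /sys/class/net/bond0/bonding/fail_over_mac
 */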

static struct slave *bond_choose_primary_or_current(struct bonding *bond)
{
	struct slave *prim = rtnl_dereference(bond->primary_slave);
	struct slave *curr = rtnl_dereference(bond->curr_active_slave);

	if (!prim || prim->link != BOND_LINK_UP) {
		if (!curr || curr->link != BOND_LINK_UP)
			return NULL;
		return curr;
	}

	if (bond->force_primary) {
		bond->force_primary = false;
		return prim;
	}

	if (!curr || curr->link != BOND_LINK_UP)
		return prim;

	/* At this point, prim and curr are both up */
	switch (bond->params.primary_reselect) {
	case BOND_PRI_RESELECT_ALWAYS:
		return prim;
	case BOND_PRI_RESELECT_BETTER:
		if (prim->speed < curr->speed)
			return curr;
		if (prim->speed == curr->speed && prim->duplex <= curr->duplex)
			return curr;
		return prim;
	case BOND_PRI_RESELECT_FAILURE:
		return curr;
	default:
		netdev_err(bond->dev, "impossible primary_reselect %d\n",
			   bond->params.primary_reselect);
		return curr;
	}
}

/**
 * bond_find_best_slave - select the best available slave to be the active one
 * @bond: our bonding struct
 */
static struct slave *bond_find_best_slave(struct bonding *bond)
{
	struct slave *slave, *bestslave = NULL;
	struct list_head *iter;
	int mintime = bond->params.updelay;

	slave = bond_choose_primary_or_current(bond);
	if (slave)
		return slave;

	bond_for_each_slave(bond, slave, iter) {
		if (slave->link == BOND_LINK_UP)
			return slave;
		if (slave->link == BOND_LINK_BACK && bond_slave_is_up(slave) &&
		    slave->delay < mintime) {
			mintime = slave->delay;
			bestslave = slave;
		}
	}

	return bestslave;
}

static bool bond_should_notify_peers(struct bonding *bond)
{
	struct slave *slave;

	rcu_read_lock();
	slave = rcu_dereference(bond->curr_active_slave);
	rcu_read_unlock();

	if (!slave || !bond->send_peer_notif ||
	    bond->send_peer_notif %
	    max(1, bond->params.peer_notif_delay) != 0 ||
	    !netif_carrier_ok(bond->dev) ||
	    test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
		return false;

	netdev_dbg(bond->dev, "bond_should_notify_peers: slave %s\n",
		   slave ? slave->dev->name : "NULL");

	return true;
}
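
/* Worked example (illustrative numbers): with num_peer_notif = 2 and
 * peer_notif_delay = 3, send_peer_notif starts at 6 and is decremented
 * once per monitor run; the modulo test above passes at 6 and 3, i.e.
 * two notifications spaced three monitor intervals apart.
 */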

/**
 * bond_change_active_slave - change the active slave into the specified one
 * @bond: our bonding struct
 * @new_active: the new slave to make the active one
 *
 * Set the new slave to the bond's settings and unset them on the old
 * curr_active_slave.
 * Settings include flags, mc-list, promiscuity, allmulti, etc.
 *
 * If @new_active's link state is %BOND_LINK_BACK we'll set it to
 * %BOND_LINK_UP, because it is apparently the best available slave we
 * have, even though its updelay hasn't timed out yet.
 *
 * Caller must hold RTNL.
 */
void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
{
	struct slave *old_active;

	ASSERT_RTNL();

	old_active = rtnl_dereference(bond->curr_active_slave);

	if (old_active == new_active)
		return;

#ifdef CONFIG_XFRM_OFFLOAD
	bond_ipsec_del_sa_all(bond);
#endif /* CONFIG_XFRM_OFFLOAD */

	if (new_active) {
		new_active->last_link_up = jiffies;

		if (new_active->link == BOND_LINK_BACK) {
			if (bond_uses_primary(bond)) {
				slave_info(bond->dev, new_active->dev, "making interface the new active one %d ms earlier\n",
					   (bond->params.updelay - new_active->delay) * bond->params.miimon);
			}

			new_active->delay = 0;
			bond_set_slave_link_state(new_active, BOND_LINK_UP,
						  BOND_SLAVE_NOTIFY_NOW);

			if (BOND_MODE(bond) == BOND_MODE_8023AD)
				bond_3ad_handle_link_change(new_active, BOND_LINK_UP);

			if (bond_is_lb(bond))
				bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
		} else {
			if (bond_uses_primary(bond))
				slave_info(bond->dev, new_active->dev, "making interface the new active one\n");
		}
	}

	if (bond_uses_primary(bond))
		bond_hw_addr_swap(bond, new_active, old_active);

	if (bond_is_lb(bond)) {
		bond_alb_handle_active_change(bond, new_active);
		if (old_active)
			bond_set_slave_inactive_flags(old_active,
						      BOND_SLAVE_NOTIFY_NOW);
		if (new_active)
			bond_set_slave_active_flags(new_active,
						    BOND_SLAVE_NOTIFY_NOW);
	} else {
		rcu_assign_pointer(bond->curr_active_slave, new_active);
	}

	if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
		if (old_active)
			bond_set_slave_inactive_flags(old_active,
						      BOND_SLAVE_NOTIFY_NOW);

		if (new_active) {
			bool should_notify_peers = false;

			bond_set_slave_active_flags(new_active,
						    BOND_SLAVE_NOTIFY_NOW);

			if (bond->params.fail_over_mac)
				bond_do_fail_over_mac(bond, new_active,
						      old_active);

			if (netif_running(bond->dev)) {
				bond->send_peer_notif =
					bond->params.num_peer_notif *
					max(1, bond->params.peer_notif_delay);
				should_notify_peers =
					bond_should_notify_peers(bond);
			}

			call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev);
			if (should_notify_peers) {
				bond->send_peer_notif--;
				call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
							 bond->dev);
			}
		}
	}

#ifdef CONFIG_XFRM_OFFLOAD
	bond_ipsec_add_sa_all(bond);
#endif /* CONFIG_XFRM_OFFLOAD */

	/* Resend IGMP joins since the active slave has changed, or all were
	 * sent on the old curr_active_slave. Resend only if the bond is up
	 * in one of the affected bonding modes and retransmission is enabled.
	 */
	if (netif_running(bond->dev) && (bond->params.resend_igmp > 0) &&
	    ((bond_uses_primary(bond) && new_active) ||
	     BOND_MODE(bond) == BOND_MODE_ROUNDROBIN)) {
		bond->igmp_retrans = bond->params.resend_igmp;
		queue_delayed_work(bond->wq, &bond->mcast_work, 1);
	}
}

/**
 * bond_select_active_slave - select a new active slave, if needed
 * @bond: our bonding struct
 *
 * This function should be called when one of the following occurs:
 * - The old curr_active_slave has been released or lost its link.
 * - The primary_slave has got its link back.
 * - A slave has got its link back and there's no old curr_active_slave.
 *
 * Caller must hold RTNL.
 */
void bond_select_active_slave(struct bonding *bond)
{
	struct slave *best_slave;
	int rv;

	ASSERT_RTNL();

	best_slave = bond_find_best_slave(bond);
	if (best_slave != rtnl_dereference(bond->curr_active_slave)) {
		bond_change_active_slave(bond, best_slave);
		rv = bond_set_carrier(bond);
		if (!rv)
			return;

		if (netif_carrier_ok(bond->dev))
			netdev_info(bond->dev, "active interface up!\n");
		else
			netdev_info(bond->dev, "now running without any active interface!\n");
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static inline int slave_enable_netpoll(struct slave *slave)
{
	struct netpoll *np;
	int err = 0;

	np = kzalloc(sizeof(*np), GFP_KERNEL);
	err = -ENOMEM;
	if (!np)
		goto out;

	err = __netpoll_setup(np, slave->dev);
	if (err) {
		kfree(np);
		goto out;
	}
	slave->np = np;
out:
	return err;
}
static inline void slave_disable_netpoll(struct slave *slave)
{
	struct netpoll *np = slave->np;

	if (!np)
		return;

	slave->np = NULL;

	__netpoll_free(np);
}

static void bond_poll_controller(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave = NULL;
	struct list_head *iter;
	struct ad_info ad_info;

	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		if (bond_3ad_get_active_agg_info(bond, &ad_info))
			return;

	bond_for_each_slave_rcu(bond, slave, iter) {
		if (!bond_slave_is_up(slave))
			continue;

		if (BOND_MODE(bond) == BOND_MODE_8023AD) {
			struct aggregator *agg =
			    SLAVE_AD_INFO(slave)->port.aggregator;

			if (agg &&
			    agg->aggregator_identifier != ad_info.aggregator_id)
				continue;
		}

		netpoll_poll_dev(slave->dev);
	}
}

static void bond_netpoll_cleanup(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct list_head *iter;
	struct slave *slave;

	bond_for_each_slave(bond, slave, iter)
		if (bond_slave_is_up(slave))
			slave_disable_netpoll(slave);
}

static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
{
	struct bonding *bond = netdev_priv(dev);
	struct list_head *iter;
	struct slave *slave;
	int err = 0;

	bond_for_each_slave(bond, slave, iter) {
		err = slave_enable_netpoll(slave);
		if (err) {
			bond_netpoll_cleanup(dev);
			break;
		}
	}
	return err;
}
#else
static inline int slave_enable_netpoll(struct slave *slave)
{
	return 0;
}
static inline void slave_disable_netpoll(struct slave *slave)
{
}
static void bond_netpoll_cleanup(struct net_device *bond_dev)
{
}
#endif

/*---------------------------------- IOCTL ----------------------------------*/

static netdev_features_t bond_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	struct bonding *bond = netdev_priv(dev);
	struct list_head *iter;
	netdev_features_t mask;
	struct slave *slave;

#if IS_ENABLED(CONFIG_TLS_DEVICE)
	if (bond_sk_check(bond))
		features |= BOND_TLS_FEATURES;
	else
		features &= ~BOND_TLS_FEATURES;
#endif

	mask = features;

	features &= ~NETIF_F_ONE_FOR_ALL;
	features |= NETIF_F_ALL_FOR_ALL;

	bond_for_each_slave(bond, slave, iter) {
		features = netdev_increment_features(features,
						     slave->dev->features,
						     mask);
	}
	features = netdev_add_tso_features(features, mask);

	return features;
}

#define BOND_VLAN_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
				 NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \
				 NETIF_F_HIGHDMA | NETIF_F_LRO)

#define BOND_ENC_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
				 NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE)

#define BOND_MPLS_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
				 NETIF_F_GSO_SOFTWARE)

static void bond_compute_features(struct bonding *bond)
{
	unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
					IFF_XMIT_DST_RELEASE_PERM;
	netdev_features_t vlan_features = BOND_VLAN_FEATURES;
	netdev_features_t enc_features  = BOND_ENC_FEATURES;
#ifdef CONFIG_XFRM_OFFLOAD
	netdev_features_t xfrm_features  = BOND_XFRM_FEATURES;
#endif /* CONFIG_XFRM_OFFLOAD */
	netdev_features_t mpls_features  = BOND_MPLS_FEATURES;
	struct net_device *bond_dev = bond->dev;
	struct list_head *iter;
	struct slave *slave;
	unsigned short max_hard_header_len = ETH_HLEN;
	unsigned int gso_max_size = GSO_MAX_SIZE;
	u16 gso_max_segs = GSO_MAX_SEGS;

	if (!bond_has_slaves(bond))
		goto done;
	vlan_features &= NETIF_F_ALL_FOR_ALL;
	mpls_features &= NETIF_F_ALL_FOR_ALL;

	bond_for_each_slave(bond, slave, iter) {
		vlan_features = netdev_increment_features(vlan_features,
			slave->dev->vlan_features, BOND_VLAN_FEATURES);

		enc_features = netdev_increment_features(enc_features,
							 slave->dev->hw_enc_features,
							 BOND_ENC_FEATURES);

#ifdef CONFIG_XFRM_OFFLOAD
		xfrm_features = netdev_increment_features(xfrm_features,
							  slave->dev->hw_enc_features,
							  BOND_XFRM_FEATURES);
#endif /* CONFIG_XFRM_OFFLOAD */

		mpls_features = netdev_increment_features(mpls_features,
							  slave->dev->mpls_features,
							  BOND_MPLS_FEATURES);

		dst_release_flag &= slave->dev->priv_flags;
		if (slave->dev->hard_header_len > max_hard_header_len)
			max_hard_header_len = slave->dev->hard_header_len;

		gso_max_size = min(gso_max_size, slave->dev->gso_max_size);
		gso_max_segs = min(gso_max_segs, slave->dev->gso_max_segs);
	}
	bond_dev->hard_header_len = max_hard_header_len;

done:
	bond_dev->vlan_features = vlan_features;
	bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
				    NETIF_F_HW_VLAN_CTAG_TX |
				    NETIF_F_HW_VLAN_STAG_TX;
#ifdef CONFIG_XFRM_OFFLOAD
	bond_dev->hw_enc_features |= xfrm_features;
#endif /* CONFIG_XFRM_OFFLOAD */
	bond_dev->mpls_features = mpls_features;
	netif_set_gso_max_segs(bond_dev, gso_max_segs);
	netif_set_gso_max_size(bond_dev, gso_max_size);

	bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
	if ((bond_dev->priv_flags & IFF_XMIT_DST_RELEASE_PERM) &&
	    dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
		bond_dev->priv_flags |= IFF_XMIT_DST_RELEASE;

	netdev_change_features(bond_dev);
}
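
/* The recomputed feature set can be inspected from userspace, e.g.
 * (illustrative):
 *
 *	ethtool -k bond0
 */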

static void bond_setup_by_slave(struct net_device *bond_dev,
				struct net_device *slave_dev)
{
	bond_dev->header_ops	    = slave_dev->header_ops;

	bond_dev->type		    = slave_dev->type;
	bond_dev->hard_header_len   = slave_dev->hard_header_len;
	bond_dev->needed_headroom   = slave_dev->needed_headroom;
	bond_dev->addr_len	    = slave_dev->addr_len;

	memcpy(bond_dev->broadcast, slave_dev->broadcast,
		slave_dev->addr_len);
}

/* On bonding slaves other than the currently active slave, suppress
 * duplicates except for alb non-mcast/bcast.
 */
static bool bond_should_deliver_exact_match(struct sk_buff *skb,
					    struct slave *slave,
					    struct bonding *bond)
{
	if (bond_is_slave_inactive(slave)) {
		if (BOND_MODE(bond) == BOND_MODE_ALB &&
		    skb->pkt_type != PACKET_BROADCAST &&
		    skb->pkt_type != PACKET_MULTICAST)
			return false;
		return true;
	}
	return false;
}

static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct slave *slave;
	struct bonding *bond;
	int (*recv_probe)(const struct sk_buff *, struct bonding *,
			  struct slave *);
	int ret = RX_HANDLER_ANOTHER;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return RX_HANDLER_CONSUMED;

	*pskb = skb;

	slave = bond_slave_get_rcu(skb->dev);
	bond = slave->bond;

	recv_probe = READ_ONCE(bond->recv_probe);
	if (recv_probe) {
		ret = recv_probe(skb, bond, slave);
		if (ret == RX_HANDLER_CONSUMED) {
			consume_skb(skb);
			return ret;
		}
	}

	/*
	 * For packets determined by bond_should_deliver_exact_match() call to
	 * be suppressed we want to make an exception for link-local packets.
	 * This is necessary for e.g. LLDP daemons to be able to monitor
	 * inactive slave links without being forced to bind to them
	 * explicitly.
	 *
	 * At the same time, packets that are passed to the bonding master
	 * (including link-local ones) can have their originating interface
	 * determined via PACKET_ORIGDEV socket option.
	 */
	if (bond_should_deliver_exact_match(skb, slave, bond)) {
		if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
			return RX_HANDLER_PASS;
		return RX_HANDLER_EXACT;
	}

	skb->dev = bond->dev;

	if (BOND_MODE(bond) == BOND_MODE_ALB &&
	    netif_is_bridge_port(bond->dev) &&
	    skb->pkt_type == PACKET_HOST) {

		if (unlikely(skb_cow_head(skb,
					  skb->data - skb_mac_header(skb)))) {
			kfree_skb(skb);
			return RX_HANDLER_CONSUMED;
		}
		bond_hw_addr_copy(eth_hdr(skb)->h_dest, bond->dev->dev_addr,
				  bond->dev->addr_len);
	}

	return ret;
}

static enum netdev_lag_tx_type bond_lag_tx_type(struct bonding *bond)
{
	switch (BOND_MODE(bond)) {
	case BOND_MODE_ROUNDROBIN:
		return NETDEV_LAG_TX_TYPE_ROUNDROBIN;
	case BOND_MODE_ACTIVEBACKUP:
		return NETDEV_LAG_TX_TYPE_ACTIVEBACKUP;
	case BOND_MODE_BROADCAST:
		return NETDEV_LAG_TX_TYPE_BROADCAST;
	case BOND_MODE_XOR:
	case BOND_MODE_8023AD:
		return NETDEV_LAG_TX_TYPE_HASH;
	default:
		return NETDEV_LAG_TX_TYPE_UNKNOWN;
	}
}

static enum netdev_lag_hash bond_lag_hash_type(struct bonding *bond,
					       enum netdev_lag_tx_type type)
{
	if (type != NETDEV_LAG_TX_TYPE_HASH)
		return NETDEV_LAG_HASH_NONE;

	switch (bond->params.xmit_policy) {
	case BOND_XMIT_POLICY_LAYER2:
		return NETDEV_LAG_HASH_L2;
	case BOND_XMIT_POLICY_LAYER34:
		return NETDEV_LAG_HASH_L34;
	case BOND_XMIT_POLICY_LAYER23:
		return NETDEV_LAG_HASH_L23;
	case BOND_XMIT_POLICY_ENCAP23:
		return NETDEV_LAG_HASH_E23;
	case BOND_XMIT_POLICY_ENCAP34:
		return NETDEV_LAG_HASH_E34;
	case BOND_XMIT_POLICY_VLAN_SRCMAC:
		return NETDEV_LAG_HASH_VLAN_SRCMAC;
	default:
		return NETDEV_LAG_HASH_UNKNOWN;
	}
}

static int bond_master_upper_dev_link(struct bonding *bond, struct slave *slave,
				      struct netlink_ext_ack *extack)
{
	struct netdev_lag_upper_info lag_upper_info;
	enum netdev_lag_tx_type type;

	type = bond_lag_tx_type(bond);
	lag_upper_info.tx_type = type;
	lag_upper_info.hash_type = bond_lag_hash_type(bond, type);

	return netdev_master_upper_dev_link(slave->dev, bond->dev, slave,
					    &lag_upper_info, extack);
}

static void bond_upper_dev_unlink(struct bonding *bond, struct slave *slave)
{
	netdev_upper_dev_unlink(slave->dev, bond->dev);
	slave->dev->flags &= ~IFF_SLAVE;
}

static void slave_kobj_release(struct kobject *kobj)
{
	struct slave *slave = to_slave(kobj);
	struct bonding *bond = bond_get_bond_by_slave(slave);

	cancel_delayed_work_sync(&slave->notify_work);
	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		kfree(SLAVE_AD_INFO(slave));

	kfree(slave);
}

static struct kobj_type slave_ktype = {
	.release = slave_kobj_release,
#ifdef CONFIG_SYSFS
	.sysfs_ops = &slave_sysfs_ops,
#endif
};

static int bond_kobj_init(struct slave *slave)
{
	int err;

	err = kobject_init_and_add(&slave->kobj, &slave_ktype,
				   &(slave->dev->dev.kobj), "bonding_slave");
	if (err)
		kobject_put(&slave->kobj);

	return err;
}

static struct slave *bond_alloc_slave(struct bonding *bond,
				      struct net_device *slave_dev)
{
	struct slave *slave = NULL;

	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	if (!slave)
		return NULL;

	slave->bond = bond;
	slave->dev = slave_dev;
	INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);

	if (bond_kobj_init(slave))
		return NULL;

	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
		SLAVE_AD_INFO(slave) = kzalloc(sizeof(struct ad_slave_info),
					       GFP_KERNEL);
		if (!SLAVE_AD_INFO(slave)) {
			kobject_put(&slave->kobj);
			return NULL;
		}
	}

	return slave;
}

static void bond_fill_ifbond(struct bonding *bond, struct ifbond *info)
{
	info->bond_mode = BOND_MODE(bond);
	info->miimon = bond->params.miimon;
	info->num_slaves = bond->slave_cnt;
}

static void bond_fill_ifslave(struct slave *slave, struct ifslave *info)
{
	strcpy(info->slave_name, slave->dev->name);
	info->link = slave->link;
	info->state = bond_slave_state(slave);
	info->link_failure_count = slave->link_failure_count;
}

static void bond_netdev_notify_work(struct work_struct *_work)
{
	struct slave *slave = container_of(_work, struct slave,
					   notify_work.work);

	if (rtnl_trylock()) {
		struct netdev_bonding_info binfo;

		bond_fill_ifslave(slave, &binfo.slave);
		bond_fill_ifbond(slave->bond, &binfo.master);
		netdev_bonding_info_change(slave->dev, &binfo);
		rtnl_unlock();
	} else {
		queue_delayed_work(slave->bond->wq, &slave->notify_work, 1);
	}
}

void bond_queue_slave_event(struct slave *slave)
{
	queue_delayed_work(slave->bond->wq, &slave->notify_work, 0);
}

void bond_lower_state_changed(struct slave *slave)
{
	struct netdev_lag_lower_state_info info;

	info.link_up = slave->link == BOND_LINK_UP ||
		       slave->link == BOND_LINK_FAIL;
	info.tx_enabled = bond_is_active_slave(slave);
	netdev_lower_state_changed(slave->dev, &info);
}

#define BOND_NL_ERR(bond_dev, extack, errmsg) do {		\
	if (extack)						\
		NL_SET_ERR_MSG(extack, errmsg);			\
	else							\
		netdev_err(bond_dev, "Error: %s\n", errmsg);	\
} while (0)

#define SLAVE_NL_ERR(bond_dev, slave_dev, extack, errmsg) do {		\
	if (extack)							\
		NL_SET_ERR_MSG(extack, errmsg);				\
	else								\
		slave_err(bond_dev, slave_dev, "Error: %s\n", errmsg);	\
} while (0)
1749 
1750 /* enslave device <slave> to bond device <master> */
1751 int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
1752 		 struct netlink_ext_ack *extack)
1753 {
1754 	struct bonding *bond = netdev_priv(bond_dev);
1755 	const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
1756 	struct slave *new_slave = NULL, *prev_slave;
1757 	struct sockaddr_storage ss;
1758 	int link_reporting;
1759 	int res = 0, i;
1760 
1761 	if (slave_dev->flags & IFF_MASTER &&
1762 	    !netif_is_bond_master(slave_dev)) {
1763 		BOND_NL_ERR(bond_dev, extack,
1764 			    "Device type (master device) cannot be enslaved");
1765 		return -EPERM;
1766 	}
1767 
1768 	if (!bond->params.use_carrier &&
1769 	    slave_dev->ethtool_ops->get_link == NULL &&
1770 	    slave_ops->ndo_eth_ioctl == NULL) {
1771 		slave_warn(bond_dev, slave_dev, "no link monitoring support\n");
1772 	}
1773 
1774 	/* already in-use? */
1775 	if (netdev_is_rx_handler_busy(slave_dev)) {
1776 		SLAVE_NL_ERR(bond_dev, slave_dev, extack,
1777 			     "Device is in use and cannot be enslaved");
1778 		return -EBUSY;
1779 	}
1780 
1781 	if (bond_dev == slave_dev) {
1782 		BOND_NL_ERR(bond_dev, extack, "Cannot enslave bond to itself.");
1783 		return -EPERM;
1784 	}
1785 
1786 	/* vlan challenged mutual exclusion */
1787 	/* no need to lock since we're protected by rtnl_lock */
1788 	if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) {
1789 		slave_dbg(bond_dev, slave_dev, "is NETIF_F_VLAN_CHALLENGED\n");
1790 		if (vlan_uses_dev(bond_dev)) {
1791 			SLAVE_NL_ERR(bond_dev, slave_dev, extack,
1792 				     "Can not enslave VLAN challenged device to VLAN enabled bond");
1793 			return -EPERM;
1794 		} else {
1795 			slave_warn(bond_dev, slave_dev, "enslaved VLAN challenged slave. Adding VLANs will be blocked as long as it is part of bond.\n");
1796 		}
1797 	} else {
1798 		slave_dbg(bond_dev, slave_dev, "is !NETIF_F_VLAN_CHALLENGED\n");
1799 	}
1800 
1801 	if (slave_dev->features & NETIF_F_HW_ESP)
1802 		slave_dbg(bond_dev, slave_dev, "is esp-hw-offload capable\n");
1803 
1804 	/* Old ifenslave binaries are no longer supported.  These can
1805 	 * be identified with moderate accuracy by the state of the slave:
1806 	 * the current ifenslave will set the interface down prior to
1807 	 * enslaving it; the old ifenslave will not.
1808 	 */
1809 	if (slave_dev->flags & IFF_UP) {
1810 		SLAVE_NL_ERR(bond_dev, slave_dev, extack,
1811 			     "Device can not be enslaved while up");
1812 		return -EPERM;
1813 	}
1814 
1815 	/* set bonding device ether type by slave - bonding netdevices are
1816 	 * created with ether_setup, so when the slave type is not ARPHRD_ETHER
1817 	 * there is a need to override some of the type-dependent attributes/functions.
1818 	 *
1819 	 * bond ether type mutual exclusion - don't allow slaves of dissimilar
1820 	 * ether type (e.g. ARPHRD_ETHER and ARPHRD_INFINIBAND) to share the same bond
1821 	 */
1822 	if (!bond_has_slaves(bond)) {
1823 		if (bond_dev->type != slave_dev->type) {
1824 			slave_dbg(bond_dev, slave_dev, "change device type from %d to %d\n",
1825 				  bond_dev->type, slave_dev->type);
1826 
1827 			res = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE,
1828 						       bond_dev);
1829 			res = notifier_to_errno(res);
1830 			if (res) {
1831 				slave_err(bond_dev, slave_dev, "refused to change device type\n");
1832 				return -EBUSY;
1833 			}
1834 
1835 			/* Flush unicast and multicast addresses */
1836 			dev_uc_flush(bond_dev);
1837 			dev_mc_flush(bond_dev);
1838 
1839 			if (slave_dev->type != ARPHRD_ETHER)
1840 				bond_setup_by_slave(bond_dev, slave_dev);
1841 			else {
1842 				ether_setup(bond_dev);
1843 				bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1844 			}
1845 
1846 			call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
1847 						 bond_dev);
1848 		}
1849 	} else if (bond_dev->type != slave_dev->type) {
1850 		SLAVE_NL_ERR(bond_dev, slave_dev, extack,
1851 			     "Device type is different from other slaves");
1852 		return -EINVAL;
1853 	}
1854 
1855 	if (slave_dev->type == ARPHRD_INFINIBAND &&
1856 	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
1857 		SLAVE_NL_ERR(bond_dev, slave_dev, extack,
1858 			     "Only active-backup mode is supported for infiniband slaves");
1859 		res = -EOPNOTSUPP;
1860 		goto err_undo_flags;
1861 	}
1862 
1863 	if (!slave_ops->ndo_set_mac_address ||
1864 	    slave_dev->type == ARPHRD_INFINIBAND) {
1865 		slave_warn(bond_dev, slave_dev, "The slave device specified does not support setting the MAC address\n");
1866 		if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
1867 		    bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
1868 			if (!bond_has_slaves(bond)) {
1869 				bond->params.fail_over_mac = BOND_FOM_ACTIVE;
1870 				slave_warn(bond_dev, slave_dev, "Setting fail_over_mac to active for active-backup mode\n");
1871 			} else {
1872 				SLAVE_NL_ERR(bond_dev, slave_dev, extack,
1873 					     "Slave device does not support setting the MAC address, but fail_over_mac is not set to active");
1874 				res = -EOPNOTSUPP;
1875 				goto err_undo_flags;
1876 			}
1877 		}
1878 	}
1879 
1880 	call_netdevice_notifiers(NETDEV_JOIN, slave_dev);
1881 
1882 	/* If this is the first slave, then we need to set the master's hardware
1883 	 * address to be the same as the slave's.
1884 	 */
1885 	if (!bond_has_slaves(bond) &&
1886 	    bond->dev->addr_assign_type == NET_ADDR_RANDOM) {
1887 		res = bond_set_dev_addr(bond->dev, slave_dev);
1888 		if (res)
1889 			goto err_undo_flags;
1890 	}
1891 
1892 	new_slave = bond_alloc_slave(bond, slave_dev);
1893 	if (!new_slave) {
1894 		res = -ENOMEM;
1895 		goto err_undo_flags;
1896 	}
1897 
1898 	/* Set the new_slave's queue_id to be zero.  Queue ID mapping
1899 	 * is set via sysfs or module option if desired.
1900 	 */
1901 	new_slave->queue_id = 0;
1902 
1903 	/* Save slave's original mtu and then set it to match the bond */
1904 	new_slave->original_mtu = slave_dev->mtu;
1905 	res = dev_set_mtu(slave_dev, bond->dev->mtu);
1906 	if (res) {
1907 		slave_err(bond_dev, slave_dev, "Error %d calling dev_set_mtu\n", res);
1908 		goto err_free;
1909 	}
1910 
1911 	/* Save slave's original ("permanent") mac address for modes
1912 	 * that need it, and for restoring it upon release, and then
1913 	 * set it to the master's address
1914 	 */
1915 	bond_hw_addr_copy(new_slave->perm_hwaddr, slave_dev->dev_addr,
1916 			  slave_dev->addr_len);
1917 
1918 	if (!bond->params.fail_over_mac ||
1919 	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
1920 		/* Set slave to master's mac address.  The application already
1921 		 * set the master's mac address to that of the first slave
1922 		 */
1923 		memcpy(ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
1924 		ss.ss_family = slave_dev->type;
1925 		res = dev_set_mac_address(slave_dev, (struct sockaddr *)&ss,
1926 					  extack);
1927 		if (res) {
1928 			slave_err(bond_dev, slave_dev, "Error %d calling set_mac_address\n", res);
1929 			goto err_restore_mtu;
1930 		}
1931 	}
1932 
1933 	/* set slave flag before open to prevent IPv6 addrconf */
1934 	slave_dev->flags |= IFF_SLAVE;
1935 
1936 	/* open the slave since the application closed it */
1937 	res = dev_open(slave_dev, extack);
1938 	if (res) {
1939 		slave_err(bond_dev, slave_dev, "Opening slave failed\n");
1940 		goto err_restore_mac;
1941 	}
1942 
1943 	slave_dev->priv_flags |= IFF_BONDING;
1944 	/* initialize slave stats */
1945 	dev_get_stats(new_slave->dev, &new_slave->slave_stats);
1946 
1947 	if (bond_is_lb(bond)) {
1948 		/* bond_alb_init_slave() must be called before all other stages since
1949 		 * it might fail and we do not want to have to undo everything
1950 		 */
1951 		res = bond_alb_init_slave(bond, new_slave);
1952 		if (res)
1953 			goto err_close;
1954 	}
1955 
1956 	res = vlan_vids_add_by_dev(slave_dev, bond_dev);
1957 	if (res) {
1958 		slave_err(bond_dev, slave_dev, "Couldn't add bond vlan ids\n");
1959 		goto err_close;
1960 	}
1961 
1962 	prev_slave = bond_last_slave(bond);
1963 
1964 	new_slave->delay = 0;
1965 	new_slave->link_failure_count = 0;
1966 
1967 	if (bond_update_speed_duplex(new_slave) &&
1968 	    bond_needs_speed_duplex(bond))
1969 		new_slave->link = BOND_LINK_DOWN;
1970 
1971 	new_slave->last_rx = jiffies -
1972 		(msecs_to_jiffies(bond->params.arp_interval) + 1);
1973 	for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
1974 		new_slave->target_last_arp_rx[i] = new_slave->last_rx;
1975 
1976 	if (bond->params.miimon && !bond->params.use_carrier) {
1977 		link_reporting = bond_check_dev_link(bond, slave_dev, 1);
1978 
1979 		if ((link_reporting == -1) && !bond->params.arp_interval) {
1980 			/* miimon is set but a bonded network driver
1981 			 * does not support ETHTOOL/MII and
1982 			 * arp_interval is not set.  Note: if
1983 			 * use_carrier is enabled, we will never go
1984 			 * here (because netif_carrier is always
1985 			 * supported); thus, we don't need to change
1986 			 * the messages for netif_carrier.
1987 			 */
1988 			slave_warn(bond_dev, slave_dev, "MII and ETHTOOL support not available for slave, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details\n");
1989 		} else if (link_reporting == -1) {
1990 			/* unable to get link status using mii/ethtool */
1991 			slave_warn(bond_dev, slave_dev, "can't get link status from slave; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface\n");
1992 		}
1993 	}
1994 
1995 	/* check for initial state */
1996 	new_slave->link = BOND_LINK_NOCHANGE;
1997 	if (bond->params.miimon) {
1998 		if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
1999 			if (bond->params.updelay) {
2000 				bond_set_slave_link_state(new_slave,
2001 							  BOND_LINK_BACK,
2002 							  BOND_SLAVE_NOTIFY_NOW);
2003 				new_slave->delay = bond->params.updelay;
2004 			} else {
2005 				bond_set_slave_link_state(new_slave,
2006 							  BOND_LINK_UP,
2007 							  BOND_SLAVE_NOTIFY_NOW);
2008 			}
2009 		} else {
2010 			bond_set_slave_link_state(new_slave, BOND_LINK_DOWN,
2011 						  BOND_SLAVE_NOTIFY_NOW);
2012 		}
2013 	} else if (bond->params.arp_interval) {
2014 		bond_set_slave_link_state(new_slave,
2015 					  (netif_carrier_ok(slave_dev) ?
2016 					  BOND_LINK_UP : BOND_LINK_DOWN),
2017 					  BOND_SLAVE_NOTIFY_NOW);
2018 	} else {
2019 		bond_set_slave_link_state(new_slave, BOND_LINK_UP,
2020 					  BOND_SLAVE_NOTIFY_NOW);
2021 	}
2022 
2023 	if (new_slave->link != BOND_LINK_DOWN)
2024 		new_slave->last_link_up = jiffies;
2025 	slave_dbg(bond_dev, slave_dev, "Initial state of slave is BOND_LINK_%s\n",
2026 		  new_slave->link == BOND_LINK_DOWN ? "DOWN" :
2027 		  (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
2028 
2029 	if (bond_uses_primary(bond) && bond->params.primary[0]) {
2030 		/* if there is a primary slave, remember it */
2031 		if (strcmp(bond->params.primary, new_slave->dev->name) == 0) {
2032 			rcu_assign_pointer(bond->primary_slave, new_slave);
2033 			bond->force_primary = true;
2034 		}
2035 	}
2036 
2037 	switch (BOND_MODE(bond)) {
2038 	case BOND_MODE_ACTIVEBACKUP:
2039 		bond_set_slave_inactive_flags(new_slave,
2040 					      BOND_SLAVE_NOTIFY_NOW);
2041 		break;
2042 	case BOND_MODE_8023AD:
2043 		/* in 802.3ad mode, the internal mechanism
2044 		 * will activate the slaves in the selected
2045 		 * aggregator
2046 		 */
2047 		bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
2048 		/* if this is the first slave */
2049 		if (!prev_slave) {
2050 			SLAVE_AD_INFO(new_slave)->id = 1;
2051 			/* Initialize AD with the number of times the AD timer runs per
2052 			 * second; this can be done only after the bond's MAC address is set.
2053 			 */
2054 			bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL);
2055 		} else {
2056 			SLAVE_AD_INFO(new_slave)->id =
2057 				SLAVE_AD_INFO(prev_slave)->id + 1;
2058 		}
2059 
2060 		bond_3ad_bind_slave(new_slave);
2061 		break;
2062 	case BOND_MODE_TLB:
2063 	case BOND_MODE_ALB:
2064 		bond_set_active_slave(new_slave);
2065 		bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
2066 		break;
2067 	default:
2068 		slave_dbg(bond_dev, slave_dev, "This slave is always active in trunk mode\n");
2069 
2070 		/* always active in trunk mode */
2071 		bond_set_active_slave(new_slave);
2072 
2073 		/* In trunking mode there is little meaning to curr_active_slave
2074 		 * anyway (it holds no special properties of the bond device),
2075 		 * so we can change it without calling change_active_interface()
2076 		 */
2077 		if (!rcu_access_pointer(bond->curr_active_slave) &&
2078 		    new_slave->link == BOND_LINK_UP)
2079 			rcu_assign_pointer(bond->curr_active_slave, new_slave);
2080 
2081 		break;
2082 	} /* switch(bond_mode) */
2083 
2084 #ifdef CONFIG_NET_POLL_CONTROLLER
2085 	if (bond->dev->npinfo) {
2086 		if (slave_enable_netpoll(new_slave)) {
2087 			slave_info(bond_dev, slave_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n");
2088 			res = -EBUSY;
2089 			goto err_detach;
2090 		}
2091 	}
2092 #endif
2093 
2094 	if (!(bond_dev->features & NETIF_F_LRO))
2095 		dev_disable_lro(slave_dev);
2096 
2097 	res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
2098 					 new_slave);
2099 	if (res) {
2100 		slave_dbg(bond_dev, slave_dev, "Error %d calling netdev_rx_handler_register\n", res);
2101 		goto err_detach;
2102 	}
2103 
2104 	res = bond_master_upper_dev_link(bond, new_slave, extack);
2105 	if (res) {
2106 		slave_dbg(bond_dev, slave_dev, "Error %d calling bond_master_upper_dev_link\n", res);
2107 		goto err_unregister;
2108 	}
2109 
2110 	bond_lower_state_changed(new_slave);
2111 
2112 	res = bond_sysfs_slave_add(new_slave);
2113 	if (res) {
2114 		slave_dbg(bond_dev, slave_dev, "Error %d calling bond_sysfs_slave_add\n", res);
2115 		goto err_upper_unlink;
2116 	}
2117 
2118 	/* If the mode uses primary, then the following is handled by
2119 	 * bond_change_active_slave().
2120 	 */
2121 	if (!bond_uses_primary(bond)) {
2122 		/* set promiscuity level to new slave */
2123 		if (bond_dev->flags & IFF_PROMISC) {
2124 			res = dev_set_promiscuity(slave_dev, 1);
2125 			if (res)
2126 				goto err_sysfs_del;
2127 		}
2128 
2129 		/* set allmulti level to new slave */
2130 		if (bond_dev->flags & IFF_ALLMULTI) {
2131 			res = dev_set_allmulti(slave_dev, 1);
2132 			if (res) {
2133 				if (bond_dev->flags & IFF_PROMISC)
2134 					dev_set_promiscuity(slave_dev, -1);
2135 				goto err_sysfs_del;
2136 			}
2137 		}
2138 
2139 		netif_addr_lock_bh(bond_dev);
2140 		dev_mc_sync_multiple(slave_dev, bond_dev);
2141 		dev_uc_sync_multiple(slave_dev, bond_dev);
2142 		netif_addr_unlock_bh(bond_dev);
2143 
2144 		if (BOND_MODE(bond) == BOND_MODE_8023AD) {
2145 			/* add lacpdu mc addr to mc list */
2146 			u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
2147 
2148 			dev_mc_add(slave_dev, lacpdu_multicast);
2149 		}
2150 	}
2151 
2152 	bond->slave_cnt++;
2153 	bond_compute_features(bond);
2154 	bond_set_carrier(bond);
2155 
2156 	if (bond_uses_primary(bond)) {
2157 		block_netpoll_tx();
2158 		bond_select_active_slave(bond);
2159 		unblock_netpoll_tx();
2160 	}
2161 
2162 	if (bond_mode_can_use_xmit_hash(bond))
2163 		bond_update_slave_arr(bond, NULL);
2164 
2165 
2166 	if (!slave_dev->netdev_ops->ndo_bpf ||
2167 	    !slave_dev->netdev_ops->ndo_xdp_xmit) {
2168 		if (bond->xdp_prog) {
2169 			SLAVE_NL_ERR(bond_dev, slave_dev, extack,
2170 				     "Slave does not support XDP");
2171 			res = -EOPNOTSUPP;
2172 			goto err_sysfs_del;
2173 		}
2174 	} else if (bond->xdp_prog) {
2175 		struct netdev_bpf xdp = {
2176 			.command = XDP_SETUP_PROG,
2177 			.flags   = 0,
2178 			.prog    = bond->xdp_prog,
2179 			.extack  = extack,
2180 		};
2181 
2182 		if (dev_xdp_prog_count(slave_dev) > 0) {
2183 			SLAVE_NL_ERR(bond_dev, slave_dev, extack,
2184 				     "Slave has XDP program loaded, please unload before enslaving");
2185 			res = -EOPNOTSUPP;
2186 			goto err_sysfs_del;
2187 		}
2188 
2189 		res = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
2190 		if (res < 0) {
2191 			/* ndo_bpf() sets extack error message */
2192 			slave_dbg(bond_dev, slave_dev, "Error %d calling ndo_bpf\n", res);
2193 			goto err_sysfs_del;
2194 		}
2195 		if (bond->xdp_prog)
2196 			bpf_prog_inc(bond->xdp_prog);
2197 	}
2198 
2199 	slave_info(bond_dev, slave_dev, "Enslaving as %s interface with %s link\n",
2200 		   bond_is_active_slave(new_slave) ? "an active" : "a backup",
2201 		   new_slave->link != BOND_LINK_DOWN ? "an up" : "a down");
2202 
2203 	/* enslave is successful */
2204 	bond_queue_slave_event(new_slave);
2205 	return 0;
2206 
2207 /* Undo stages on error */
2208 err_sysfs_del:
2209 	bond_sysfs_slave_del(new_slave);
2210 
2211 err_upper_unlink:
2212 	bond_upper_dev_unlink(bond, new_slave);
2213 
2214 err_unregister:
2215 	netdev_rx_handler_unregister(slave_dev);
2216 
2217 err_detach:
2218 	vlan_vids_del_by_dev(slave_dev, bond_dev);
2219 	if (rcu_access_pointer(bond->primary_slave) == new_slave)
2220 		RCU_INIT_POINTER(bond->primary_slave, NULL);
2221 	if (rcu_access_pointer(bond->curr_active_slave) == new_slave) {
2222 		block_netpoll_tx();
2223 		bond_change_active_slave(bond, NULL);
2224 		bond_select_active_slave(bond);
2225 		unblock_netpoll_tx();
2226 	}
2227 	/* either primary_slave or curr_active_slave might've changed */
2228 	synchronize_rcu();
2229 	slave_disable_netpoll(new_slave);
2230 
2231 err_close:
2232 	if (!netif_is_bond_master(slave_dev))
2233 		slave_dev->priv_flags &= ~IFF_BONDING;
2234 	dev_close(slave_dev);
2235 
2236 err_restore_mac:
2237 	slave_dev->flags &= ~IFF_SLAVE;
2238 	if (!bond->params.fail_over_mac ||
2239 	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
2240 		/* XXX TODO - fom follow mode needs to change master's
2241 		 * MAC if this slave's MAC is in use by the bond, or at
2242 		 * least print a warning.
2243 		 */
2244 		bond_hw_addr_copy(ss.__data, new_slave->perm_hwaddr,
2245 				  new_slave->dev->addr_len);
2246 		ss.ss_family = slave_dev->type;
2247 		dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, NULL);
2248 	}
2249 
2250 err_restore_mtu:
2251 	dev_set_mtu(slave_dev, new_slave->original_mtu);
2252 
2253 err_free:
2254 	kobject_put(&new_slave->kobj);
2255 
2256 err_undo_flags:
2257 	/* Enslave of first slave has failed and we need to fix master's mac */
2258 	if (!bond_has_slaves(bond)) {
2259 		if (ether_addr_equal_64bits(bond_dev->dev_addr,
2260 					    slave_dev->dev_addr))
2261 			eth_hw_addr_random(bond_dev);
2262 		if (bond_dev->type != ARPHRD_ETHER) {
2263 			dev_close(bond_dev);
2264 			ether_setup(bond_dev);
2265 			bond_dev->flags |= IFF_MASTER;
2266 			bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
2267 		}
2268 	}
2269 
2270 	return res;
2271 }
2272 
2273 /* Try to release the slave device <slave> from the bond device <master>.
2274  * It is legal to access curr_active_slave without a lock because the entire
2275  * function is RTNL-locked. If "all" is true it means that the function is
2276  * being called while destroying a bond interface and all slaves are being released.
2277  *
2278  * The rules for slave state should be:
2279  *   for Active/Backup:
2280  *     the active slave stays up; all backups go down
2281  *   for Bonded connections:
2282  *     the first up interface should be left on and all others downed.
2283  */
2284 static int __bond_release_one(struct net_device *bond_dev,
2285 			      struct net_device *slave_dev,
2286 			      bool all, bool unregister)
2287 {
2288 	struct bonding *bond = netdev_priv(bond_dev);
2289 	struct slave *slave, *oldcurrent;
2290 	struct sockaddr_storage ss;
2291 	int old_flags = bond_dev->flags;
2292 	netdev_features_t old_features = bond_dev->features;
2293 
2294 	/* slave is not a slave or master is not master of this slave */
2295 	if (!(slave_dev->flags & IFF_SLAVE) ||
2296 	    !netdev_has_upper_dev(slave_dev, bond_dev)) {
2297 		slave_dbg(bond_dev, slave_dev, "cannot release slave\n");
2298 		return -EINVAL;
2299 	}
2300 
2301 	block_netpoll_tx();
2302 
2303 	slave = bond_get_slave_by_dev(bond, slave_dev);
2304 	if (!slave) {
2305 		/* not a slave of this bond */
2306 		slave_info(bond_dev, slave_dev, "interface not enslaved\n");
2307 		unblock_netpoll_tx();
2308 		return -EINVAL;
2309 	}
2310 
2311 	bond_set_slave_inactive_flags(slave, BOND_SLAVE_NOTIFY_NOW);
2312 
2313 	bond_sysfs_slave_del(slave);
2314 
2315 	/* recompute stats just before removing the slave */
2316 	bond_get_stats(bond->dev, &bond->bond_stats);
2317 
2318 	if (bond->xdp_prog) {
2319 		struct netdev_bpf xdp = {
2320 			.command = XDP_SETUP_PROG,
2321 			.flags   = 0,
2322 			.prog	 = NULL,
2323 			.extack  = NULL,
2324 		};
2325 		if (slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp))
2326 			slave_warn(bond_dev, slave_dev, "failed to unload XDP program\n");
2327 	}
2328 
2329 	/* unregister rx_handler early so bond_handle_frame won't be called
2330 	 * for this slave anymore.
2331 	 */
2332 	netdev_rx_handler_unregister(slave_dev);
2333 
2334 	if (BOND_MODE(bond) == BOND_MODE_8023AD)
2335 		bond_3ad_unbind_slave(slave);
2336 
2337 	bond_upper_dev_unlink(bond, slave);
2338 
2339 	if (bond_mode_can_use_xmit_hash(bond))
2340 		bond_update_slave_arr(bond, slave);
2341 
2342 	slave_info(bond_dev, slave_dev, "Releasing %s interface\n",
2343 		    bond_is_active_slave(slave) ? "active" : "backup");
2344 
2345 	oldcurrent = rcu_access_pointer(bond->curr_active_slave);
2346 
2347 	RCU_INIT_POINTER(bond->current_arp_slave, NULL);
2348 
2349 	if (!all && (!bond->params.fail_over_mac ||
2350 		     BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)) {
2351 		if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
2352 		    bond_has_slaves(bond))
2353 			slave_warn(bond_dev, slave_dev, "the permanent HWaddr of slave - %pM - is still in use by bond - set the HWaddr of slave to a different address to avoid conflicts\n",
2354 				   slave->perm_hwaddr);
2355 	}
2356 
2357 	if (rtnl_dereference(bond->primary_slave) == slave)
2358 		RCU_INIT_POINTER(bond->primary_slave, NULL);
2359 
2360 	if (oldcurrent == slave)
2361 		bond_change_active_slave(bond, NULL);
2362 
2363 	if (bond_is_lb(bond)) {
2364 		/* Must be called only after the slave has been
2365 		 * detached from the list and the curr_active_slave
2366 		 * has been cleared (if our_slave == old_current),
2367 		 * but before a new active slave is selected.
2368 		 */
2369 		bond_alb_deinit_slave(bond, slave);
2370 	}
2371 
2372 	if (all) {
2373 		RCU_INIT_POINTER(bond->curr_active_slave, NULL);
2374 	} else if (oldcurrent == slave) {
2375 		/* Note that we hold RTNL over this sequence, so there
2376 		 * is no concern that another slave add/remove event
2377 		 * will interfere.
2378 		 */
2379 		bond_select_active_slave(bond);
2380 	}
2381 
2382 	if (!bond_has_slaves(bond)) {
2383 		bond_set_carrier(bond);
2384 		eth_hw_addr_random(bond_dev);
2385 	}
2386 
2387 	unblock_netpoll_tx();
2388 	synchronize_rcu();
2389 	bond->slave_cnt--;
2390 
2391 	if (!bond_has_slaves(bond)) {
2392 		call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
2393 		call_netdevice_notifiers(NETDEV_RELEASE, bond->dev);
2394 	}
2395 
2396 	bond_compute_features(bond);
2397 	if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
2398 	    (old_features & NETIF_F_VLAN_CHALLENGED))
2399 		slave_info(bond_dev, slave_dev, "last VLAN challenged slave left bond - VLAN blocking is removed\n");
2400 
2401 	vlan_vids_del_by_dev(slave_dev, bond_dev);
2402 
2403 	/* If the mode uses primary, then this case was handled above by
2404 	 * bond_change_active_slave(..., NULL)
2405 	 */
2406 	if (!bond_uses_primary(bond)) {
2407 		/* unset promiscuity level from slave
2408 		 * NOTE: The NETDEV_CHANGEADDR call above may change the value
2409 		 * of the IFF_PROMISC flag in the bond_dev, but we need the
2410 		 * value of that flag before that change, as that was the value
2411 		 * when this slave was attached, so we cache at the start of the
2412 		 * when this slave was attached, so we cache it at the start of the
2413 		 */
2414 		if (old_flags & IFF_PROMISC)
2415 			dev_set_promiscuity(slave_dev, -1);
2416 
2417 		/* unset allmulti level from slave */
2418 		if (old_flags & IFF_ALLMULTI)
2419 			dev_set_allmulti(slave_dev, -1);
2420 
2421 		bond_hw_addr_flush(bond_dev, slave_dev);
2422 	}
2423 
2424 	slave_disable_netpoll(slave);
2425 
2426 	/* close slave before restoring its mac address */
2427 	dev_close(slave_dev);
2428 
2429 	if (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
2430 	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
2431 		/* restore original ("permanent") mac address */
2432 		bond_hw_addr_copy(ss.__data, slave->perm_hwaddr,
2433 				  slave->dev->addr_len);
2434 		ss.ss_family = slave_dev->type;
2435 		dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, NULL);
2436 	}
2437 
2438 	if (unregister)
2439 		__dev_set_mtu(slave_dev, slave->original_mtu);
2440 	else
2441 		dev_set_mtu(slave_dev, slave->original_mtu);
2442 
2443 	if (!netif_is_bond_master(slave_dev))
2444 		slave_dev->priv_flags &= ~IFF_BONDING;
2445 
2446 	kobject_put(&slave->kobj);
2447 
2448 	return 0;
2449 }
2450 
2451 /* A wrapper used because of ndo_del_link */
2452 int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
2453 {
2454 	return __bond_release_one(bond_dev, slave_dev, false, false);
2455 }
2456 
2457 /* First release a slave and then destroy the bond if no more slaves are left.
2458  * Must be under rtnl_lock when this function is called.
2459  */
2460 static int bond_release_and_destroy(struct net_device *bond_dev,
2461 				    struct net_device *slave_dev)
2462 {
2463 	struct bonding *bond = netdev_priv(bond_dev);
2464 	int ret;
2465 
2466 	ret = __bond_release_one(bond_dev, slave_dev, false, true);
2467 	if (ret == 0 && !bond_has_slaves(bond) &&
2468 	    bond_dev->reg_state != NETREG_UNREGISTERING) {
2469 		bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
2470 		netdev_info(bond_dev, "Destroying bond\n");
2471 		bond_remove_proc_entry(bond);
2472 		unregister_netdevice(bond_dev);
2473 	}
2474 	return ret;
2475 }
2476 
2477 static void bond_info_query(struct net_device *bond_dev, struct ifbond *info)
2478 {
2479 	struct bonding *bond = netdev_priv(bond_dev);
2480 
2481 	bond_fill_ifbond(bond, info);
2482 }
2483 
2484 static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *info)
2485 {
2486 	struct bonding *bond = netdev_priv(bond_dev);
2487 	struct list_head *iter;
2488 	int i = 0, res = -ENODEV;
2489 	struct slave *slave;
2490 
2491 	bond_for_each_slave(bond, slave, iter) {
2492 		if (i++ == (int)info->slave_id) {
2493 			res = 0;
2494 			bond_fill_ifslave(slave, info);
2495 			break;
2496 		}
2497 	}
2498 
2499 	return res;
2500 }
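
/* Illustrative user-space sketch (not part of this file): the two query
 * helpers above are reached through the SIOCBONDINFOQUERY and
 * SIOCBONDSLAVEINFOQUERY ioctls, with a struct ifbond or struct ifslave
 * passed through ifreq.ifr_data.  Assuming "fd" is any AF_INET datagram
 * socket:
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <net/if.h>
 *	#include <linux/sockios.h>
 *	#include <linux/if_bonding.h>
 *
 *	struct ifreq ifr;
 *	struct ifslave islave = { .slave_id = 0 };	// first slave
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "bond0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (char *)&islave;
 *	if (ioctl(fd, SIOCBONDSLAVEINFOQUERY, &ifr) == 0)
 *		printf("%s: link %d state %d failures %u\n",
 *		       islave.slave_name, islave.link, islave.state,
 *		       islave.link_failure_count);
 */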
2501 
2502 /*-------------------------------- Monitoring -------------------------------*/
2503 
2504 /* called with rcu_read_lock() */
2505 static int bond_miimon_inspect(struct bonding *bond)
2506 {
2507 	int link_state, commit = 0;
2508 	struct list_head *iter;
2509 	struct slave *slave;
2510 	bool ignore_updelay;
2511 
2512 	ignore_updelay = !rcu_dereference(bond->curr_active_slave);
2513 
2514 	bond_for_each_slave_rcu(bond, slave, iter) {
2515 		bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
2516 
2517 		link_state = bond_check_dev_link(bond, slave->dev, 0);
2518 
2519 		switch (slave->link) {
2520 		case BOND_LINK_UP:
2521 			if (link_state)
2522 				continue;
2523 
2524 			bond_propose_link_state(slave, BOND_LINK_FAIL);
2525 			commit++;
2526 			slave->delay = bond->params.downdelay;
2527 			if (slave->delay) {
2528 				slave_info(bond->dev, slave->dev, "link status down for %sinterface, disabling it in %d ms\n",
2529 					   (BOND_MODE(bond) ==
2530 					    BOND_MODE_ACTIVEBACKUP) ?
2531 					    (bond_is_active_slave(slave) ?
2532 					     "active " : "backup ") : "",
2533 					   bond->params.downdelay * bond->params.miimon);
2534 			}
2535 			fallthrough;
2536 		case BOND_LINK_FAIL:
2537 			if (link_state) {
2538 				/* recovered before downdelay expired */
2539 				bond_propose_link_state(slave, BOND_LINK_UP);
2540 				slave->last_link_up = jiffies;
2541 				slave_info(bond->dev, slave->dev, "link status up again after %d ms\n",
2542 					   (bond->params.downdelay - slave->delay) *
2543 					   bond->params.miimon);
2544 				commit++;
2545 				continue;
2546 			}
2547 
2548 			if (slave->delay <= 0) {
2549 				bond_propose_link_state(slave, BOND_LINK_DOWN);
2550 				commit++;
2551 				continue;
2552 			}
2553 
2554 			slave->delay--;
2555 			break;
2556 
2557 		case BOND_LINK_DOWN:
2558 			if (!link_state)
2559 				continue;
2560 
2561 			bond_propose_link_state(slave, BOND_LINK_BACK);
2562 			commit++;
2563 			slave->delay = bond->params.updelay;
2564 
2565 			if (slave->delay) {
2566 				slave_info(bond->dev, slave->dev, "link status up, enabling it in %d ms\n",
2567 					   ignore_updelay ? 0 :
2568 					   bond->params.updelay *
2569 					   bond->params.miimon);
2570 			}
2571 			fallthrough;
2572 		case BOND_LINK_BACK:
2573 			if (!link_state) {
2574 				bond_propose_link_state(slave, BOND_LINK_DOWN);
2575 				slave_info(bond->dev, slave->dev, "link status down again after %d ms\n",
2576 					   (bond->params.updelay - slave->delay) *
2577 					   bond->params.miimon);
2578 				commit++;
2579 				continue;
2580 			}
2581 
2582 			if (ignore_updelay)
2583 				slave->delay = 0;
2584 
2585 			if (slave->delay <= 0) {
2586 				bond_propose_link_state(slave, BOND_LINK_UP);
2587 				commit++;
2588 				ignore_updelay = false;
2589 				continue;
2590 			}
2591 
2592 			slave->delay--;
2593 			break;
2594 		}
2595 	}
2596 
2597 	return commit;
2598 }
2599 
2600 static void bond_miimon_link_change(struct bonding *bond,
2601 				    struct slave *slave,
2602 				    char link)
2603 {
2604 	switch (BOND_MODE(bond)) {
2605 	case BOND_MODE_8023AD:
2606 		bond_3ad_handle_link_change(slave, link);
2607 		break;
2608 	case BOND_MODE_TLB:
2609 	case BOND_MODE_ALB:
2610 		bond_alb_handle_link_change(bond, slave, link);
2611 		break;
2612 	case BOND_MODE_XOR:
2613 		bond_update_slave_arr(bond, NULL);
2614 		break;
2615 	}
2616 }
2617 
2618 static void bond_miimon_commit(struct bonding *bond)
2619 {
2620 	struct list_head *iter;
2621 	struct slave *slave, *primary;
2622 
2623 	bond_for_each_slave(bond, slave, iter) {
2624 		switch (slave->link_new_state) {
2625 		case BOND_LINK_NOCHANGE:
2626 			/* For 802.3ad mode, check current slave speed and
2627 			 * duplex again in case its port was disabled after
2628 			 * invalid speed/duplex reporting but recovered before
2629 			 * link monitoring could make a decision on the actual
2630 			 * link status
2631 			 */
2632 			if (BOND_MODE(bond) == BOND_MODE_8023AD &&
2633 			    slave->link == BOND_LINK_UP)
2634 				bond_3ad_adapter_speed_duplex_changed(slave);
2635 			continue;
2636 
2637 		case BOND_LINK_UP:
2638 			if (bond_update_speed_duplex(slave) &&
2639 			    bond_needs_speed_duplex(bond)) {
2640 				slave->link = BOND_LINK_DOWN;
2641 				if (net_ratelimit())
2642 					slave_warn(bond->dev, slave->dev,
2643 						   "failed to get link speed/duplex\n");
2644 				continue;
2645 			}
2646 			bond_set_slave_link_state(slave, BOND_LINK_UP,
2647 						  BOND_SLAVE_NOTIFY_NOW);
2648 			slave->last_link_up = jiffies;
2649 
2650 			primary = rtnl_dereference(bond->primary_slave);
2651 			if (BOND_MODE(bond) == BOND_MODE_8023AD) {
2652 				/* prevent it from being the active one */
2653 				bond_set_backup_slave(slave);
2654 			} else if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
2655 				/* make it immediately active */
2656 				bond_set_active_slave(slave);
2657 			}
2658 
2659 			slave_info(bond->dev, slave->dev, "link status definitely up, %u Mbps %s duplex\n",
2660 				   slave->speed == SPEED_UNKNOWN ? 0 : slave->speed,
2661 				   slave->duplex ? "full" : "half");
2662 
2663 			bond_miimon_link_change(bond, slave, BOND_LINK_UP);
2664 
2665 			if (!bond->curr_active_slave || slave == primary)
2666 				goto do_failover;
2667 
2668 			continue;
2669 
2670 		case BOND_LINK_DOWN:
2671 			if (slave->link_failure_count < UINT_MAX)
2672 				slave->link_failure_count++;
2673 
2674 			bond_set_slave_link_state(slave, BOND_LINK_DOWN,
2675 						  BOND_SLAVE_NOTIFY_NOW);
2676 
2677 			if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP ||
2678 			    BOND_MODE(bond) == BOND_MODE_8023AD)
2679 				bond_set_slave_inactive_flags(slave,
2680 							      BOND_SLAVE_NOTIFY_NOW);
2681 
2682 			slave_info(bond->dev, slave->dev, "link status definitely down, disabling slave\n");
2683 
2684 			bond_miimon_link_change(bond, slave, BOND_LINK_DOWN);
2685 
2686 			if (slave == rcu_access_pointer(bond->curr_active_slave))
2687 				goto do_failover;
2688 
2689 			continue;
2690 
2691 		default:
2692 			slave_err(bond->dev, slave->dev, "invalid new link %d on slave\n",
2693 				  slave->link_new_state);
2694 			bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
2695 
2696 			continue;
2697 		}
2698 
2699 do_failover:
2700 		block_netpoll_tx();
2701 		bond_select_active_slave(bond);
2702 		unblock_netpoll_tx();
2703 	}
2704 
2705 	bond_set_carrier(bond);
2706 }
2707 
2708 /* bond_mii_monitor
2709  *
2710  * Really a wrapper that splits the mii monitor into two phases: an
2711  * inspection, then (if inspection indicates something needs to be done)
2712  * an acquisition of appropriate locks followed by a commit phase to
2713  * implement whatever link state changes are indicated.
2714  */
2715 static void bond_mii_monitor(struct work_struct *work)
2716 {
2717 	struct bonding *bond = container_of(work, struct bonding,
2718 					    mii_work.work);
2719 	bool should_notify_peers = false;
2720 	bool commit;
2721 	unsigned long delay;
2722 	struct slave *slave;
2723 	struct list_head *iter;
2724 
2725 	delay = msecs_to_jiffies(bond->params.miimon);
2726 
2727 	if (!bond_has_slaves(bond))
2728 		goto re_arm;
2729 
2730 	rcu_read_lock();
2731 	should_notify_peers = bond_should_notify_peers(bond);
2732 	commit = !!bond_miimon_inspect(bond);
2733 	if (bond->send_peer_notif) {
2734 		rcu_read_unlock();
2735 		if (rtnl_trylock()) {
2736 			bond->send_peer_notif--;
2737 			rtnl_unlock();
2738 		}
2739 	} else {
2740 		rcu_read_unlock();
2741 	}
2742 
2743 	if (commit) {
2744 		/* Race avoidance with bond_close cancel of workqueue */
2745 		if (!rtnl_trylock()) {
2746 			delay = 1;
2747 			should_notify_peers = false;
2748 			goto re_arm;
2749 		}
2750 
2751 		bond_for_each_slave(bond, slave, iter) {
2752 			bond_commit_link_state(slave, BOND_SLAVE_NOTIFY_LATER);
2753 		}
2754 		bond_miimon_commit(bond);
2755 
2756 		rtnl_unlock();	/* might sleep, hold no other locks */
2757 	}
2758 
2759 re_arm:
2760 	if (bond->params.miimon)
2761 		queue_delayed_work(bond->wq, &bond->mii_work, delay);
2762 
2763 	if (should_notify_peers) {
2764 		if (!rtnl_trylock())
2765 			return;
2766 		call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
2767 		rtnl_unlock();
2768 	}
2769 }
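
/* Timing note (illustrative): with miimon=100 the work item re-arms every
 * msecs_to_jiffies(100) jiffies.  When rtnl_trylock() fails in the commit
 * path above, delay is shortened to a single jiffy so the proposed link
 * state changes are retried almost immediately instead of waiting out a
 * full miimon period.
 */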
2770 
2771 static int bond_upper_dev_walk(struct net_device *upper,
2772 			       struct netdev_nested_priv *priv)
2773 {
2774 	__be32 ip = *(__be32 *)priv->data;
2775 
2776 	return ip == bond_confirm_addr(upper, 0, ip);
2777 }
2778 
2779 static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
2780 {
2781 	struct netdev_nested_priv priv = {
2782 		.data = (void *)&ip,
2783 	};
2784 	bool ret = false;
2785 
2786 	if (ip == bond_confirm_addr(bond->dev, 0, ip))
2787 		return true;
2788 
2789 	rcu_read_lock();
2790 	if (netdev_walk_all_upper_dev_rcu(bond->dev, bond_upper_dev_walk, &priv))
2791 		ret = true;
2792 	rcu_read_unlock();
2793 
2794 	return ret;
2795 }
2796 
2797 /* We go to the (large) trouble of VLAN tagging ARP frames because
2798  * switches in VLAN mode (especially if ports are configured as
2799  * "native" to a VLAN) might not pass non-tagged frames.
2800  */
2801 static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip,
2802 			  __be32 src_ip, struct bond_vlan_tag *tags)
2803 {
2804 	struct sk_buff *skb;
2805 	struct bond_vlan_tag *outer_tag = tags;
2806 	struct net_device *slave_dev = slave->dev;
2807 	struct net_device *bond_dev = slave->bond->dev;
2808 
2809 	slave_dbg(bond_dev, slave_dev, "arp %d on slave: dst %pI4 src %pI4\n",
2810 		  arp_op, &dest_ip, &src_ip);
2811 
2812 	skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
2813 			 NULL, slave_dev->dev_addr, NULL);
2814 
2815 	if (!skb) {
2816 		net_err_ratelimited("ARP packet allocation failed\n");
2817 		return;
2818 	}
2819 
2820 	if (!tags || tags->vlan_proto == VLAN_N_VID)
2821 		goto xmit;
2822 
2823 	tags++;
2824 
2825 	/* Go through all the tags backwards and add them to the packet */
2826 	while (tags->vlan_proto != VLAN_N_VID) {
2827 		if (!tags->vlan_id) {
2828 			tags++;
2829 			continue;
2830 		}
2831 
2832 		slave_dbg(bond_dev, slave_dev, "inner tag: proto %X vid %X\n",
2833 			  ntohs(outer_tag->vlan_proto), tags->vlan_id);
2834 		skb = vlan_insert_tag_set_proto(skb, tags->vlan_proto,
2835 						tags->vlan_id);
2836 		if (!skb) {
2837 			net_err_ratelimited("failed to insert inner VLAN tag\n");
2838 			return;
2839 		}
2840 
2841 		tags++;
2842 	}
2843 	/* Set the outer tag */
2844 	if (outer_tag->vlan_id) {
2845 		slave_dbg(bond_dev, slave_dev, "outer tag: proto %X vid %X\n",
2846 			  ntohs(outer_tag->vlan_proto), outer_tag->vlan_id);
2847 		__vlan_hwaccel_put_tag(skb, outer_tag->vlan_proto,
2848 				       outer_tag->vlan_id);
2849 	}
2850 
2851 xmit:
2852 	arp_xmit(skb);
2853 }
2854 
2855 /* Validate the device path between the @start_dev and the @end_dev.
2856  * The path is valid if the @end_dev is reachable through device
2857  * stacking.
2858  * When the path is validated, collect any vlan information in the
2859  * path.
2860  */
2861 struct bond_vlan_tag *bond_verify_device_path(struct net_device *start_dev,
2862 					      struct net_device *end_dev,
2863 					      int level)
2864 {
2865 	struct bond_vlan_tag *tags;
2866 	struct net_device *upper;
2867 	struct list_head  *iter;
2868 
2869 	if (start_dev == end_dev) {
2870 		tags = kcalloc(level + 1, sizeof(*tags), GFP_ATOMIC);
2871 		if (!tags)
2872 			return ERR_PTR(-ENOMEM);
2873 		tags[level].vlan_proto = VLAN_N_VID;
2874 		return tags;
2875 	}
2876 
2877 	netdev_for_each_upper_dev_rcu(start_dev, upper, iter) {
2878 		tags = bond_verify_device_path(upper, end_dev, level + 1);
2879 		if (IS_ERR_OR_NULL(tags)) {
2880 			if (IS_ERR(tags))
2881 				return tags;
2882 			continue;
2883 		}
2884 		if (is_vlan_dev(upper)) {
2885 			tags[level].vlan_proto = vlan_dev_vlan_proto(upper);
2886 			tags[level].vlan_id = vlan_dev_vlan_id(upper);
2887 		}
2888 
2889 		return tags;
2890 	}
2891 
2892 	return NULL;
2893 }
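
/* Example (illustrative): for a stack bond0 <- vlan100 <- vlan200, where
 * vlan200 is the route's egress device, bond_verify_device_path(bond0,
 * vlan200, 0) returns
 *	tags[0] = { proto 802.1Q, vid 100 }	(device closest to the bond)
 *	tags[1] = { proto 802.1Q, vid 200 }
 *	tags[2] = { proto VLAN_N_VID }		(terminator)
 * bond_arp_send() above then inserts tags[1..] into the ARP packet and
 * sets tags[0] as the outer, hw-accelerated tag.
 */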
2894 
2895 static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2896 {
2897 	struct rtable *rt;
2898 	struct bond_vlan_tag *tags;
2899 	__be32 *targets = bond->params.arp_targets, addr;
2900 	int i;
2901 
2902 	for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
2903 		slave_dbg(bond->dev, slave->dev, "%s: target %pI4\n",
2904 			  __func__, &targets[i]);
2905 		tags = NULL;
2906 
2907 		/* Find out through which dev should the packet go */
2908 		rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
2909 				     RTO_ONLINK, 0);
2910 		if (IS_ERR(rt)) {
2911 			/* there's no route to target - try to send arp
2912 			 * probe to generate any traffic (arp_validate=0)
2913 			 */
2914 			if (bond->params.arp_validate)
2915 				pr_warn_once("%s: no route to arp_ip_target %pI4 and arp_validate is set\n",
2916 					     bond->dev->name,
2917 					     &targets[i]);
2918 			bond_arp_send(slave, ARPOP_REQUEST, targets[i],
2919 				      0, tags);
2920 			continue;
2921 		}
2922 
2923 		/* bond device itself */
2924 		if (rt->dst.dev == bond->dev)
2925 			goto found;
2926 
2927 		rcu_read_lock();
2928 		tags = bond_verify_device_path(bond->dev, rt->dst.dev, 0);
2929 		rcu_read_unlock();
2930 
2931 		if (!IS_ERR_OR_NULL(tags))
2932 			goto found;
2933 
2934 		/* Not our device - skip */
2935 		slave_dbg(bond->dev, slave->dev, "no path to arp_ip_target %pI4 via rt.dev %s\n",
2936 			   &targets[i], rt->dst.dev ? rt->dst.dev->name : "NULL");
2937 
2938 		ip_rt_put(rt);
2939 		continue;
2940 
2941 found:
2942 		addr = bond_confirm_addr(rt->dst.dev, targets[i], 0);
2943 		ip_rt_put(rt);
2944 		bond_arp_send(slave, ARPOP_REQUEST, targets[i], addr, tags);
2945 		kfree(tags);
2946 	}
2947 }
2948 
2949 static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 sip, __be32 tip)
2950 {
2951 	int i;
2952 
2953 	if (!sip || !bond_has_this_ip(bond, tip)) {
2954 		slave_dbg(bond->dev, slave->dev, "%s: sip %pI4 tip %pI4 not found\n",
2955 			   __func__, &sip, &tip);
2956 		return;
2957 	}
2958 
2959 	i = bond_get_targets_ip(bond->params.arp_targets, sip);
2960 	if (i == -1) {
2961 		slave_dbg(bond->dev, slave->dev, "%s: sip %pI4 not found in targets\n",
2962 			   __func__, &sip);
2963 		return;
2964 	}
2965 	slave->last_rx = jiffies;
2966 	slave->target_last_arp_rx[i] = jiffies;
2967 }
2968 
2969 int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
2970 		 struct slave *slave)
2971 {
2972 	struct arphdr *arp = (struct arphdr *)skb->data;
2973 	struct slave *curr_active_slave, *curr_arp_slave;
2974 	unsigned char *arp_ptr;
2975 	__be32 sip, tip;
2976 	int is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
2977 	unsigned int alen;
2978 
2979 	if (!slave_do_arp_validate(bond, slave)) {
2980 		if ((slave_do_arp_validate_only(bond) && is_arp) ||
2981 		    !slave_do_arp_validate_only(bond))
2982 			slave->last_rx = jiffies;
2983 		return RX_HANDLER_ANOTHER;
2984 	} else if (!is_arp) {
2985 		return RX_HANDLER_ANOTHER;
2986 	}
2987 
2988 	alen = arp_hdr_len(bond->dev);
2989 
2990 	slave_dbg(bond->dev, slave->dev, "%s: skb->dev %s\n",
2991 		   __func__, skb->dev->name);
2992 
2993 	if (alen > skb_headlen(skb)) {
2994 		arp = kmalloc(alen, GFP_ATOMIC);
2995 		if (!arp)
2996 			goto out_unlock;
2997 		if (skb_copy_bits(skb, 0, arp, alen) < 0)
2998 			goto out_unlock;
2999 	}
3000 
3001 	if (arp->ar_hln != bond->dev->addr_len ||
3002 	    skb->pkt_type == PACKET_OTHERHOST ||
3003 	    skb->pkt_type == PACKET_LOOPBACK ||
3004 	    arp->ar_hrd != htons(ARPHRD_ETHER) ||
3005 	    arp->ar_pro != htons(ETH_P_IP) ||
3006 	    arp->ar_pln != 4)
3007 		goto out_unlock;
3008 
3009 	arp_ptr = (unsigned char *)(arp + 1);
3010 	arp_ptr += bond->dev->addr_len;
3011 	memcpy(&sip, arp_ptr, 4);
3012 	arp_ptr += 4 + bond->dev->addr_len;
3013 	memcpy(&tip, arp_ptr, 4);
3014 
3015 	slave_dbg(bond->dev, slave->dev, "%s: %s/%d av %d sv %d sip %pI4 tip %pI4\n",
3016 		  __func__, slave->dev->name, bond_slave_state(slave),
3017 		  bond->params.arp_validate, slave_do_arp_validate(bond, slave),
3018 		  &sip, &tip);
3019 
3020 	curr_active_slave = rcu_dereference(bond->curr_active_slave);
3021 	curr_arp_slave = rcu_dereference(bond->current_arp_slave);
3022 
3023 	/* We 'trust' the received ARP enough to validate it if:
3024 	 *
3025 	 * (a) the slave receiving the ARP is active (which includes the
3026 	 * current ARP slave, if any), or
3027 	 *
3028 	 * (b) the receiving slave isn't active, but there is a currently
3029 	 * active slave and it received valid arp reply(s) after it became
3030 	 * the currently active slave, or
3031 	 *
3032 	 * (c) there is an ARP slave that sent an ARP during the prior ARP
3033 	 * interval, and we receive an ARP reply on any slave.  We accept
3034 	 * these because switch FDB update delays may deliver the ARP
3035 	 * reply to a slave other than the sender of the ARP request.
3036 	 *
3037 	 * Note: for (b), backup slaves are receiving the broadcast ARP
3038 	 * request, not a reply.  This request passes from the sending
3039 	 * slave through the L2 switch(es) to the receiving slave.  Since
3040 	 * this is checking the request, sip/tip are swapped for
3041 	 * validation.
3042 	 *
3043 	 * This is done to avoid endless looping when we can't reach the
3044 	 * arp_ip_target and fool ourselves with our own arp requests.
3045 	 */
3046 	if (bond_is_active_slave(slave))
3047 		bond_validate_arp(bond, slave, sip, tip);
3048 	else if (curr_active_slave &&
3049 		 time_after(slave_last_rx(bond, curr_active_slave),
3050 			    curr_active_slave->last_link_up))
3051 		bond_validate_arp(bond, slave, tip, sip);
3052 	else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) &&
3053 		 bond_time_in_interval(bond,
3054 				       dev_trans_start(curr_arp_slave->dev), 1))
3055 		bond_validate_arp(bond, slave, sip, tip);
3056 
3057 out_unlock:
3058 	if (arp != (struct arphdr *)skb->data)
3059 		kfree(arp);
3060 	return RX_HANDLER_ANOTHER;
3061 }
3062 
3063 /* function to verify if we're in the arp_interval timeslice, returns true if
3064  * (last_act - arp_interval) <= jiffies <= (last_act + mod * arp_interval +
3065  * arp_interval/2). The arp_interval/2 is needed for really fast networks.
3066  */
3067 static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
3068 				  int mod)
3069 {
3070 	int delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
3071 
3072 	return time_in_range(jiffies,
3073 			     last_act - delta_in_ticks,
3074 			     last_act + mod * delta_in_ticks + delta_in_ticks/2);
3075 }
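
/* Worked example (illustrative): with arp_interval=1000 and HZ=1000,
 * delta_in_ticks = msecs_to_jiffies(1000) = 1000 jiffies, so for mod=1
 * the call returns true while jiffies lies in
 *	[last_act - 1000, last_act + 1000 + 500]
 * i.e. one interval of slack before last_act, and one interval plus the
 * half-interval fudge for really fast networks after it.
 */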
3076 
3077 /* This function is called regularly to monitor each slave's link,
3078  * ensuring that traffic is being sent and received when arp monitoring
3079  * is used in load-balancing mode. If the adapter has been dormant, then an
3080  * arp is transmitted to generate traffic. See bond_activebackup_arp_mon() for
3081  * arp monitoring in active-backup mode.
3082  */
3083 static void bond_loadbalance_arp_mon(struct bonding *bond)
3084 {
3085 	struct slave *slave, *oldcurrent;
3086 	struct list_head *iter;
3087 	int do_failover = 0, slave_state_changed = 0;
3088 
3089 	if (!bond_has_slaves(bond))
3090 		goto re_arm;
3091 
3092 	rcu_read_lock();
3093 
3094 	oldcurrent = rcu_dereference(bond->curr_active_slave);
3095 	/* see if any of the previous devices are up now (i.e. they have
3096 	 * xmt and rcv traffic). the curr_active_slave does not come into
3097 	 * the picture unless it is null. also, slave->last_link_up is not
3098 	 * needed here because we send an arp on each slave and give a slave
3099 	 * as long as it needs to get the tx/rx within the delta.
3100 	 * TODO: what about up/down delay in arp mode? it wasn't here before
3101 	 *       so it can wait
3102 	 */
3103 	bond_for_each_slave_rcu(bond, slave, iter) {
3104 		unsigned long trans_start = dev_trans_start(slave->dev);
3105 
3106 		bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
3107 
3108 		if (slave->link != BOND_LINK_UP) {
3109 			if (bond_time_in_interval(bond, trans_start, 1) &&
3110 			    bond_time_in_interval(bond, slave->last_rx, 1)) {
3111 
3112 				bond_propose_link_state(slave, BOND_LINK_UP);
3113 				slave_state_changed = 1;
3114 
3115 				/* primary_slave has no meaning in round-robin
3116 				 * mode. the window of a slave being up and
3117 				 * curr_active_slave being null after enslaving
3118 				 * is closed.
3119 				 */
3120 				if (!oldcurrent) {
3121 					slave_info(bond->dev, slave->dev, "link status definitely up\n");
3122 					do_failover = 1;
3123 				} else {
3124 					slave_info(bond->dev, slave->dev, "interface is now up\n");
3125 				}
3126 			}
3127 		} else {
3128 			/* slave->link == BOND_LINK_UP */
3129 
3130 			/* not all switches will respond to an arp request
3131 			 * when the source ip is 0, so don't take the link down
3132 			 * if we don't know our ip yet
3133 			 */
3134 			if (!bond_time_in_interval(bond, trans_start, bond->params.missed_max) ||
3135 			    !bond_time_in_interval(bond, slave->last_rx, bond->params.missed_max)) {
3136 
3137 				bond_propose_link_state(slave, BOND_LINK_DOWN);
3138 				slave_state_changed = 1;
3139 
3140 				if (slave->link_failure_count < UINT_MAX)
3141 					slave->link_failure_count++;
3142 
3143 				slave_info(bond->dev, slave->dev, "interface is now down\n");
3144 
3145 				if (slave == oldcurrent)
3146 					do_failover = 1;
3147 			}
3148 		}
3149 
3150 		/* note: if switch is in round-robin mode, all links
3151 		 * must tx arp to ensure all links rx an arp - otherwise
3152 		 * links may oscillate or not come up at all; if switch is
3153 		 * in something like xor mode, there is nothing we can
3154 		 * do - all replies will be rx'ed on same link causing slaves
3155 		 * to be unstable during low/no traffic periods
3156 		 */
3157 		if (bond_slave_is_up(slave))
3158 			bond_arp_send_all(bond, slave);
3159 	}
3160 
3161 	rcu_read_unlock();
3162 
3163 	if (do_failover || slave_state_changed) {
3164 		if (!rtnl_trylock())
3165 			goto re_arm;
3166 
3167 		bond_for_each_slave(bond, slave, iter) {
3168 			if (slave->link_new_state != BOND_LINK_NOCHANGE)
3169 				slave->link = slave->link_new_state;
3170 		}
3171 
3172 		if (slave_state_changed) {
3173 			bond_slave_state_change(bond);
3174 			if (BOND_MODE(bond) == BOND_MODE_XOR)
3175 				bond_update_slave_arr(bond, NULL);
3176 		}
3177 		if (do_failover) {
3178 			block_netpoll_tx();
3179 			bond_select_active_slave(bond);
3180 			unblock_netpoll_tx();
3181 		}
3182 		rtnl_unlock();
3183 	}
3184 
3185 re_arm:
3186 	if (bond->params.arp_interval)
3187 		queue_delayed_work(bond->wq, &bond->arp_work,
3188 				   msecs_to_jiffies(bond->params.arp_interval));
3189 }
3190 
3191 /* Called to inspect slaves for active-backup mode ARP monitor link state
3192  * changes.  Sets proposed link state in slaves to specify what action
3193  * should take place for the slave.  Returns 0 if no changes are found, >0
3194  * if changes to link states must be committed.
3195  *
3196  * Called with rcu_read_lock held.
3197  */
3198 static int bond_ab_arp_inspect(struct bonding *bond)
3199 {
3200 	unsigned long trans_start, last_rx;
3201 	struct list_head *iter;
3202 	struct slave *slave;
3203 	int commit = 0;
3204 
3205 	bond_for_each_slave_rcu(bond, slave, iter) {
3206 		bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
3207 		last_rx = slave_last_rx(bond, slave);
3208 
3209 		if (slave->link != BOND_LINK_UP) {
3210 			if (bond_time_in_interval(bond, last_rx, 1)) {
3211 				bond_propose_link_state(slave, BOND_LINK_UP);
3212 				commit++;
3213 			} else if (slave->link == BOND_LINK_BACK) {
3214 				bond_propose_link_state(slave, BOND_LINK_FAIL);
3215 				commit++;
3216 			}
3217 			continue;
3218 		}
3219 
3220 		/* Give slaves 2*delta after being enslaved or made
3221 		 * active.  This avoids bouncing, as the last receive
3222 		 * times need a full ARP monitor cycle to be updated.
3223 		 */
3224 		if (bond_time_in_interval(bond, slave->last_link_up, 2))
3225 			continue;
3226 
3227 		/* Backup slave is down if:
3228 		 * - No current_arp_slave AND
3229 		 * - more than (missed_max+1)*delta since last receive AND
3230 		 * - the bond has an IP address
3231 		 *
3232 		 * Note: a non-null current_arp_slave indicates
3233 		 * the curr_active_slave went down and we are
3234 		 * searching for a new one; under this condition
3235 		 * we only take the curr_active_slave down - this
3236 		 * gives each slave a chance to tx/rx traffic
3237 		 * before being taken out
3238 		 */
3239 		if (!bond_is_active_slave(slave) &&
3240 		    !rcu_access_pointer(bond->current_arp_slave) &&
3241 		    !bond_time_in_interval(bond, last_rx, bond->params.missed_max + 1)) {
3242 			bond_propose_link_state(slave, BOND_LINK_DOWN);
3243 			commit++;
3244 		}
3245 
3246 		/* Active slave is down if:
3247 		 * - more than missed_max*delta since transmitting OR
3248 		 * - (more than missed_max*delta since receive AND
3249 		 *    the bond has an IP address)
3250 		 */
3251 		trans_start = dev_trans_start(slave->dev);
3252 		if (bond_is_active_slave(slave) &&
3253 		    (!bond_time_in_interval(bond, trans_start, bond->params.missed_max) ||
3254 		     !bond_time_in_interval(bond, last_rx, bond->params.missed_max))) {
3255 			bond_propose_link_state(slave, BOND_LINK_DOWN);
3256 			commit++;
3257 		}
3258 	}
3259 
3260 	return commit;
3261 }
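
/* Worked example (illustrative): with arp_interval=1000 and missed_max=2,
 * an active slave is proposed DOWN once bond_time_in_interval() fails for
 * mod=2, i.e. once more than roughly 2.5s (2*delta + delta/2) pass without
 * a transmit, or without a receive.  A backup slave needs missed_max+1
 * intervals (~3.5s) of rx silence before it is proposed DOWN.
 */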
3262 
3263 /* Called to commit link state changes noted by inspection step of
3264  * active-backup mode ARP monitor.
3265  *
3266  * Called with RTNL held.
3267  */
3268 static void bond_ab_arp_commit(struct bonding *bond)
3269 {
3270 	unsigned long trans_start;
3271 	struct list_head *iter;
3272 	struct slave *slave;
3273 
3274 	bond_for_each_slave(bond, slave, iter) {
3275 		switch (slave->link_new_state) {
3276 		case BOND_LINK_NOCHANGE:
3277 			continue;
3278 
3279 		case BOND_LINK_UP:
3280 			trans_start = dev_trans_start(slave->dev);
3281 			if (rtnl_dereference(bond->curr_active_slave) != slave ||
3282 			    (!rtnl_dereference(bond->curr_active_slave) &&
3283 			     bond_time_in_interval(bond, trans_start, 1))) {
3284 				struct slave *current_arp_slave;
3285 
3286 				current_arp_slave = rtnl_dereference(bond->current_arp_slave);
3287 				bond_set_slave_link_state(slave, BOND_LINK_UP,
3288 							  BOND_SLAVE_NOTIFY_NOW);
3289 				if (current_arp_slave) {
3290 					bond_set_slave_inactive_flags(
3291 						current_arp_slave,
3292 						BOND_SLAVE_NOTIFY_NOW);
3293 					RCU_INIT_POINTER(bond->current_arp_slave, NULL);
3294 				}
3295 
3296 				slave_info(bond->dev, slave->dev, "link status definitely up\n");
3297 
3298 				if (!rtnl_dereference(bond->curr_active_slave) ||
3299 				    slave == rtnl_dereference(bond->primary_slave))
3300 					goto do_failover;
3301 
3302 			}
3303 
3304 			continue;
3305 
3306 		case BOND_LINK_DOWN:
3307 			if (slave->link_failure_count < UINT_MAX)
3308 				slave->link_failure_count++;
3309 
3310 			bond_set_slave_link_state(slave, BOND_LINK_DOWN,
3311 						  BOND_SLAVE_NOTIFY_NOW);
3312 			bond_set_slave_inactive_flags(slave,
3313 						      BOND_SLAVE_NOTIFY_NOW);
3314 
3315 			slave_info(bond->dev, slave->dev, "link status definitely down, disabling slave\n");
3316 
3317 			if (slave == rtnl_dereference(bond->curr_active_slave)) {
3318 				RCU_INIT_POINTER(bond->current_arp_slave, NULL);
3319 				goto do_failover;
3320 			}
3321 
3322 			continue;
3323 
3324 		case BOND_LINK_FAIL:
3325 			bond_set_slave_link_state(slave, BOND_LINK_FAIL,
3326 						  BOND_SLAVE_NOTIFY_NOW);
3327 			bond_set_slave_inactive_flags(slave,
3328 						      BOND_SLAVE_NOTIFY_NOW);
3329 
3330 			/* A slave has just been enslaved and has become
3331 			 * the current active slave.
3332 			 */
3333 			if (rtnl_dereference(bond->curr_active_slave))
3334 				RCU_INIT_POINTER(bond->current_arp_slave, NULL);
3335 			continue;
3336 
3337 		default:
3338 			slave_err(bond->dev, slave->dev,
3339 				  "impossible: link_new_state %d on slave\n",
3340 				  slave->link_new_state);
3341 			continue;
3342 		}
3343 
3344 do_failover:
3345 		block_netpoll_tx();
3346 		bond_select_active_slave(bond);
3347 		unblock_netpoll_tx();
3348 	}
3349 
3350 	bond_set_carrier(bond);
3351 }
3352 
3353 /* Send ARP probes for active-backup mode ARP monitor.
3354  *
3355  * Called with rcu_read_lock held.
3356  */
3357 static bool bond_ab_arp_probe(struct bonding *bond)
3358 {
3359 	struct slave *slave, *before = NULL, *new_slave = NULL,
3360 		     *curr_arp_slave = rcu_dereference(bond->current_arp_slave),
3361 		     *curr_active_slave = rcu_dereference(bond->curr_active_slave);
3362 	struct list_head *iter;
3363 	bool found = false;
3364 	bool should_notify_rtnl = BOND_SLAVE_NOTIFY_LATER;
3365 
3366 	if (curr_arp_slave && curr_active_slave)
3367 		netdev_info(bond->dev, "PROBE: c_arp %s && cas %s BAD\n",
3368 			    curr_arp_slave->dev->name,
3369 			    curr_active_slave->dev->name);
3370 
3371 	if (curr_active_slave) {
3372 		bond_arp_send_all(bond, curr_active_slave);
3373 		return should_notify_rtnl;
3374 	}
3375 
3376 	/* if we don't have a curr_active_slave, search for the next available
3377 	 * backup slave from the current_arp_slave and make it the candidate
3378 	 * for becoming the curr_active_slave
3379 	 */
3380 
3381 	if (!curr_arp_slave) {
3382 		curr_arp_slave = bond_first_slave_rcu(bond);
3383 		if (!curr_arp_slave)
3384 			return should_notify_rtnl;
3385 	}
3386 
3387 	bond_for_each_slave_rcu(bond, slave, iter) {
3388 		if (!found && !before && bond_slave_is_up(slave))
3389 			before = slave;
3390 
3391 		if (found && !new_slave && bond_slave_is_up(slave))
3392 			new_slave = slave;
3393 		/* if the link state is up at this point, we
3394 		 * mark it down - this can happen if we have
3395 		 * simultaneous link failures and
3396 		 * reselect_active_interface doesn't make this
3397 		 * one the current slave, so it is still marked
3398 		 * up when it is actually down
3399 		 */
3400 		if (!bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
3401 			bond_set_slave_link_state(slave, BOND_LINK_DOWN,
3402 						  BOND_SLAVE_NOTIFY_LATER);
3403 			if (slave->link_failure_count < UINT_MAX)
3404 				slave->link_failure_count++;
3405 
3406 			bond_set_slave_inactive_flags(slave,
3407 						      BOND_SLAVE_NOTIFY_LATER);
3408 
3409 			slave_info(bond->dev, slave->dev, "backup interface is now down\n");
3410 		}
3411 		if (slave == curr_arp_slave)
3412 			found = true;
3413 	}
3414 
3415 	if (!new_slave && before)
3416 		new_slave = before;
3417 
3418 	if (!new_slave)
3419 		goto check_state;
3420 
3421 	bond_set_slave_link_state(new_slave, BOND_LINK_BACK,
3422 				  BOND_SLAVE_NOTIFY_LATER);
3423 	bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER);
3424 	bond_arp_send_all(bond, new_slave);
3425 	new_slave->last_link_up = jiffies;
3426 	rcu_assign_pointer(bond->current_arp_slave, new_slave);
3427 
3428 check_state:
3429 	bond_for_each_slave_rcu(bond, slave, iter) {
3430 		if (slave->should_notify || slave->should_notify_link) {
3431 			should_notify_rtnl = BOND_SLAVE_NOTIFY_NOW;
3432 			break;
3433 		}
3434 	}
3435 	return should_notify_rtnl;
3436 }
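
/* Worked example of the rotation above: with slaves eth0, eth1 and
 * eth2 (illustrative names) and current_arp_slave == eth1, the loop
 * records eth0 in @before (the first carrier-up slave seen before
 * eth1) and eth2 in @new_slave (the first carrier-up slave seen after
 * it), so eth2 is probed next; had eth1 been the last slave, the
 * search would wrap around to @before and probe eth0 instead.
 */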
3437 
3438 static void bond_activebackup_arp_mon(struct bonding *bond)
3439 {
3440 	bool should_notify_peers = false;
3441 	bool should_notify_rtnl = false;
3442 	int delta_in_ticks;
3443 
3444 	delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
3445 
3446 	if (!bond_has_slaves(bond))
3447 		goto re_arm;
3448 
3449 	rcu_read_lock();
3450 
3451 	should_notify_peers = bond_should_notify_peers(bond);
3452 
3453 	if (bond_ab_arp_inspect(bond)) {
3454 		rcu_read_unlock();
3455 
3456 		/* Race avoidance with bond_close flush of workqueue */
3457 		if (!rtnl_trylock()) {
3458 			delta_in_ticks = 1;
3459 			should_notify_peers = false;
3460 			goto re_arm;
3461 		}
3462 
3463 		bond_ab_arp_commit(bond);
3464 
3465 		rtnl_unlock();
3466 		rcu_read_lock();
3467 	}
3468 
3469 	should_notify_rtnl = bond_ab_arp_probe(bond);
3470 	rcu_read_unlock();
3471 
3472 re_arm:
3473 	if (bond->params.arp_interval)
3474 		queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
3475 
3476 	if (should_notify_peers || should_notify_rtnl) {
3477 		if (!rtnl_trylock())
3478 			return;
3479 
3480 		if (should_notify_peers)
3481 			call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
3482 						 bond->dev);
3483 		if (should_notify_rtnl) {
3484 			bond_slave_state_notify(bond);
3485 			bond_slave_link_notify(bond);
3486 		}
3487 
3488 		rtnl_unlock();
3489 	}
3490 }
3491 
3492 static void bond_arp_monitor(struct work_struct *work)
3493 {
3494 	struct bonding *bond = container_of(work, struct bonding,
3495 					    arp_work.work);
3496 
3497 	if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
3498 		bond_activebackup_arp_mon(bond);
3499 	else
3500 		bond_loadbalance_arp_mon(bond);
3501 }
3502 
3503 /*-------------------------- netdev event handling --------------------------*/
3504 
3505 /* Change device name */
3506 static int bond_event_changename(struct bonding *bond)
3507 {
3508 	bond_remove_proc_entry(bond);
3509 	bond_create_proc_entry(bond);
3510 
3511 	bond_debug_reregister(bond);
3512 
3513 	return NOTIFY_DONE;
3514 }
3515 
3516 static int bond_master_netdev_event(unsigned long event,
3517 				    struct net_device *bond_dev)
3518 {
3519 	struct bonding *event_bond = netdev_priv(bond_dev);
3520 
3521 	netdev_dbg(bond_dev, "%s called\n", __func__);
3522 
3523 	switch (event) {
3524 	case NETDEV_CHANGENAME:
3525 		return bond_event_changename(event_bond);
3526 	case NETDEV_UNREGISTER:
3527 		bond_remove_proc_entry(event_bond);
3528 #ifdef CONFIG_XFRM_OFFLOAD
3529 		xfrm_dev_state_flush(dev_net(bond_dev), bond_dev, true);
3530 #endif /* CONFIG_XFRM_OFFLOAD */
3531 		break;
3532 	case NETDEV_REGISTER:
3533 		bond_create_proc_entry(event_bond);
3534 		break;
3535 	default:
3536 		break;
3537 	}
3538 
3539 	return NOTIFY_DONE;
3540 }
3541 
3542 static int bond_slave_netdev_event(unsigned long event,
3543 				   struct net_device *slave_dev)
3544 {
3545 	struct slave *slave = bond_slave_get_rtnl(slave_dev), *primary;
3546 	struct bonding *bond;
3547 	struct net_device *bond_dev;
3548 
3549 	/* A netdev event can be generated while enslaving a device
3550 	 * before netdev_rx_handler_register is called, in which case
3551 	 * slave will be NULL.
3552 	 */
3553 	if (!slave) {
3554 		netdev_dbg(slave_dev, "%s called on NULL slave\n", __func__);
3555 		return NOTIFY_DONE;
3556 	}
3557 
3558 	bond_dev = slave->bond->dev;
3559 	bond = slave->bond;
3560 	primary = rtnl_dereference(bond->primary_slave);
3561 
3562 	slave_dbg(bond_dev, slave_dev, "%s called\n", __func__);
3563 
3564 	switch (event) {
3565 	case NETDEV_UNREGISTER:
3566 		if (bond_dev->type != ARPHRD_ETHER)
3567 			bond_release_and_destroy(bond_dev, slave_dev);
3568 		else
3569 			__bond_release_one(bond_dev, slave_dev, false, true);
3570 		break;
3571 	case NETDEV_UP:
3572 	case NETDEV_CHANGE:
3573 		/* For 802.3ad mode only:
3574 		 * Getting invalid Speed/Duplex values here will put slave
3575 		 * in weird state. Mark it as link-fail if the link was
3576 		 * previously up or link-down if it hasn't yet come up, and
3577 		 * let link-monitoring (miimon) set it right when correct
3578 		 * speeds/duplex are available.
3579 		 */
3580 		if (bond_update_speed_duplex(slave) &&
3581 		    BOND_MODE(bond) == BOND_MODE_8023AD) {
3582 			if (slave->last_link_up)
3583 				slave->link = BOND_LINK_FAIL;
3584 			else
3585 				slave->link = BOND_LINK_DOWN;
3586 		}
3587 
3588 		if (BOND_MODE(bond) == BOND_MODE_8023AD)
3589 			bond_3ad_adapter_speed_duplex_changed(slave);
3590 		fallthrough;
3591 	case NETDEV_DOWN:
3592 		/* Refresh slave-array if applicable!
3593 		 * If the setup does not use miimon or arpmon (mode-specific!),
3594 		 * then these events will not cause the slave-array to be
3595 		 * refreshed. This will cause xmit to use a slave that is not
3596 		 * usable. Avoid such a situation by refreshing the array at
3597 		 * these events. If these (miimon/arpmon) parameters are
3598 		 * configured, the array gets refreshed twice, which is fine!
3599 		 */
3600 		if (bond_mode_can_use_xmit_hash(bond))
3601 			bond_update_slave_arr(bond, NULL);
3602 		break;
3603 	case NETDEV_CHANGEMTU:
3604 		/* TODO: Should slaves be allowed to
3605 		 * independently alter their MTU?  For
3606 		 * an active-backup bond, slaves need
3607 		 * not be the same type of device, so
3608 		 * MTUs may vary.  For other modes,
3609 		 * slaves arguably should have the
3610 		 * same MTUs. To do this, we'd need to
3611 		 * take over the slave's change_mtu
3612 		 * function for the duration of their
3613 		 * servitude.
3614 		 */
3615 		break;
3616 	case NETDEV_CHANGENAME:
3617 		/* we don't care if we don't have a primary set */
3618 		if (!bond_uses_primary(bond) ||
3619 		    !bond->params.primary[0])
3620 			break;
3621 
3622 		if (slave == primary) {
3623 			/* slave's name changed - it's no longer primary */
3624 			RCU_INIT_POINTER(bond->primary_slave, NULL);
3625 		} else if (!strcmp(slave_dev->name, bond->params.primary)) {
3626 			/* we have a new primary slave */
3627 			rcu_assign_pointer(bond->primary_slave, slave);
3628 		} else { /* we didn't change primary - exit */
3629 			break;
3630 		}
3631 
3632 		netdev_info(bond->dev, "Primary slave changed to %s, reselecting active slave\n",
3633 			    primary ? slave_dev->name : "none");
3634 
3635 		block_netpoll_tx();
3636 		bond_select_active_slave(bond);
3637 		unblock_netpoll_tx();
3638 		break;
3639 	case NETDEV_FEAT_CHANGE:
3640 		bond_compute_features(bond);
3641 		break;
3642 	case NETDEV_RESEND_IGMP:
3643 		/* Propagate to master device */
3644 		call_netdevice_notifiers(event, slave->bond->dev);
3645 		break;
3646 	default:
3647 		break;
3648 	}
3649 
3650 	return NOTIFY_DONE;
3651 }
3652 
3653 /* bond_netdev_event: handle netdev notifier chain events.
3654  *
3655  * This function receives events for the netdev chain.  The caller (an
3656  * ioctl handler calling blocking_notifier_call_chain) holds the necessary
3657  * locks for us to safely manipulate the slave devices (RTNL lock,
3658  * dev_probe_lock).
3659  */
3660 static int bond_netdev_event(struct notifier_block *this,
3661 			     unsigned long event, void *ptr)
3662 {
3663 	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
3664 
3665 	netdev_dbg(event_dev, "%s received %s\n",
3666 		   __func__, netdev_cmd_to_name(event));
3667 
3668 	if (!(event_dev->priv_flags & IFF_BONDING))
3669 		return NOTIFY_DONE;
3670 
3671 	if (event_dev->flags & IFF_MASTER) {
3672 		int ret;
3673 
3674 		ret = bond_master_netdev_event(event, event_dev);
3675 		if (ret != NOTIFY_DONE)
3676 			return ret;
3677 	}
3678 
3679 	if (event_dev->flags & IFF_SLAVE)
3680 		return bond_slave_netdev_event(event, event_dev);
3681 
3682 	return NOTIFY_DONE;
3683 }
3684 
3685 static struct notifier_block bond_netdev_notifier = {
3686 	.notifier_call = bond_netdev_event,
3687 };
3688 
3689 /*---------------------------- Hashing Policies -----------------------------*/
3690 
3691 /* Helper to access data in a packet, with or without a backing skb.
3692  * If skb is given, the data is linearized if necessary via pskb_may_pull.
3693  */
3694 static inline const void *bond_pull_data(struct sk_buff *skb,
3695 					 const void *data, int hlen, int n)
3696 {
3697 	if (likely(n <= hlen))
3698 		return data;
3699 	else if (skb && likely(pskb_may_pull(skb, n)))
3700 		return skb->head;
3701 
3702 	return NULL;
3703 }
3704 
3705 /* L2 hash helper */
3706 static inline u32 bond_eth_hash(struct sk_buff *skb, const void *data, int mhoff, int hlen)
3707 {
3708 	struct ethhdr *ep;
3709 
3710 	data = bond_pull_data(skb, data, hlen, mhoff + sizeof(struct ethhdr));
3711 	if (!data)
3712 		return 0;
3713 
3714 	ep = (struct ethhdr *)(data + mhoff);
3715 	return ep->h_dest[5] ^ ep->h_source[5] ^ be16_to_cpu(ep->h_proto);
3716 }
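
/* Worked example of bond_eth_hash() with illustrative addresses:
 * for dst 00:11:22:33:44:55, src 00:aa:bb:cc:dd:0a and IPv4
 * (h_proto 0x0800) the result is 0x55 ^ 0x0a ^ 0x0800 = 0x085f,
 * i.e. only the last octet of each MAC and the ethertype contribute.
 */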
3717 
3718 static bool bond_flow_ip(struct sk_buff *skb, struct flow_keys *fk, const void *data,
3719 			 int hlen, __be16 l2_proto, int *nhoff, int *ip_proto, bool l34)
3720 {
3721 	const struct ipv6hdr *iph6;
3722 	const struct iphdr *iph;
3723 
3724 	if (l2_proto == htons(ETH_P_IP)) {
3725 		data = bond_pull_data(skb, data, hlen, *nhoff + sizeof(*iph));
3726 		if (!data)
3727 			return false;
3728 
3729 		iph = (const struct iphdr *)(data + *nhoff);
3730 		iph_to_flow_copy_v4addrs(fk, iph);
3731 		*nhoff += iph->ihl << 2;
3732 		if (!ip_is_fragment(iph))
3733 			*ip_proto = iph->protocol;
3734 	} else if (l2_proto == htons(ETH_P_IPV6)) {
3735 		data = bond_pull_data(skb, data, hlen, *nhoff + sizeof(*iph6));
3736 		if (!data)
3737 			return false;
3738 
3739 		iph6 = (const struct ipv6hdr *)(data + *nhoff);
3740 		iph_to_flow_copy_v6addrs(fk, iph6);
3741 		*nhoff += sizeof(*iph6);
3742 		*ip_proto = iph6->nexthdr;
3743 	} else {
3744 		return false;
3745 	}
3746 
3747 	if (l34 && *ip_proto >= 0)
3748 		fk->ports.ports = __skb_flow_get_ports(skb, *nhoff, *ip_proto, data, hlen);
3749 
3750 	return true;
3751 }
3752 
3753 static u32 bond_vlan_srcmac_hash(struct sk_buff *skb, const void *data, int mhoff, int hlen)
3754 {
3755 	u32 srcmac_vendor = 0, srcmac_dev = 0;
3756 	struct ethhdr *mac_hdr;
3757 	u16 vlan = 0;
3758 	int i;
3759 
3760 	data = bond_pull_data(skb, data, hlen, mhoff + sizeof(struct ethhdr));
3761 	if (!data)
3762 		return 0;
3763 	mac_hdr = (struct ethhdr *)(data + mhoff);
3764 
3765 	for (i = 0; i < 3; i++)
3766 		srcmac_vendor = (srcmac_vendor << 8) | mac_hdr->h_source[i];
3767 
3768 	for (i = 3; i < ETH_ALEN; i++)
3769 		srcmac_dev = (srcmac_dev << 8) | mac_hdr->h_source[i];
3770 
3771 	if (skb && skb_vlan_tag_present(skb))
3772 		vlan = skb_vlan_tag_get(skb);
3773 
3774 	return vlan ^ srcmac_vendor ^ srcmac_dev;
3775 }
3776 
3777 /* Extract the appropriate headers based on bond's xmit policy */
3778 static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb, const void *data,
3779 			      __be16 l2_proto, int nhoff, int hlen, struct flow_keys *fk)
3780 {
3781 	bool l34 = bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34;
3782 	int ip_proto = -1;
3783 
3784 	switch (bond->params.xmit_policy) {
3785 	case BOND_XMIT_POLICY_ENCAP23:
3786 	case BOND_XMIT_POLICY_ENCAP34:
3787 		memset(fk, 0, sizeof(*fk));
3788 		return __skb_flow_dissect(NULL, skb, &flow_keys_bonding,
3789 					  fk, data, l2_proto, nhoff, hlen, 0);
3790 	default:
3791 		break;
3792 	}
3793 
3794 	fk->ports.ports = 0;
3795 	memset(&fk->icmp, 0, sizeof(fk->icmp));
3796 	if (!bond_flow_ip(skb, fk, data, hlen, l2_proto, &nhoff, &ip_proto, l34))
3797 		return false;
3798 
3799 	/* ICMP error packets contain at least 8 bytes of the header
3800 	 * of the packet which generated the error. Use this information
3801 	 * to correlate ICMP error packets with the flow which
3802 	 * generated the error.
3803 	 */
3804 	if (ip_proto == IPPROTO_ICMP || ip_proto == IPPROTO_ICMPV6) {
3805 		skb_flow_get_icmp_tci(skb, &fk->icmp, data, nhoff, hlen);
3806 		if (ip_proto == IPPROTO_ICMP) {
3807 			if (!icmp_is_err(fk->icmp.type))
3808 				return true;
3809 
3810 			nhoff += sizeof(struct icmphdr);
3811 		} else if (ip_proto == IPPROTO_ICMPV6) {
3812 			if (!icmpv6_is_err(fk->icmp.type))
3813 				return true;
3814 
3815 			nhoff += sizeof(struct icmp6hdr);
3816 		}
3817 		return bond_flow_ip(skb, fk, data, hlen, l2_proto, &nhoff, &ip_proto, l34);
3818 	}
3819 
3820 	return true;
3821 }
3822 
3823 static u32 bond_ip_hash(u32 hash, struct flow_keys *flow)
3824 {
3825 	hash ^= (__force u32)flow_get_u32_dst(flow) ^
3826 		(__force u32)flow_get_u32_src(flow);
3827 	hash ^= (hash >> 16);
3828 	hash ^= (hash >> 8);
3829 	/* discard lowest hash bit to deal with the common even ports pattern */
3830 	return hash >> 1;
3831 }
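
/* Worked example with illustrative addresses: for a zero L4 seed and
 * 10.0.0.1 -> 10.0.0.2, hash = 0x0a000001 ^ 0x0a000002 = 0x3; the two
 * folds leave 0x3 and the final right shift returns 1. The shift
 * drops bit 0, whose value is biased when most ports are even.
 */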
3832 
3833 /* Generate hash based on xmit policy. If @skb is given, it is used to linearize
3834  * the data as required, but this function can be used without it if the data is
3835  * known to be linear (e.g. with xdp_buff).
3836  */
3837 static u32 __bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, const void *data,
3838 			    __be16 l2_proto, int mhoff, int nhoff, int hlen)
3839 {
3840 	struct flow_keys flow;
3841 	u32 hash;
3842 
3843 	if (bond->params.xmit_policy == BOND_XMIT_POLICY_VLAN_SRCMAC)
3844 		return bond_vlan_srcmac_hash(skb, data, mhoff, hlen);
3845 
3846 	if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 ||
3847 	    !bond_flow_dissect(bond, skb, data, l2_proto, nhoff, hlen, &flow))
3848 		return bond_eth_hash(skb, data, mhoff, hlen);
3849 
3850 	if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 ||
3851 	    bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23) {
3852 		hash = bond_eth_hash(skb, data, mhoff, hlen);
3853 	} else {
3854 		if (flow.icmp.id)
3855 			memcpy(&hash, &flow.icmp, sizeof(hash));
3856 		else
3857 			memcpy(&hash, &flow.ports.ports, sizeof(hash));
3858 	}
3859 
3860 	return bond_ip_hash(hash, &flow);
3861 }
3862 
3863 /**
3864  * bond_xmit_hash - generate a hash value based on the xmit policy
3865  * @bond: bonding device
3866  * @skb: buffer to use for headers
3867  *
3868  * This function will extract the necessary headers from the skb buffer and use
3869  * them to generate a hash based on the xmit_policy set in the bonding device
3870  */
3871 u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
3872 {
3873 	if (bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP34 &&
3874 	    skb->l4_hash)
3875 		return skb->hash;
3876 
3877 	return __bond_xmit_hash(bond, skb, skb->data, skb->protocol,
3878 				skb_mac_offset(skb), skb_network_offset(skb),
3879 				skb_headlen(skb));
3880 }
3881 
3882 /**
3883  * bond_xmit_hash_xdp - generate a hash value based on the xmit policy
3884  * @bond: bonding device
3885  * @xdp: buffer to use for headers
3886  *
3887  * The XDP variant of bond_xmit_hash.
3888  */
3889 static u32 bond_xmit_hash_xdp(struct bonding *bond, struct xdp_buff *xdp)
3890 {
3891 	struct ethhdr *eth;
3892 
3893 	if (xdp->data + sizeof(struct ethhdr) > xdp->data_end)
3894 		return 0;
3895 
3896 	eth = (struct ethhdr *)xdp->data;
3897 
3898 	return __bond_xmit_hash(bond, NULL, xdp->data, eth->h_proto, 0,
3899 				sizeof(struct ethhdr), xdp->data_end - xdp->data);
3900 }
3901 
3902 /*-------------------------- Device entry points ----------------------------*/
3903 
3904 void bond_work_init_all(struct bonding *bond)
3905 {
3906 	INIT_DELAYED_WORK(&bond->mcast_work,
3907 			  bond_resend_igmp_join_requests_delayed);
3908 	INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
3909 	INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
3910 	INIT_DELAYED_WORK(&bond->arp_work, bond_arp_monitor);
3911 	INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
3912 	INIT_DELAYED_WORK(&bond->slave_arr_work, bond_slave_arr_handler);
3913 }
3914 
3915 static void bond_work_cancel_all(struct bonding *bond)
3916 {
3917 	cancel_delayed_work_sync(&bond->mii_work);
3918 	cancel_delayed_work_sync(&bond->arp_work);
3919 	cancel_delayed_work_sync(&bond->alb_work);
3920 	cancel_delayed_work_sync(&bond->ad_work);
3921 	cancel_delayed_work_sync(&bond->mcast_work);
3922 	cancel_delayed_work_sync(&bond->slave_arr_work);
3923 }
3924 
3925 static int bond_open(struct net_device *bond_dev)
3926 {
3927 	struct bonding *bond = netdev_priv(bond_dev);
3928 	struct list_head *iter;
3929 	struct slave *slave;
3930 
3931 	/* reset slave->backup and slave->inactive */
3932 	if (bond_has_slaves(bond)) {
3933 		bond_for_each_slave(bond, slave, iter) {
3934 			if (bond_uses_primary(bond) &&
3935 			    slave != rcu_access_pointer(bond->curr_active_slave)) {
3936 				bond_set_slave_inactive_flags(slave,
3937 							      BOND_SLAVE_NOTIFY_NOW);
3938 			} else if (BOND_MODE(bond) != BOND_MODE_8023AD) {
3939 				bond_set_slave_active_flags(slave,
3940 							    BOND_SLAVE_NOTIFY_NOW);
3941 			}
3942 		}
3943 	}
3944 
3945 	if (bond_is_lb(bond)) {
3946 		/* bond_alb_initialize must be called before the timer
3947 		 * is started.
3948 		 */
3949 		if (bond_alb_initialize(bond, (BOND_MODE(bond) == BOND_MODE_ALB)))
3950 			return -ENOMEM;
3951 		if (bond->params.tlb_dynamic_lb || BOND_MODE(bond) == BOND_MODE_ALB)
3952 			queue_delayed_work(bond->wq, &bond->alb_work, 0);
3953 	}
3954 
3955 	if (bond->params.miimon)  /* link check interval, in milliseconds. */
3956 		queue_delayed_work(bond->wq, &bond->mii_work, 0);
3957 
3958 	if (bond->params.arp_interval) {  /* arp interval, in milliseconds. */
3959 		queue_delayed_work(bond->wq, &bond->arp_work, 0);
3960 		bond->recv_probe = bond_arp_rcv;
3961 	}
3962 
3963 	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
3964 		queue_delayed_work(bond->wq, &bond->ad_work, 0);
3965 		/* register to receive LACPDUs */
3966 		bond->recv_probe = bond_3ad_lacpdu_recv;
3967 		bond_3ad_initiate_agg_selection(bond, 1);
3968 	}
3969 
3970 	if (bond_mode_can_use_xmit_hash(bond))
3971 		bond_update_slave_arr(bond, NULL);
3972 
3973 	return 0;
3974 }
3975 
3976 static int bond_close(struct net_device *bond_dev)
3977 {
3978 	struct bonding *bond = netdev_priv(bond_dev);
3979 
3980 	bond_work_cancel_all(bond);
3981 	bond->send_peer_notif = 0;
3982 	if (bond_is_lb(bond))
3983 		bond_alb_deinitialize(bond);
3984 	bond->recv_probe = NULL;
3985 
3986 	return 0;
3987 }
3988 
3989 /* fold stats, assuming all rtnl_link_stats64 fields are u64, but
3990  * allowing for drivers that can provide 32bit values only.
3991  */
3992 static void bond_fold_stats(struct rtnl_link_stats64 *_res,
3993 			    const struct rtnl_link_stats64 *_new,
3994 			    const struct rtnl_link_stats64 *_old)
3995 {
3996 	const u64 *new = (const u64 *)_new;
3997 	const u64 *old = (const u64 *)_old;
3998 	u64 *res = (u64 *)_res;
3999 	int i;
4000 
4001 	for (i = 0; i < sizeof(*_res) / sizeof(u64); i++) {
4002 		u64 nv = new[i];
4003 		u64 ov = old[i];
4004 		s64 delta = nv - ov;
4005 
4006 		/* detects if this particular field is 32bit only */
4007 		if (((nv | ov) >> 32) == 0)
4008 			delta = (s64)(s32)((u32)nv - (u32)ov);
4009 
4010 		/* filter anomalies, some drivers reset their stats
4011 		 * at down/up events.
4012 		 */
4013 		if (delta > 0)
4014 			res[i] += delta;
4015 	}
4016 }
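
/* Worked example of the 32bit handling above: a driver counter that
 * wraps from old = 0xfffffff0 to new = 0x00000010 has both upper
 * halves zero, so delta = (s32)(u32)(0x00000010 - 0xfffffff0) = +32
 * is accumulated, whereas the raw u64 difference would be negative
 * and the 32 counted packets would be dropped by the delta > 0 filter.
 */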
4017 
4018 #ifdef CONFIG_LOCKDEP
4019 static int bond_get_lowest_level_rcu(struct net_device *dev)
4020 {
4021 	struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
4022 	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
4023 	int cur = 0, max = 0;
4024 
4025 	now = dev;
4026 	iter = &dev->adj_list.lower;
4027 
4028 	while (1) {
4029 		next = NULL;
4030 		while (1) {
4031 			ldev = netdev_next_lower_dev_rcu(now, &iter);
4032 			if (!ldev)
4033 				break;
4034 
4035 			next = ldev;
4036 			niter = &ldev->adj_list.lower;
4037 			dev_stack[cur] = now;
4038 			iter_stack[cur++] = iter;
4039 			if (max <= cur)
4040 				max = cur;
4041 			break;
4042 		}
4043 
4044 		if (!next) {
4045 			if (!cur)
4046 				return max;
4047 			next = dev_stack[--cur];
4048 			niter = iter_stack[cur];
4049 		}
4050 
4051 		now = next;
4052 		iter = niter;
4053 	}
4054 
4055 	return max;
4056 }
4057 #endif
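
/* Worked example: for a stack bond0 -> bond1 -> eth0 (illustrative),
 * the walk pushes bond0 and then bond1 onto dev_stack before running
 * out of lower devices, so the function returns a nesting depth of 2;
 * bond_get_stats() below passes that depth to spin_lock_nested() so
 * lockdep can tell the stacked stats_lock instances apart.
 */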
4058 
4059 static void bond_get_stats(struct net_device *bond_dev,
4060 			   struct rtnl_link_stats64 *stats)
4061 {
4062 	struct bonding *bond = netdev_priv(bond_dev);
4063 	struct rtnl_link_stats64 temp;
4064 	struct list_head *iter;
4065 	struct slave *slave;
4066 	int nest_level = 0;
4067 
4068 
4069 	rcu_read_lock();
4070 #ifdef CONFIG_LOCKDEP
4071 	nest_level = bond_get_lowest_level_rcu(bond_dev);
4072 #endif
4073 
4074 	spin_lock_nested(&bond->stats_lock, nest_level);
4075 	memcpy(stats, &bond->bond_stats, sizeof(*stats));
4076 
4077 	bond_for_each_slave_rcu(bond, slave, iter) {
4078 		const struct rtnl_link_stats64 *new =
4079 			dev_get_stats(slave->dev, &temp);
4080 
4081 		bond_fold_stats(stats, new, &slave->slave_stats);
4082 
4083 		/* save off the slave stats for the next run */
4084 		memcpy(&slave->slave_stats, new, sizeof(*new));
4085 	}
4086 
4087 	memcpy(&bond->bond_stats, stats, sizeof(*stats));
4088 	spin_unlock(&bond->stats_lock);
4089 	rcu_read_unlock();
4090 }
4091 
4092 static int bond_eth_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
4093 {
4094 	struct bonding *bond = netdev_priv(bond_dev);
4095 	struct mii_ioctl_data *mii = NULL;
4096 	const struct net_device_ops *ops;
4097 	struct net_device *real_dev;
4098 	struct hwtstamp_config cfg;
4099 	struct ifreq ifrr;
4100 	int res = 0;
4101 
4102 	netdev_dbg(bond_dev, "bond_eth_ioctl: cmd=%d\n", cmd);
4103 
4104 	switch (cmd) {
4105 	case SIOCGMIIPHY:
4106 		mii = if_mii(ifr);
4107 		if (!mii)
4108 			return -EINVAL;
4109 
4110 		mii->phy_id = 0;
4111 		fallthrough;
4112 	case SIOCGMIIREG:
4113 		/* We do this again just in case we were called by SIOCGMIIREG
4114 		 * instead of SIOCGMIIPHY.
4115 		 */
4116 		mii = if_mii(ifr);
4117 		if (!mii)
4118 			return -EINVAL;
4119 
4120 		if (mii->reg_num == 1) {
4121 			mii->val_out = 0;
4122 			if (netif_carrier_ok(bond->dev))
4123 				mii->val_out = BMSR_LSTATUS;
4124 		}
4125 
4126 		break;
4127 	case SIOCSHWTSTAMP:
4128 		if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
4129 			return -EFAULT;
4130 
4131 		if (!(cfg.flags & HWTSTAMP_FLAG_BONDED_PHC_INDEX))
4132 			return -EOPNOTSUPP;
4133 
4134 		fallthrough;
4135 	case SIOCGHWTSTAMP:
4136 		real_dev = bond_option_active_slave_get_rcu(bond);
4137 		if (!real_dev)
4138 			return -EOPNOTSUPP;
4139 
4140 		strscpy_pad(ifrr.ifr_name, real_dev->name, IFNAMSIZ);
4141 		ifrr.ifr_ifru = ifr->ifr_ifru;
4142 
4143 		ops = real_dev->netdev_ops;
4144 		if (netif_device_present(real_dev) && ops->ndo_eth_ioctl) {
4145 			res = ops->ndo_eth_ioctl(real_dev, &ifrr, cmd);
4146 			if (res)
4147 				return res;
4148 
4149 			ifr->ifr_ifru = ifrr.ifr_ifru;
4150 			if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
4151 				return -EFAULT;
4152 
4153 			/* Set the BOND_PHC_INDEX flag to notify user space */
4154 			cfg.flags |= HWTSTAMP_FLAG_BONDED_PHC_INDEX;
4155 
4156 			return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ?
4157 				-EFAULT : 0;
4158 		}
4159 		fallthrough;
4160 	default:
4161 		res = -EOPNOTSUPP;
4162 	}
4163 
4164 	return res;
4165 }
4166 
4167 static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
4168 {
4169 	struct bonding *bond = netdev_priv(bond_dev);
4170 	struct net_device *slave_dev = NULL;
4171 	struct ifbond k_binfo;
4172 	struct ifbond __user *u_binfo = NULL;
4173 	struct ifslave k_sinfo;
4174 	struct ifslave __user *u_sinfo = NULL;
4175 	struct bond_opt_value newval;
4176 	struct net *net;
4177 	int res = 0;
4178 
4179 	netdev_dbg(bond_dev, "bond_ioctl: cmd=%d\n", cmd);
4180 
4181 	switch (cmd) {
4182 	case SIOCBONDINFOQUERY:
4183 		u_binfo = (struct ifbond __user *)ifr->ifr_data;
4184 
4185 		if (copy_from_user(&k_binfo, u_binfo, sizeof(ifbond)))
4186 			return -EFAULT;
4187 
4188 		bond_info_query(bond_dev, &k_binfo);
4189 		if (copy_to_user(u_binfo, &k_binfo, sizeof(ifbond)))
4190 			return -EFAULT;
4191 
4192 		return 0;
4193 	case SIOCBONDSLAVEINFOQUERY:
4194 		u_sinfo = (struct ifslave __user *)ifr->ifr_data;
4195 
4196 		if (copy_from_user(&k_sinfo, u_sinfo, sizeof(ifslave)))
4197 			return -EFAULT;
4198 
4199 		res = bond_slave_info_query(bond_dev, &k_sinfo);
4200 		if (res == 0 &&
4201 		    copy_to_user(u_sinfo, &k_sinfo, sizeof(ifslave)))
4202 			return -EFAULT;
4203 
4204 		return res;
4205 	default:
4206 		break;
4207 	}
4208 
4209 	net = dev_net(bond_dev);
4210 
4211 	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
4212 		return -EPERM;
4213 
4214 	slave_dev = __dev_get_by_name(net, ifr->ifr_slave);
4215 
4216 	slave_dbg(bond_dev, slave_dev, "slave_dev=%p:\n", slave_dev);
4217 
4218 	if (!slave_dev)
4219 		return -ENODEV;
4220 
4221 	switch (cmd) {
4222 	case SIOCBONDENSLAVE:
4223 		res = bond_enslave(bond_dev, slave_dev, NULL);
4224 		break;
4225 	case SIOCBONDRELEASE:
4226 		res = bond_release(bond_dev, slave_dev);
4227 		break;
4228 	case SIOCBONDSETHWADDR:
4229 		res = bond_set_dev_addr(bond_dev, slave_dev);
4230 		break;
4231 	case SIOCBONDCHANGEACTIVE:
4232 		bond_opt_initstr(&newval, slave_dev->name);
4233 		res = __bond_opt_set_notify(bond, BOND_OPT_ACTIVE_SLAVE,
4234 					    &newval);
4235 		break;
4236 	default:
4237 		res = -EOPNOTSUPP;
4238 	}
4239 
4240 	return res;
4241 }
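
/* For illustration, a minimal (hypothetical) userspace sketch that
 * reaches the SIOCBONDENSLAVE case above; error handling trimmed:
 *
 *	struct ifreq ifr = {};
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "bond0", IFNAMSIZ - 1);
 *	strncpy(ifr.ifr_slave, "eth0", IFNAMSIZ - 1);
 *	if (ioctl(fd, SIOCBONDENSLAVE, &ifr) < 0)
 *		perror("SIOCBONDENSLAVE");
 *
 * The handler resolves ifr_slave via __dev_get_by_name() and requires
 * CAP_NET_ADMIN in the bond's network namespace.
 */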
4242 
4243 static int bond_siocdevprivate(struct net_device *bond_dev, struct ifreq *ifr,
4244 			       void __user *data, int cmd)
4245 {
4246 	struct ifreq ifrdata = { .ifr_data = data };
4247 
4248 	switch (cmd) {
4249 	case BOND_INFO_QUERY_OLD:
4250 		return bond_do_ioctl(bond_dev, &ifrdata, SIOCBONDINFOQUERY);
4251 	case BOND_SLAVE_INFO_QUERY_OLD:
4252 		return bond_do_ioctl(bond_dev, &ifrdata, SIOCBONDSLAVEINFOQUERY);
4253 	case BOND_ENSLAVE_OLD:
4254 		return bond_do_ioctl(bond_dev, ifr, SIOCBONDENSLAVE);
4255 	case BOND_RELEASE_OLD:
4256 		return bond_do_ioctl(bond_dev, ifr, SIOCBONDRELEASE);
4257 	case BOND_SETHWADDR_OLD:
4258 		return bond_do_ioctl(bond_dev, ifr, SIOCBONDSETHWADDR);
4259 	case BOND_CHANGE_ACTIVE_OLD:
4260 		return bond_do_ioctl(bond_dev, ifr, SIOCBONDCHANGEACTIVE);
4261 	}
4262 
4263 	return -EOPNOTSUPP;
4264 }
4265 
4266 static void bond_change_rx_flags(struct net_device *bond_dev, int change)
4267 {
4268 	struct bonding *bond = netdev_priv(bond_dev);
4269 
4270 	if (change & IFF_PROMISC)
4271 		bond_set_promiscuity(bond,
4272 				     bond_dev->flags & IFF_PROMISC ? 1 : -1);
4273 
4274 	if (change & IFF_ALLMULTI)
4275 		bond_set_allmulti(bond,
4276 				  bond_dev->flags & IFF_ALLMULTI ? 1 : -1);
4277 }
4278 
4279 static void bond_set_rx_mode(struct net_device *bond_dev)
4280 {
4281 	struct bonding *bond = netdev_priv(bond_dev);
4282 	struct list_head *iter;
4283 	struct slave *slave;
4284 
4285 	rcu_read_lock();
4286 	if (bond_uses_primary(bond)) {
4287 		slave = rcu_dereference(bond->curr_active_slave);
4288 		if (slave) {
4289 			dev_uc_sync(slave->dev, bond_dev);
4290 			dev_mc_sync(slave->dev, bond_dev);
4291 		}
4292 	} else {
4293 		bond_for_each_slave_rcu(bond, slave, iter) {
4294 			dev_uc_sync_multiple(slave->dev, bond_dev);
4295 			dev_mc_sync_multiple(slave->dev, bond_dev);
4296 		}
4297 	}
4298 	rcu_read_unlock();
4299 }
4300 
4301 static int bond_neigh_init(struct neighbour *n)
4302 {
4303 	struct bonding *bond = netdev_priv(n->dev);
4304 	const struct net_device_ops *slave_ops;
4305 	struct neigh_parms parms;
4306 	struct slave *slave;
4307 	int ret = 0;
4308 
4309 	rcu_read_lock();
4310 	slave = bond_first_slave_rcu(bond);
4311 	if (!slave)
4312 		goto out;
4313 	slave_ops = slave->dev->netdev_ops;
4314 	if (!slave_ops->ndo_neigh_setup)
4315 		goto out;
4316 
4317 	/* TODO: find another way [1] to implement this.
4318 	 * Passing a zeroed structure is fragile,
4319 	 * but at least we do not pass garbage.
4320 	 *
4321 	 * [1] One way would be that ndo_neigh_setup() never touch
4322 	 *     struct neigh_parms, but propagate the new neigh_setup()
4323 	 *     back to ___neigh_create() / neigh_parms_alloc()
4324 	 */
4325 	memset(&parms, 0, sizeof(parms));
4326 	ret = slave_ops->ndo_neigh_setup(slave->dev, &parms);
4327 
4328 	if (ret)
4329 		goto out;
4330 
4331 	if (parms.neigh_setup)
4332 		ret = parms.neigh_setup(n);
4333 out:
4334 	rcu_read_unlock();
4335 	return ret;
4336 }
4337 
4338 /* The bonding ndo_neigh_setup is called at init time before any
4339  * slave exists. So we must declare a proxy setup function which will
4340  * be used at run time to resolve the actual slave neigh param setup.
4341  *
4342  * It's also called by master devices (such as vlans) to setup their
4343  * underlying devices. In that case - do nothing, we're already set up from
4344  * our init.
4345  */
4346 static int bond_neigh_setup(struct net_device *dev,
4347 			    struct neigh_parms *parms)
4348 {
4349 	/* modify only our neigh_parms */
4350 	if (parms->dev == dev)
4351 		parms->neigh_setup = bond_neigh_init;
4352 
4353 	return 0;
4354 }
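
/* The resulting call chain for a neighbour created on top of the bond
 * is (roughly): neigh creation -> parms->neigh_setup (bond_neigh_init)
 * -> first slave's ndo_neigh_setup -> parms.neigh_setup(n), so the
 * slave's per-neighbour setup still runs even though the bond device
 * registered its neigh_parms long before any slave existed.
 */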
4355 
4356 /* Change the MTU of all of a master's slaves to match the master */
4357 static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
4358 {
4359 	struct bonding *bond = netdev_priv(bond_dev);
4360 	struct slave *slave, *rollback_slave;
4361 	struct list_head *iter;
4362 	int res = 0;
4363 
4364 	netdev_dbg(bond_dev, "bond=%p, new_mtu=%d\n", bond, new_mtu);
4365 
4366 	bond_for_each_slave(bond, slave, iter) {
4367 		slave_dbg(bond_dev, slave->dev, "s %p c_m %p\n",
4368 			   slave, slave->dev->netdev_ops->ndo_change_mtu);
4369 
4370 		res = dev_set_mtu(slave->dev, new_mtu);
4371 
4372 		if (res) {
4373 			/* If we failed to set the slave's mtu to the new value
4374 			 * we must abort the operation even in ACTIVE_BACKUP
4375 			 * mode, because if we allow the backup slaves to have
4376 			 * different mtu values than the active slave we'll
4377 			 * need to change their mtu when doing a failover. That
4378 			 * means changing their mtu from timer context, which
4379 			 * is probably not a good idea.
4380 			 */
4381 			slave_dbg(bond_dev, slave->dev, "err %d setting mtu to %d\n",
4382 				  res, new_mtu);
4383 			goto unwind;
4384 		}
4385 	}
4386 
4387 	bond_dev->mtu = new_mtu;
4388 
4389 	return 0;
4390 
4391 unwind:
4392 	/* unwind from head to the slave that failed */
4393 	bond_for_each_slave(bond, rollback_slave, iter) {
4394 		int tmp_res;
4395 
4396 		if (rollback_slave == slave)
4397 			break;
4398 
4399 		tmp_res = dev_set_mtu(rollback_slave->dev, bond_dev->mtu);
4400 		if (tmp_res)
4401 			slave_dbg(bond_dev, rollback_slave->dev, "unwind err %d\n",
4402 				  tmp_res);
4403 	}
4404 
4405 	return res;
4406 }
4407 
4408 /* Change HW address
4409  *
4410  * Note that many devices must be down to change the HW address, and
4411  * downing the master releases all slaves.  We can make bonds full of
4412  * bonding devices to test this, however.
4413  */
4414 static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
4415 {
4416 	struct bonding *bond = netdev_priv(bond_dev);
4417 	struct slave *slave, *rollback_slave;
4418 	struct sockaddr_storage *ss = addr, tmp_ss;
4419 	struct list_head *iter;
4420 	int res = 0;
4421 
4422 	if (BOND_MODE(bond) == BOND_MODE_ALB)
4423 		return bond_alb_set_mac_address(bond_dev, addr);
4424 
4425 
4426 	netdev_dbg(bond_dev, "%s: bond=%p\n", __func__, bond);
4427 
4428 	/* If fail_over_mac is enabled, do nothing and return success.
4429 	 * Returning an error causes ifenslave to fail.
4430 	 */
4431 	if (bond->params.fail_over_mac &&
4432 	    BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
4433 		return 0;
4434 
4435 	if (!is_valid_ether_addr(ss->__data))
4436 		return -EADDRNOTAVAIL;
4437 
4438 	bond_for_each_slave(bond, slave, iter) {
4439 		slave_dbg(bond_dev, slave->dev, "%s: slave=%p\n",
4440 			  __func__, slave);
4441 		res = dev_set_mac_address(slave->dev, addr, NULL);
4442 		if (res) {
4443 			/* TODO: consider downing the slave
4444 			 * and retrying?
4445 			 * User should expect communications
4446 			 * breakage anyway until ARP finishes
4447 			 * updating, so...
4448 			 */
4449 			slave_dbg(bond_dev, slave->dev, "%s: err %d\n",
4450 				  __func__, res);
4451 			goto unwind;
4452 		}
4453 	}
4454 
4455 	/* success */
4456 	dev_addr_set(bond_dev, ss->__data);
4457 	return 0;
4458 
4459 unwind:
4460 	memcpy(tmp_ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
4461 	tmp_ss.ss_family = bond_dev->type;
4462 
4463 	/* unwind from head to the slave that failed */
4464 	bond_for_each_slave(bond, rollback_slave, iter) {
4465 		int tmp_res;
4466 
4467 		if (rollback_slave == slave)
4468 			break;
4469 
4470 		tmp_res = dev_set_mac_address(rollback_slave->dev,
4471 					      (struct sockaddr *)&tmp_ss, NULL);
4472 		if (tmp_res) {
4473 			slave_dbg(bond_dev, rollback_slave->dev, "%s: unwind err %d\n",
4474 				   __func__, tmp_res);
4475 		}
4476 	}
4477 
4478 	return res;
4479 }
4480 
4481 /**
4482  * bond_get_slave_by_id - get xmit slave with slave_id
4483  * @bond: bonding device that is transmitting
4484  * @slave_id: slave id up to slave_cnt-1 through which to transmit
4485  *
4486  * This function tries to get the slave with slave_id, but if that
4487  * fails it falls back to the first available slave for transmission.
4488  */
4489 static struct slave *bond_get_slave_by_id(struct bonding *bond,
4490 					  int slave_id)
4491 {
4492 	struct list_head *iter;
4493 	struct slave *slave;
4494 	int i = slave_id;
4495 
4496 	/* Here we start from the slave with slave_id */
4497 	bond_for_each_slave_rcu(bond, slave, iter) {
4498 		if (--i < 0) {
4499 			if (bond_slave_can_tx(slave))
4500 				return slave;
4501 		}
4502 	}
4503 
4504 	/* Here we start from the first slave up to slave_id */
4505 	i = slave_id;
4506 	bond_for_each_slave_rcu(bond, slave, iter) {
4507 		if (--i < 0)
4508 			break;
4509 		if (bond_slave_can_tx(slave))
4510 			return slave;
4511 	}
4512 	/* no slave that can tx has been found */
4513 	return NULL;
4514 }
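
/* Worked example: with slaves [eth0, eth1, eth2] (illustrative) and
 * slave_id 1, the first loop starts testing at eth1 and then eth2;
 * only if neither can tx does the second loop wrap around and try
 * eth0 before giving up with NULL.
 */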
4515 
4516 /**
4517  * bond_rr_gen_slave_id - generate slave id based on packets_per_slave
4518  * @bond: bonding device to use
4519  *
4520  * Based on the value of the bonding device's packets_per_slave parameter
4521  * this function generates a slave id, which is usually used as the next
4522  * slave to transmit through.
4523  */
4524 static u32 bond_rr_gen_slave_id(struct bonding *bond)
4525 {
4526 	u32 slave_id;
4527 	struct reciprocal_value reciprocal_packets_per_slave;
4528 	int packets_per_slave = bond->params.packets_per_slave;
4529 
4530 	switch (packets_per_slave) {
4531 	case 0:
4532 		slave_id = prandom_u32();
4533 		break;
4534 	case 1:
4535 		slave_id = this_cpu_inc_return(*bond->rr_tx_counter);
4536 		break;
4537 	default:
4538 		reciprocal_packets_per_slave =
4539 			bond->params.reciprocal_packets_per_slave;
4540 		slave_id = this_cpu_inc_return(*bond->rr_tx_counter);
4541 		slave_id = reciprocal_divide(slave_id,
4542 					     reciprocal_packets_per_slave);
4543 		break;
4544 	}
4545 
4546 	return slave_id;
4547 }
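
/* Worked example: with packets_per_slave = 3, successive counter
 * values 1, 2, 3, 4, 5, 6 reciprocal-divide to slave ids
 * 0, 0, 1, 1, 1, 2, so each slave carries roughly three consecutive
 * packets before the round-robin rotation advances.
 */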
4548 
4549 static struct slave *bond_xmit_roundrobin_slave_get(struct bonding *bond,
4550 						    struct sk_buff *skb)
4551 {
4552 	struct slave *slave;
4553 	int slave_cnt;
4554 	u32 slave_id;
4555 
4556 	/* Start with the curr_active_slave that joined the bond as the
4557 	 * default for sending IGMP traffic.  For failover purposes one
4558 	 * needs to maintain some consistency for the interface that will
4559 	 * send the join/membership reports.  The curr_active_slave found
4560 	 * will send all of this type of traffic.
4561 	 */
4562 	if (skb->protocol == htons(ETH_P_IP)) {
4563 		int noff = skb_network_offset(skb);
4564 		struct iphdr *iph;
4565 
4566 		if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph))))
4567 			goto non_igmp;
4568 
4569 		iph = ip_hdr(skb);
4570 		if (iph->protocol == IPPROTO_IGMP) {
4571 			slave = rcu_dereference(bond->curr_active_slave);
4572 			if (slave)
4573 				return slave;
4574 			return bond_get_slave_by_id(bond, 0);
4575 		}
4576 	}
4577 
4578 non_igmp:
4579 	slave_cnt = READ_ONCE(bond->slave_cnt);
4580 	if (likely(slave_cnt)) {
4581 		slave_id = bond_rr_gen_slave_id(bond) % slave_cnt;
4582 		return bond_get_slave_by_id(bond, slave_id);
4583 	}
4584 	return NULL;
4585 }
4586 
4587 static struct slave *bond_xdp_xmit_roundrobin_slave_get(struct bonding *bond,
4588 							struct xdp_buff *xdp)
4589 {
4590 	struct slave *slave;
4591 	int slave_cnt;
4592 	u32 slave_id;
4593 	const struct ethhdr *eth;
4594 	void *data = xdp->data;
4595 
4596 	if (data + sizeof(struct ethhdr) > xdp->data_end)
4597 		goto non_igmp;
4598 
4599 	eth = (struct ethhdr *)data;
4600 	data += sizeof(struct ethhdr);
4601 
4602 	/* See comment on IGMP in bond_xmit_roundrobin_slave_get() */
4603 	if (eth->h_proto == htons(ETH_P_IP)) {
4604 		const struct iphdr *iph;
4605 
4606 		if (data + sizeof(struct iphdr) > xdp->data_end)
4607 			goto non_igmp;
4608 
4609 		iph = (struct iphdr *)data;
4610 
4611 		if (iph->protocol == IPPROTO_IGMP) {
4612 			slave = rcu_dereference(bond->curr_active_slave);
4613 			if (slave)
4614 				return slave;
4615 			return bond_get_slave_by_id(bond, 0);
4616 		}
4617 	}
4618 
4619 non_igmp:
4620 	slave_cnt = READ_ONCE(bond->slave_cnt);
4621 	if (likely(slave_cnt)) {
4622 		slave_id = bond_rr_gen_slave_id(bond) % slave_cnt;
4623 		return bond_get_slave_by_id(bond, slave_id);
4624 	}
4625 	return NULL;
4626 }
4627 
4628 static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb,
4629 					struct net_device *bond_dev)
4630 {
4631 	struct bonding *bond = netdev_priv(bond_dev);
4632 	struct slave *slave;
4633 
4634 	slave = bond_xmit_roundrobin_slave_get(bond, skb);
4635 	if (likely(slave))
4636 		return bond_dev_queue_xmit(bond, skb, slave->dev);
4637 
4638 	return bond_tx_drop(bond_dev, skb);
4639 }
4640 
4641 static struct slave *bond_xmit_activebackup_slave_get(struct bonding *bond)
4642 {
4643 	return rcu_dereference(bond->curr_active_slave);
4644 }
4645 
4646 /* In active-backup mode, we know that bond->curr_active_slave is always valid if
4647  * the bond has a usable interface.
4648  */
4649 static netdev_tx_t bond_xmit_activebackup(struct sk_buff *skb,
4650 					  struct net_device *bond_dev)
4651 {
4652 	struct bonding *bond = netdev_priv(bond_dev);
4653 	struct slave *slave;
4654 
4655 	slave = bond_xmit_activebackup_slave_get(bond);
4656 	if (slave)
4657 		return bond_dev_queue_xmit(bond, skb, slave->dev);
4658 
4659 	return bond_tx_drop(bond_dev, skb);
4660 }
4661 
4662 /* Use this to update slave_array when (a) it's not appropriate to update
4663  * slave_array right away (note that bond_update_slave_arr() may sleep)
4664  * and/or (b) RTNL is not held.
4665  */
4666 void bond_slave_arr_work_rearm(struct bonding *bond, unsigned long delay)
4667 {
4668 	queue_delayed_work(bond->wq, &bond->slave_arr_work, delay);
4669 }
4670 
4671 /* Slave array work handler. Holds only RTNL */
4672 static void bond_slave_arr_handler(struct work_struct *work)
4673 {
4674 	struct bonding *bond = container_of(work, struct bonding,
4675 					    slave_arr_work.work);
4676 	int ret;
4677 
4678 	if (!rtnl_trylock())
4679 		goto err;
4680 
4681 	ret = bond_update_slave_arr(bond, NULL);
4682 	rtnl_unlock();
4683 	if (ret) {
4684 		pr_warn_ratelimited("Failed to update slave array from WT\n");
4685 		goto err;
4686 	}
4687 	return;
4688 
4689 err:
4690 	bond_slave_arr_work_rearm(bond, 1);
4691 }
4692 
4693 static void bond_skip_slave(struct bond_up_slave *slaves,
4694 			    struct slave *skipslave)
4695 {
4696 	int idx;
4697 
4698 	/* Rare situation where caller has asked to skip a specific
4699 	 * slave but allocation failed (most likely!). BTW this is
4700 	 * only possible when the call is initiated from
4701 	 * __bond_release_one(). In this situation, overwrite the
4702 	 * skipslave entry in the array with the last entry from the
4703 	 * array to avoid a situation where the xmit path may choose
4704 	 * this to-be-skipped slave to send a packet out.
4705 	 */
4706 	for (idx = 0; slaves && idx < slaves->count; idx++) {
4707 		if (skipslave == slaves->arr[idx]) {
4708 			slaves->arr[idx] =
4709 				slaves->arr[slaves->count - 1];
4710 			slaves->count--;
4711 			break;
4712 		}
4713 	}
4714 }
4715 
4716 static void bond_set_slave_arr(struct bonding *bond,
4717 			       struct bond_up_slave *usable_slaves,
4718 			       struct bond_up_slave *all_slaves)
4719 {
4720 	struct bond_up_slave *usable, *all;
4721 
4722 	usable = rtnl_dereference(bond->usable_slaves);
4723 	rcu_assign_pointer(bond->usable_slaves, usable_slaves);
4724 	kfree_rcu(usable, rcu);
4725 
4726 	all = rtnl_dereference(bond->all_slaves);
4727 	rcu_assign_pointer(bond->all_slaves, all_slaves);
4728 	kfree_rcu(all, rcu);
4729 }
4730 
4731 static void bond_reset_slave_arr(struct bonding *bond)
4732 {
4733 	struct bond_up_slave *usable, *all;
4734 
4735 	usable = rtnl_dereference(bond->usable_slaves);
4736 	if (usable) {
4737 		RCU_INIT_POINTER(bond->usable_slaves, NULL);
4738 		kfree_rcu(usable, rcu);
4739 	}
4740 
4741 	all = rtnl_dereference(bond->all_slaves);
4742 	if (all) {
4743 		RCU_INIT_POINTER(bond->all_slaves, NULL);
4744 		kfree_rcu(all, rcu);
4745 	}
4746 }
4747 
4748 /* Build the usable slaves array in the control path for modes that use xmit-hash
4749  * to determine the slave interface -
4750  * (a) BOND_MODE_8023AD
4751  * (b) BOND_MODE_XOR
4752  * (c) (BOND_MODE_TLB || BOND_MODE_ALB) && tlb_dynamic_lb == 0
4753  *
4754  * The caller is expected to hold RTNL only and NO other lock!
4755  */
4756 int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
4757 {
4758 	struct bond_up_slave *usable_slaves = NULL, *all_slaves = NULL;
4759 	struct slave *slave;
4760 	struct list_head *iter;
4761 	int agg_id = 0;
4762 	int ret = 0;
4763 
4764 	might_sleep();
4765 
4766 	usable_slaves = kzalloc(struct_size(usable_slaves, arr,
4767 					    bond->slave_cnt), GFP_KERNEL);
4768 	all_slaves = kzalloc(struct_size(all_slaves, arr,
4769 					 bond->slave_cnt), GFP_KERNEL);
4770 	if (!usable_slaves || !all_slaves) {
4771 		ret = -ENOMEM;
4772 		goto out;
4773 	}
4774 	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
4775 		struct ad_info ad_info;
4776 
4777 		spin_lock_bh(&bond->mode_lock);
4778 		if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
4779 			spin_unlock_bh(&bond->mode_lock);
4780 			pr_debug("bond_3ad_get_active_agg_info failed\n");
4781 			/* No active aggregator means it's not safe to use
4782 			 * the previous array.
4783 			 */
4784 			bond_reset_slave_arr(bond);
4785 			goto out;
4786 		}
4787 		spin_unlock_bh(&bond->mode_lock);
4788 		agg_id = ad_info.aggregator_id;
4789 	}
4790 	bond_for_each_slave(bond, slave, iter) {
4791 		if (skipslave == slave)
4792 			continue;
4793 
4794 		all_slaves->arr[all_slaves->count++] = slave;
4795 		if (BOND_MODE(bond) == BOND_MODE_8023AD) {
4796 			struct aggregator *agg;
4797 
4798 			agg = SLAVE_AD_INFO(slave)->port.aggregator;
4799 			if (!agg || agg->aggregator_identifier != agg_id)
4800 				continue;
4801 		}
4802 		if (!bond_slave_can_tx(slave))
4803 			continue;
4804 
4805 		slave_dbg(bond->dev, slave->dev, "Adding slave to tx hash array[%d]\n",
4806 			  usable_slaves->count);
4807 
4808 		usable_slaves->arr[usable_slaves->count++] = slave;
4809 	}
4810 
4811 	bond_set_slave_arr(bond, usable_slaves, all_slaves);
4812 	return ret;
4813 out:
4814 	if (ret != 0 && skipslave) {
4815 		bond_skip_slave(rtnl_dereference(bond->all_slaves),
4816 				skipslave);
4817 		bond_skip_slave(rtnl_dereference(bond->usable_slaves),
4818 				skipslave);
4819 	}
4820 	kfree_rcu(all_slaves, rcu);
4821 	kfree_rcu(usable_slaves, rcu);
4822 
4823 	return ret;
4824 }
4825 
4826 static struct slave *bond_xmit_3ad_xor_slave_get(struct bonding *bond,
4827 						 struct sk_buff *skb,
4828 						 struct bond_up_slave *slaves)
4829 {
4830 	struct slave *slave;
4831 	unsigned int count;
4832 	u32 hash;
4833 
4834 	hash = bond_xmit_hash(bond, skb);
4835 	count = slaves ? READ_ONCE(slaves->count) : 0;
4836 	if (unlikely(!count))
4837 		return NULL;
4838 
4839 	slave = slaves->arr[hash % count];
4840 	return slave;
4841 }
4842 
4843 static struct slave *bond_xdp_xmit_3ad_xor_slave_get(struct bonding *bond,
4844 						     struct xdp_buff *xdp)
4845 {
4846 	struct bond_up_slave *slaves;
4847 	unsigned int count;
4848 	u32 hash;
4849 
4850 	hash = bond_xmit_hash_xdp(bond, xdp);
4851 	slaves = rcu_dereference(bond->usable_slaves);
4852 	count = slaves ? READ_ONCE(slaves->count) : 0;
4853 	if (unlikely(!count))
4854 		return NULL;
4855 
4856 	return slaves->arr[hash % count];
4857 }
4858 
4859 /* Use this Xmit function for 3AD as well as XOR modes. The current
4860  * usable slave array is formed in the control path. The xmit function
4861  * just calculates the hash and sends the packet out.
4862  */
4863 static netdev_tx_t bond_3ad_xor_xmit(struct sk_buff *skb,
4864 				     struct net_device *dev)
4865 {
4866 	struct bonding *bond = netdev_priv(dev);
4867 	struct bond_up_slave *slaves;
4868 	struct slave *slave;
4869 
4870 	slaves = rcu_dereference(bond->usable_slaves);
4871 	slave = bond_xmit_3ad_xor_slave_get(bond, skb, slaves);
4872 	if (likely(slave))
4873 		return bond_dev_queue_xmit(bond, skb, slave->dev);
4874 
4875 	return bond_tx_drop(dev, skb);
4876 }
4877 
4878 /* in broadcast mode, we send everything to all usable interfaces. */
4879 static netdev_tx_t bond_xmit_broadcast(struct sk_buff *skb,
4880 				       struct net_device *bond_dev)
4881 {
4882 	struct bonding *bond = netdev_priv(bond_dev);
4883 	struct slave *slave = NULL;
4884 	struct list_head *iter;
4885 	bool xmit_suc = false;
4886 	bool skb_used = false;
4887 
4888 	bond_for_each_slave_rcu(bond, slave, iter) {
4889 		struct sk_buff *skb2;
4890 
4891 		if (!(bond_slave_is_up(slave) && slave->link == BOND_LINK_UP))
4892 			continue;
4893 
4894 		if (bond_is_last_slave(bond, slave)) {
4895 			skb2 = skb;
4896 			skb_used = true;
4897 		} else {
4898 			skb2 = skb_clone(skb, GFP_ATOMIC);
4899 			if (!skb2) {
4900 				net_err_ratelimited("%s: Error: %s: skb_clone() failed\n",
4901 						    bond_dev->name, __func__);
4902 				continue;
4903 			}
4904 		}
4905 
4906 		if (bond_dev_queue_xmit(bond, skb2, slave->dev) == NETDEV_TX_OK)
4907 			xmit_suc = true;
4908 	}
4909 
4910 	if (!skb_used)
4911 		dev_kfree_skb_any(skb);
4912 
4913 	if (xmit_suc)
4914 		return NETDEV_TX_OK;
4915 
4916 	atomic_long_inc(&bond_dev->tx_dropped);
4917 	return NET_XMIT_DROP;
4918 }
4919 
4920 /*------------------------- Device initialization ---------------------------*/
4921 
4922 /* Lookup the slave that corresponds to a qid */
4923 static inline int bond_slave_override(struct bonding *bond,
4924 				      struct sk_buff *skb)
4925 {
4926 	struct slave *slave = NULL;
4927 	struct list_head *iter;
4928 
4929 	if (!skb_rx_queue_recorded(skb))
4930 		return 1;
4931 
4932 	/* Find out if any slaves have the same mapping as this skb. */
4933 	bond_for_each_slave_rcu(bond, slave, iter) {
4934 		if (slave->queue_id == skb_get_queue_mapping(skb)) {
4935 			if (bond_slave_is_up(slave) &&
4936 			    slave->link == BOND_LINK_UP) {
4937 				bond_dev_queue_xmit(bond, skb, slave->dev);
4938 				return 0;
4939 			}
4940 			/* If the slave isn't UP, use the default transmit policy. */
4941 			break;
4942 		}
4943 	}
4944 
4945 	return 1;
4946 }
4947 
4948 
4949 static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
4950 			     struct net_device *sb_dev)
4951 {
4952 	/* This helper function exists to help dev_pick_tx get the correct
4953 	 * destination queue.  Using a helper function skips a call to
4954 	 * skb_tx_hash and will put the skbs in the queue we expect on their
4955 	 * way down to the bonding driver.
4956 	 */
4957 	u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
4958 
4959 	/* Save the original txq to restore before passing to the driver */
4960 	qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb_get_queue_mapping(skb);
4961 
4962 	if (unlikely(txq >= dev->real_num_tx_queues)) {
4963 		do {
4964 			txq -= dev->real_num_tx_queues;
4965 		} while (txq >= dev->real_num_tx_queues);
4966 	}
4967 	return txq;
4968 }
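
/* Worked example: a recorded rx queue of 5 on a bond with 2 real tx
 * queues folds 5 -> 3 -> 1, the same result as 5 % 2, so out-of-range
 * queue ids are wrapped instead of rejected.
 */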
4969 
4970 static struct net_device *bond_xmit_get_slave(struct net_device *master_dev,
4971 					      struct sk_buff *skb,
4972 					      bool all_slaves)
4973 {
4974 	struct bonding *bond = netdev_priv(master_dev);
4975 	struct bond_up_slave *slaves;
4976 	struct slave *slave = NULL;
4977 
4978 	switch (BOND_MODE(bond)) {
4979 	case BOND_MODE_ROUNDROBIN:
4980 		slave = bond_xmit_roundrobin_slave_get(bond, skb);
4981 		break;
4982 	case BOND_MODE_ACTIVEBACKUP:
4983 		slave = bond_xmit_activebackup_slave_get(bond);
4984 		break;
4985 	case BOND_MODE_8023AD:
4986 	case BOND_MODE_XOR:
4987 		if (all_slaves)
4988 			slaves = rcu_dereference(bond->all_slaves);
4989 		else
4990 			slaves = rcu_dereference(bond->usable_slaves);
4991 		slave = bond_xmit_3ad_xor_slave_get(bond, skb, slaves);
4992 		break;
4993 	case BOND_MODE_BROADCAST:
4994 		break;
4995 	case BOND_MODE_ALB:
4996 		slave = bond_xmit_alb_slave_get(bond, skb);
4997 		break;
4998 	case BOND_MODE_TLB:
4999 		slave = bond_xmit_tlb_slave_get(bond, skb);
5000 		break;
5001 	default:
5002 		/* Should never happen, mode already checked */
5003 		WARN_ONCE(true, "Unknown bonding mode");
5004 		break;
5005 	}
5006 
5007 	if (slave)
5008 		return slave->dev;
5009 	return NULL;
5010 }
5011 
5012 static void bond_sk_to_flow(struct sock *sk, struct flow_keys *flow)
5013 {
5014 	switch (sk->sk_family) {
5015 #if IS_ENABLED(CONFIG_IPV6)
5016 	case AF_INET6:
5017 		if (sk->sk_ipv6only ||
5018 		    ipv6_addr_type(&sk->sk_v6_daddr) != IPV6_ADDR_MAPPED) {
5019 			flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
5020 			flow->addrs.v6addrs.src = inet6_sk(sk)->saddr;
5021 			flow->addrs.v6addrs.dst = sk->sk_v6_daddr;
5022 			break;
5023 		}
5024 		fallthrough;
5025 #endif
5026 	default: /* AF_INET */
5027 		flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
5028 		flow->addrs.v4addrs.src = inet_sk(sk)->inet_rcv_saddr;
5029 		flow->addrs.v4addrs.dst = inet_sk(sk)->inet_daddr;
5030 		break;
5031 	}
5032 
5033 	flow->ports.src = inet_sk(sk)->inet_sport;
5034 	flow->ports.dst = inet_sk(sk)->inet_dport;
5035 }
5036 
5037 /**
5038  * bond_sk_hash_l34 - generate a hash value based on the socket's L3 and L4 fields
5039  * @sk: socket to use for headers
5040  *
5041  * This function will extract the necessary fields from the socket
5042  * them to generate a hash based on the LAYER34 xmit_policy.
5043  * Assumes that sk is a TCP or UDP socket.
5044  */
5045 static u32 bond_sk_hash_l34(struct sock *sk)
5046 {
5047 	struct flow_keys flow;
5048 	u32 hash;
5049 
5050 	bond_sk_to_flow(sk, &flow);
5051 
5052 	/* L4 */
5053 	memcpy(&hash, &flow.ports.ports, sizeof(hash));
5054 	/* L3 */
5055 	return bond_ip_hash(hash, &flow);
5056 }
5057 
5058 static struct net_device *__bond_sk_get_lower_dev(struct bonding *bond,
5059 						  struct sock *sk)
5060 {
5061 	struct bond_up_slave *slaves;
5062 	struct slave *slave;
5063 	unsigned int count;
5064 	u32 hash;
5065 
5066 	slaves = rcu_dereference(bond->usable_slaves);
5067 	count = slaves ? READ_ONCE(slaves->count) : 0;
5068 	if (unlikely(!count))
5069 		return NULL;
5070 
5071 	hash = bond_sk_hash_l34(sk);
5072 	slave = slaves->arr[hash % count];
5073 
5074 	return slave->dev;
5075 }
5076 
5077 static struct net_device *bond_sk_get_lower_dev(struct net_device *dev,
5078 						struct sock *sk)
5079 {
5080 	struct bonding *bond = netdev_priv(dev);
5081 	struct net_device *lower = NULL;
5082 
5083 	rcu_read_lock();
5084 	if (bond_sk_check(bond))
5085 		lower = __bond_sk_get_lower_dev(bond, sk);
5086 	rcu_read_unlock();
5087 
5088 	return lower;
5089 }
5090 
5091 #if IS_ENABLED(CONFIG_TLS_DEVICE)
5092 static netdev_tx_t bond_tls_device_xmit(struct bonding *bond, struct sk_buff *skb,
5093 					struct net_device *dev)
5094 {
5095 	if (likely(bond_get_slave_by_dev(bond, tls_get_ctx(skb->sk)->netdev)))
5096 		return bond_dev_queue_xmit(bond, skb, tls_get_ctx(skb->sk)->netdev);
5097 	return bond_tx_drop(dev, skb);
5098 }
5099 #endif
5100 
5101 static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
5102 {
5103 	struct bonding *bond = netdev_priv(dev);
5104 
5105 	if (bond_should_override_tx_queue(bond) &&
5106 	    !bond_slave_override(bond, skb))
5107 		return NETDEV_TX_OK;
5108 
5109 #if IS_ENABLED(CONFIG_TLS_DEVICE)
5110 	if (skb->sk && tls_is_sk_tx_device_offloaded(skb->sk))
5111 		return bond_tls_device_xmit(bond, skb, dev);
5112 #endif
5113 
5114 	switch (BOND_MODE(bond)) {
5115 	case BOND_MODE_ROUNDROBIN:
5116 		return bond_xmit_roundrobin(skb, dev);
5117 	case BOND_MODE_ACTIVEBACKUP:
5118 		return bond_xmit_activebackup(skb, dev);
5119 	case BOND_MODE_8023AD:
5120 	case BOND_MODE_XOR:
5121 		return bond_3ad_xor_xmit(skb, dev);
5122 	case BOND_MODE_BROADCAST:
5123 		return bond_xmit_broadcast(skb, dev);
5124 	case BOND_MODE_ALB:
5125 		return bond_alb_xmit(skb, dev);
5126 	case BOND_MODE_TLB:
5127 		return bond_tlb_xmit(skb, dev);
5128 	default:
5129 		/* Should never happen, mode already checked */
5130 		netdev_err(dev, "Unknown bonding mode %d\n", BOND_MODE(bond));
5131 		WARN_ON_ONCE(1);
5132 		return bond_tx_drop(dev, skb);
5133 	}
5134 }
5135 
5136 static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
5137 {
5138 	struct bonding *bond = netdev_priv(dev);
5139 	netdev_tx_t ret = NETDEV_TX_OK;
5140 
5141 	/* If we risk deadlock from transmitting this in the
5142 	 * netpoll path, tell netpoll to queue the frame for later tx
5143 	 */
5144 	if (unlikely(is_netpoll_tx_blocked(dev)))
5145 		return NETDEV_TX_BUSY;
5146 
5147 	rcu_read_lock();
5148 	if (bond_has_slaves(bond))
5149 		ret = __bond_start_xmit(skb, dev);
5150 	else
5151 		ret = bond_tx_drop(dev, skb);
5152 	rcu_read_unlock();
5153 
5154 	return ret;
5155 }
5156 
5157 static struct net_device *
5158 bond_xdp_get_xmit_slave(struct net_device *bond_dev, struct xdp_buff *xdp)
5159 {
5160 	struct bonding *bond = netdev_priv(bond_dev);
5161 	struct slave *slave;
5162 
5163 	/* Caller needs to hold rcu_read_lock() */
5164 
5165 	switch (BOND_MODE(bond)) {
5166 	case BOND_MODE_ROUNDROBIN:
5167 		slave = bond_xdp_xmit_roundrobin_slave_get(bond, xdp);
5168 		break;
5169 
5170 	case BOND_MODE_ACTIVEBACKUP:
5171 		slave = bond_xmit_activebackup_slave_get(bond);
5172 		break;
5173 
5174 	case BOND_MODE_8023AD:
5175 	case BOND_MODE_XOR:
5176 		slave = bond_xdp_xmit_3ad_xor_slave_get(bond, xdp);
5177 		break;
5178 
5179 	default:
5180 		/* Should never happen. Mode guarded by bond_xdp_check() */
5181 		netdev_err(bond_dev, "Unknown bonding mode %d for xdp xmit\n", BOND_MODE(bond));
5182 		WARN_ON_ONCE(1);
5183 		return NULL;
5184 	}
5185 
5186 	if (slave)
5187 		return slave->dev;
5188 
5189 	return NULL;
5190 }
5191 
5192 static int bond_xdp_xmit(struct net_device *bond_dev,
5193 			 int n, struct xdp_frame **frames, u32 flags)
5194 {
5195 	int nxmit, err = -ENXIO;
5196 
5197 	rcu_read_lock();
5198 
5199 	for (nxmit = 0; nxmit < n; nxmit++) {
5200 		struct xdp_frame *frame = frames[nxmit];
5201 		struct xdp_frame *frames1[] = {frame};
5202 		struct net_device *slave_dev;
5203 		struct xdp_buff xdp;
5204 
5205 		xdp_convert_frame_to_buff(frame, &xdp);
5206 
5207 		slave_dev = bond_xdp_get_xmit_slave(bond_dev, &xdp);
5208 		if (!slave_dev) {
5209 			err = -ENXIO;
5210 			break;
5211 		}
5212 
5213 		err = slave_dev->netdev_ops->ndo_xdp_xmit(slave_dev, 1, frames1, flags);
5214 		if (err < 1)
5215 			break;
5216 	}
5217 
5218 	rcu_read_unlock();
5219 
5220 	/* If an error happened on the first frame then we can pass the error up;
5221 	 * otherwise report the number of frames that were xmitted.
5222 	 */
5223 	if (err < 0)
5224 		return (nxmit == 0 ? err : nxmit);
5225 
5226 	return nxmit;
5227 }
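
/* Worked example of the return convention above: for n = 4 where the
 * slave accepts frames 0 and 1 but fails on frame 2 with err < 0, the
 * loop breaks with nxmit = 2 and the function reports 2 frames sent;
 * only a failure on the very first frame propagates err itself.
 */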
5228 
5229 static int bond_xdp_set(struct net_device *dev, struct bpf_prog *prog,
5230 			struct netlink_ext_ack *extack)
5231 {
5232 	struct bonding *bond = netdev_priv(dev);
5233 	struct list_head *iter;
5234 	struct slave *slave, *rollback_slave;
5235 	struct bpf_prog *old_prog;
5236 	struct netdev_bpf xdp = {
5237 		.command = XDP_SETUP_PROG,
5238 		.flags   = 0,
5239 		.prog    = prog,
5240 		.extack  = extack,
5241 	};
5242 	int err;
5243 
5244 	ASSERT_RTNL();
5245 
5246 	if (!bond_xdp_check(bond))
5247 		return -EOPNOTSUPP;
5248 
5249 	old_prog = bond->xdp_prog;
5250 	bond->xdp_prog = prog;
5251 
5252 	bond_for_each_slave(bond, slave, iter) {
5253 		struct net_device *slave_dev = slave->dev;
5254 
5255 		if (!slave_dev->netdev_ops->ndo_bpf ||
5256 		    !slave_dev->netdev_ops->ndo_xdp_xmit) {
5257 			SLAVE_NL_ERR(dev, slave_dev, extack,
5258 				     "Slave device does not support XDP");
5259 			err = -EOPNOTSUPP;
5260 			goto err;
5261 		}
5262 
5263 		if (dev_xdp_prog_count(slave_dev) > 0) {
5264 			SLAVE_NL_ERR(dev, slave_dev, extack,
5265 				     "Slave has XDP program loaded, please unload before enslaving");
5266 			err = -EOPNOTSUPP;
5267 			goto err;
5268 		}
5269 
5270 		err = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
5271 		if (err < 0) {
5272 			/* ndo_bpf() sets extack error message */
5273 			slave_err(dev, slave_dev, "Error %d calling ndo_bpf\n", err);
5274 			goto err;
5275 		}
5276 		if (prog)
5277 			bpf_prog_inc(prog);
5278 	}
5279 
5280 	if (prog) {
5281 		static_branch_inc(&bpf_master_redirect_enabled_key);
5282 	} else if (old_prog) {
5283 		bpf_prog_put(old_prog);
5284 		static_branch_dec(&bpf_master_redirect_enabled_key);
5285 	}
5286 
5287 	return 0;
5288 
5289 err:
5290 	/* unwind the program changes */
5291 	bond->xdp_prog = old_prog;
5292 	xdp.prog = old_prog;
5293 	xdp.extack = NULL; /* do not overwrite original error */
5294 
5295 	bond_for_each_slave(bond, rollback_slave, iter) {
5296 		struct net_device *slave_dev = rollback_slave->dev;
5297 		int err_unwind;
5298 
5299 		if (slave == rollback_slave)
5300 			break;
5301 
5302 		err_unwind = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
5303 		if (err_unwind < 0)
5304 			slave_err(dev, slave_dev,
5305 				  "Error %d when unwinding XDP program change\n", err_unwind);
5306 		else if (xdp.prog)
5307 			bpf_prog_inc(xdp.prog);
5308 	}
5309 	return err;
5310 }
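
/* Editor's note: on failure, the unwind loop above walks the slave list a
 * second time and restores old_prog on every slave that had already been
 * switched, stopping when it reaches the slave that failed.  xdp.extack is
 * cleared beforehand so the original extack error reported to userspace is
 * not overwritten by the rollback calls.
 */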
5311 
5312 static int bond_xdp(struct net_device *dev, struct netdev_bpf *xdp)
5313 {
5314 	switch (xdp->command) {
5315 	case XDP_SETUP_PROG:
5316 		return bond_xdp_set(dev, xdp->prog, xdp->extack);
5317 	default:
5318 		return -EINVAL;
5319 	}
5320 }
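
/* Editor's note (usage sketch; the object file name is hypothetical): with
 * the ndo_bpf hook above, an XDP program can be attached to the bond device
 * itself, e.g. from userspace:
 *
 *	ip link set dev bond0 xdp obj xdp_prog.o sec xdp
 *
 * bond_xdp_set() then propagates the program to every slave, which is why
 * all slaves must implement ndo_bpf and ndo_xdp_xmit.
 */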
5321 
5322 static u32 bond_mode_bcast_speed(struct slave *slave, u32 speed)
5323 {
5324 	if (speed == 0 || speed == SPEED_UNKNOWN)
5325 		speed = slave->speed;
5326 	else
5327 		speed = min(speed, slave->speed);
5328 
5329 	return speed;
5330 }
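
/* Editor's note: worked example (speeds are illustrative).  In broadcast
 * mode every frame is transmitted on every slave, so the helper above
 * tracks the minimum: slaves at 1000 and 100 Mb/s yield an effective
 * 100 Mb/s, whereas the additive path in bond_ethtool_get_link_ksettings()
 * below would report 1100 Mb/s for the other modes.
 */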
5331 
5332 static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
5333 					   struct ethtool_link_ksettings *cmd)
5334 {
5335 	struct bonding *bond = netdev_priv(bond_dev);
5336 	struct list_head *iter;
5337 	struct slave *slave;
5338 	u32 speed = 0;
5339 
5340 	cmd->base.duplex = DUPLEX_UNKNOWN;
5341 	cmd->base.port = PORT_OTHER;
5342 
5343 	/* Since bond_slave_can_tx() returns false for all inactive or down
5344 	 * slaves, we do not need to check the mode.  Though the link speed
5345 	 * might not represent the true receive or transmit bandwidth (not
5346 	 * all modes are symmetric), it is an accurate upper bound.
5347 	 */
5348 	bond_for_each_slave(bond, slave, iter) {
5349 		if (bond_slave_can_tx(slave)) {
5350 			if (slave->speed != SPEED_UNKNOWN) {
5351 				if (BOND_MODE(bond) == BOND_MODE_BROADCAST)
5352 					speed = bond_mode_bcast_speed(slave,
5353 								      speed);
5354 				else
5355 					speed += slave->speed;
5356 			}
5357 			if (cmd->base.duplex == DUPLEX_UNKNOWN &&
5358 			    slave->duplex != DUPLEX_UNKNOWN)
5359 				cmd->base.duplex = slave->duplex;
5360 		}
5361 	}
5362 	cmd->base.speed = speed ? : SPEED_UNKNOWN;
5363 
5364 	return 0;
5365 }
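
/* Editor's note (illustrative ethtool output; actual values depend on the
 * slaves): the aggregation above is what "ethtool bond0" reports, e.g. two
 * full-duplex 1000 Mb/s slaves in balance-rr show up as:
 *
 *	Speed: 2000Mb/s
 *	Duplex: Full
 */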
5366 
5367 static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
5368 				     struct ethtool_drvinfo *drvinfo)
5369 {
5370 	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
5371 	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d",
5372 		 BOND_ABI_VERSION);
5373 }
5374 
5375 static int bond_ethtool_get_ts_info(struct net_device *bond_dev,
5376 				    struct ethtool_ts_info *info)
5377 {
5378 	struct bonding *bond = netdev_priv(bond_dev);
5379 	const struct ethtool_ops *ops;
5380 	struct net_device *real_dev;
5381 	struct phy_device *phydev;
5382 
5383 	real_dev = bond_option_active_slave_get_rcu(bond);
5384 	if (real_dev) {
5385 		ops = real_dev->ethtool_ops;
5386 		phydev = real_dev->phydev;
5387 
5388 		if (phy_has_tsinfo(phydev)) {
5389 			return phy_ts_info(phydev, info);
5390 		} else if (ops->get_ts_info) {
5391 			return ops->get_ts_info(real_dev, info);
5392 		}
5393 	}
5394 
5395 	info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
5396 				SOF_TIMESTAMPING_SOFTWARE;
5397 	info->phc_index = -1;
5398 
5399 	return 0;
5400 }
5401 
5402 static const struct ethtool_ops bond_ethtool_ops = {
5403 	.get_drvinfo		= bond_ethtool_get_drvinfo,
5404 	.get_link		= ethtool_op_get_link,
5405 	.get_link_ksettings	= bond_ethtool_get_link_ksettings,
5406 	.get_ts_info		= bond_ethtool_get_ts_info,
5407 };
5408 
5409 static const struct net_device_ops bond_netdev_ops = {
5410 	.ndo_init		= bond_init,
5411 	.ndo_uninit		= bond_uninit,
5412 	.ndo_open		= bond_open,
5413 	.ndo_stop		= bond_close,
5414 	.ndo_start_xmit		= bond_start_xmit,
5415 	.ndo_select_queue	= bond_select_queue,
5416 	.ndo_get_stats64	= bond_get_stats,
5417 	.ndo_eth_ioctl		= bond_eth_ioctl,
5418 	.ndo_siocbond		= bond_do_ioctl,
5419 	.ndo_siocdevprivate	= bond_siocdevprivate,
5420 	.ndo_change_rx_flags	= bond_change_rx_flags,
5421 	.ndo_set_rx_mode	= bond_set_rx_mode,
5422 	.ndo_change_mtu		= bond_change_mtu,
5423 	.ndo_set_mac_address	= bond_set_mac_address,
5424 	.ndo_neigh_setup	= bond_neigh_setup,
5425 	.ndo_vlan_rx_add_vid	= bond_vlan_rx_add_vid,
5426 	.ndo_vlan_rx_kill_vid	= bond_vlan_rx_kill_vid,
5427 #ifdef CONFIG_NET_POLL_CONTROLLER
5428 	.ndo_netpoll_setup	= bond_netpoll_setup,
5429 	.ndo_netpoll_cleanup	= bond_netpoll_cleanup,
5430 	.ndo_poll_controller	= bond_poll_controller,
5431 #endif
5432 	.ndo_add_slave		= bond_enslave,
5433 	.ndo_del_slave		= bond_release,
5434 	.ndo_fix_features	= bond_fix_features,
5435 	.ndo_features_check	= passthru_features_check,
5436 	.ndo_get_xmit_slave	= bond_xmit_get_slave,
5437 	.ndo_sk_get_lower_dev	= bond_sk_get_lower_dev,
5438 	.ndo_bpf		= bond_xdp,
5439 	.ndo_xdp_xmit           = bond_xdp_xmit,
5440 	.ndo_xdp_get_xmit_slave = bond_xdp_get_xmit_slave,
5441 };
5442 
5443 static const struct device_type bond_type = {
5444 	.name = "bond",
5445 };
5446 
5447 static void bond_destructor(struct net_device *bond_dev)
5448 {
5449 	struct bonding *bond = netdev_priv(bond_dev);
5450 
5451 	if (bond->wq)
5452 		destroy_workqueue(bond->wq);
5453 
5454 	if (bond->rr_tx_counter)
5455 		free_percpu(bond->rr_tx_counter);
5456 }
5457 
5458 void bond_setup(struct net_device *bond_dev)
5459 {
5460 	struct bonding *bond = netdev_priv(bond_dev);
5461 
5462 	spin_lock_init(&bond->mode_lock);
5463 	bond->params = bonding_defaults;
5464 
5465 	/* Initialize pointers */
5466 	bond->dev = bond_dev;
5467 
5468 	/* Initialize the device entry points */
5469 	ether_setup(bond_dev);
5470 	bond_dev->max_mtu = ETH_MAX_MTU;
5471 	bond_dev->netdev_ops = &bond_netdev_ops;
5472 	bond_dev->ethtool_ops = &bond_ethtool_ops;
5473 
5474 	bond_dev->needs_free_netdev = true;
5475 	bond_dev->priv_destructor = bond_destructor;
5476 
5477 	SET_NETDEV_DEVTYPE(bond_dev, &bond_type);
5478 
5479 	/* Initialize the device options */
5480 	bond_dev->flags |= IFF_MASTER;
5481 	bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT | IFF_NO_QUEUE;
5482 	bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
5483 
5484 #ifdef CONFIG_XFRM_OFFLOAD
5485 	/* set up xfrm device ops (only supported in active-backup right now) */
5486 	bond_dev->xfrmdev_ops = &bond_xfrmdev_ops;
5487 	INIT_LIST_HEAD(&bond->ipsec_list);
5488 	spin_lock_init(&bond->ipsec_lock);
5489 #endif /* CONFIG_XFRM_OFFLOAD */
5490 
5491 	/* don't acquire bond device's netif_tx_lock when transmitting */
5492 	bond_dev->features |= NETIF_F_LLTX;
5493 
5494 	/* By default, we declare the bond to be fully
5495 	 * capable of VLAN hardware acceleration. Special
5496 	 * care is taken in the various xmit functions
5497 	 * when there are slaves that are not hw-accel
5498 	 * capable.
5499 	 */
5500 
5501 	/* Don't allow bond devices to change network namespaces. */
5502 	bond_dev->features |= NETIF_F_NETNS_LOCAL;
5503 
5504 	bond_dev->hw_features = BOND_VLAN_FEATURES |
5505 				NETIF_F_HW_VLAN_CTAG_RX |
5506 				NETIF_F_HW_VLAN_CTAG_FILTER;
5507 
5508 	bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
5509 	bond_dev->features |= bond_dev->hw_features;
5510 	bond_dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
5511 #ifdef CONFIG_XFRM_OFFLOAD
5512 	bond_dev->hw_features |= BOND_XFRM_FEATURES;
5513 	/* Only enable XFRM features if this is an active-backup config */
5514 	if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
5515 		bond_dev->features |= BOND_XFRM_FEATURES;
5516 #endif /* CONFIG_XFRM_OFFLOAD */
5517 #if IS_ENABLED(CONFIG_TLS_DEVICE)
5518 	if (bond_sk_check(bond))
5519 		bond_dev->features |= BOND_TLS_FEATURES;
5520 #endif
5521 }
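
/* Editor's note (usage sketch): bond_setup() is the setup callback used both
 * for the module-created bond%d devices and for netlink creation, e.g.:
 *
 *	ip link add bond0 type bond mode active-backup
 */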
5522 
5523 /* Destroy a bonding device.
5524  * Must be under rtnl_lock when this function is called.
5525  */
5526 static void bond_uninit(struct net_device *bond_dev)
5527 {
5528 	struct bonding *bond = netdev_priv(bond_dev);
5529 	struct bond_up_slave *usable, *all;
5530 	struct list_head *iter;
5531 	struct slave *slave;
5532 
5533 	bond_netpoll_cleanup(bond_dev);
5534 
5535 	/* Release the bonded slaves */
5536 	bond_for_each_slave(bond, slave, iter)
5537 		__bond_release_one(bond_dev, slave->dev, true, true);
5538 	netdev_info(bond_dev, "Released all slaves\n");
5539 
5540 	usable = rtnl_dereference(bond->usable_slaves);
5541 	if (usable) {
5542 		RCU_INIT_POINTER(bond->usable_slaves, NULL);
5543 		kfree_rcu(usable, rcu);
5544 	}
5545 
5546 	all = rtnl_dereference(bond->all_slaves);
5547 	if (all) {
5548 		RCU_INIT_POINTER(bond->all_slaves, NULL);
5549 		kfree_rcu(all, rcu);
5550 	}
5551 
5552 	list_del(&bond->bond_list);
5553 
5554 	bond_debug_unregister(bond);
5555 }
5556 
5557 /*------------------------- Module initialization ---------------------------*/
5558 
5559 static int bond_check_params(struct bond_params *params)
5560 {
5561 	int arp_validate_value, fail_over_mac_value, primary_reselect_value, i;
5562 	struct bond_opt_value newval;
5563 	const struct bond_opt_value *valptr;
5564 	int arp_all_targets_value = 0;
5565 	u16 ad_actor_sys_prio = 0;
5566 	u16 ad_user_port_key = 0;
5567 	__be32 arp_target[BOND_MAX_ARP_TARGETS] = { 0 };
5568 	int arp_ip_count;
5569 	int bond_mode	= BOND_MODE_ROUNDROBIN;
5570 	int xmit_hashtype = BOND_XMIT_POLICY_LAYER2;
5571 	int lacp_fast = 0;
5572 	int tlb_dynamic_lb;
5573 
5574 	/* Convert string parameters. */
5575 	if (mode) {
5576 		bond_opt_initstr(&newval, mode);
5577 		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_MODE), &newval);
5578 		if (!valptr) {
5579 			pr_err("Error: Invalid bonding mode \"%s\"\n", mode);
5580 			return -EINVAL;
5581 		}
5582 		bond_mode = valptr->value;
5583 	}
5584 
5585 	if (xmit_hash_policy) {
5586 		if (bond_mode == BOND_MODE_ROUNDROBIN ||
5587 		    bond_mode == BOND_MODE_ACTIVEBACKUP ||
5588 		    bond_mode == BOND_MODE_BROADCAST) {
5589 			pr_info("xmit_hash_policy param is irrelevant in mode %s\n",
5590 				bond_mode_name(bond_mode));
5591 		} else {
5592 			bond_opt_initstr(&newval, xmit_hash_policy);
5593 			valptr = bond_opt_parse(bond_opt_get(BOND_OPT_XMIT_HASH),
5594 						&newval);
5595 			if (!valptr) {
5596 				pr_err("Error: Invalid xmit_hash_policy \"%s\"\n",
5597 				       xmit_hash_policy);
5598 				return -EINVAL;
5599 			}
5600 			xmit_hashtype = valptr->value;
5601 		}
5602 	}
5603 
5604 	if (lacp_rate) {
5605 		if (bond_mode != BOND_MODE_8023AD) {
5606 			pr_info("lacp_rate param is irrelevant in mode %s\n",
5607 				bond_mode_name(bond_mode));
5608 		} else {
5609 			bond_opt_initstr(&newval, lacp_rate);
5610 			valptr = bond_opt_parse(bond_opt_get(BOND_OPT_LACP_RATE),
5611 						&newval);
5612 			if (!valptr) {
5613 				pr_err("Error: Invalid lacp rate \"%s\"\n",
5614 				       lacp_rate);
5615 				return -EINVAL;
5616 			}
5617 			lacp_fast = valptr->value;
5618 		}
5619 	}
5620 
5621 	if (ad_select) {
5622 		bond_opt_initstr(&newval, ad_select);
5623 		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_SELECT),
5624 					&newval);
5625 		if (!valptr) {
5626 			pr_err("Error: Invalid ad_select \"%s\"\n", ad_select);
5627 			return -EINVAL;
5628 		}
5629 		params->ad_select = valptr->value;
5630 		if (bond_mode != BOND_MODE_8023AD)
5631 			pr_warn("ad_select param only affects 802.3ad mode\n");
5632 	} else {
5633 		params->ad_select = BOND_AD_STABLE;
5634 	}
5635 
5636 	if (max_bonds < 0) {
5637 		pr_warn("Warning: max_bonds (%d) not in range %d-%d, so it was reset to BOND_DEFAULT_MAX_BONDS (%d)\n",
5638 			max_bonds, 0, INT_MAX, BOND_DEFAULT_MAX_BONDS);
5639 		max_bonds = BOND_DEFAULT_MAX_BONDS;
5640 	}
5641 
5642 	if (miimon < 0) {
5643 		pr_warn("Warning: miimon module parameter (%d), not in range 0-%d, so it was reset to 0\n",
5644 			miimon, INT_MAX);
5645 		miimon = 0;
5646 	}
5647 
5648 	if (updelay < 0) {
5649 		pr_warn("Warning: updelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
5650 			updelay, INT_MAX);
5651 		updelay = 0;
5652 	}
5653 
5654 	if (downdelay < 0) {
5655 		pr_warn("Warning: downdelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
5656 			downdelay, INT_MAX);
5657 		downdelay = 0;
5658 	}
5659 
5660 	if ((use_carrier != 0) && (use_carrier != 1)) {
5661 		pr_warn("Warning: use_carrier module parameter (%d), not of valid value (0/1), so it was set to 1\n",
5662 			use_carrier);
5663 		use_carrier = 1;
5664 	}
5665 
5666 	if (num_peer_notif < 0 || num_peer_notif > 255) {
5667 		pr_warn("Warning: num_grat_arp/num_unsol_na (%d) not in range 0-255 so it was reset to 1\n",
5668 			num_peer_notif);
5669 		num_peer_notif = 1;
5670 	}
5671 
5672 	/* reset values for 802.3ad/TLB/ALB */
5673 	if (!bond_mode_uses_arp(bond_mode)) {
5674 		if (!miimon) {
5675 			pr_warn("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
5676 			pr_warn("Forcing miimon to 100msec\n");
5677 			miimon = BOND_DEFAULT_MIIMON;
5678 		}
5679 	}
5680 
5681 	if (tx_queues < 1 || tx_queues > 255) {
5682 		pr_warn("Warning: tx_queues (%d) should be between 1 and 255, resetting to %d\n",
5683 			tx_queues, BOND_DEFAULT_TX_QUEUES);
5684 		tx_queues = BOND_DEFAULT_TX_QUEUES;
5685 	}
5686 
5687 	if ((all_slaves_active != 0) && (all_slaves_active != 1)) {
5688 		pr_warn("Warning: all_slaves_active module parameter (%d), not of valid value (0/1), so it was set to 0\n",
5689 			all_slaves_active);
5690 		all_slaves_active = 0;
5691 	}
5692 
5693 	if (resend_igmp < 0 || resend_igmp > 255) {
5694 		pr_warn("Warning: resend_igmp (%d) should be between 0 and 255, resetting to %d\n",
5695 			resend_igmp, BOND_DEFAULT_RESEND_IGMP);
5696 		resend_igmp = BOND_DEFAULT_RESEND_IGMP;
5697 	}
5698 
5699 	bond_opt_initval(&newval, packets_per_slave);
5700 	if (!bond_opt_parse(bond_opt_get(BOND_OPT_PACKETS_PER_SLAVE), &newval)) {
5701 		pr_warn("Warning: packets_per_slave (%d) should be between 0 and %u resetting to 1\n",
5702 			packets_per_slave, USHRT_MAX);
5703 		packets_per_slave = 1;
5704 	}
5705 
5706 	if (bond_mode == BOND_MODE_ALB) {
5707 		pr_notice("In ALB mode you might experience client disconnections upon reconnection of a link if the bonding module updelay parameter (%d msec) is incompatible with the forwarding delay time of the switch\n",
5708 			  updelay);
5709 	}
5710 
5711 	if (!miimon) {
5712 		if (updelay || downdelay) {
5713 			/* just warn the user the up/down delay will have
5714 			 * no effect since miimon is zero...
5715 			 */
5716 			pr_warn("Warning: miimon module parameter not set and updelay (%d) or downdelay (%d) module parameter is set; updelay and downdelay have no effect unless miimon is set\n",
5717 				updelay, downdelay);
5718 		}
5719 	} else {
5720 		/* don't allow arp monitoring */
5721 		if (arp_interval) {
5722 			pr_warn("Warning: miimon (%d) and arp_interval (%d) can't be used simultaneously, disabling ARP monitoring\n",
5723 				miimon, arp_interval);
5724 			arp_interval = 0;
5725 		}
5726 
5727 		if ((updelay % miimon) != 0) {
5728 			pr_warn("Warning: updelay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
5729 				updelay, miimon, (updelay / miimon) * miimon);
5730 		}
5731 
5732 		updelay /= miimon;
5733 
5734 		if ((downdelay % miimon) != 0) {
5735 			pr_warn("Warning: downdelay (%d) is not a multiple of miimon (%d), downdelay rounded to %d ms\n",
5736 				downdelay, miimon,
5737 				(downdelay / miimon) * miimon);
5738 		}
5739 
5740 		downdelay /= miimon;
5741 	}
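
	/* Editor's note: worked example of the conversion above (values are
	 * illustrative).  With miimon = 100 and updelay = 250, the warning
	 * reports a rounding to (250 / 100) * 100 = 200 ms, and updelay is
	 * then stored as 250 / 100 = 2, i.e. in units of miimon intervals.
	 */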
5742 
5743 	if (arp_interval < 0) {
5744 		pr_warn("Warning: arp_interval module parameter (%d), not in range 0-%d, so it was reset to 0\n",
5745 			arp_interval, INT_MAX);
5746 		arp_interval = 0;
5747 	}
5748 
5749 	for (arp_ip_count = 0, i = 0;
5750 	     (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[i]; i++) {
5751 		__be32 ip;
5752 
5753 		/* not a complete check, but good enough to catch mistakes */
5754 		if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) ||
5755 		    !bond_is_ip_target_ok(ip)) {
5756 			pr_warn("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
5757 				arp_ip_target[i]);
5758 			arp_interval = 0;
5759 		} else {
5760 			if (bond_get_targets_ip(arp_target, ip) == -1)
5761 				arp_target[arp_ip_count++] = ip;
5762 			else
5763 				pr_warn("Warning: duplicate address %pI4 in arp_ip_target, skipping\n",
5764 					&ip);
5765 		}
5766 	}
5767 
5768 	if (arp_interval && !arp_ip_count) {
5769 		/* don't allow arping if no arp_ip_target given... */
5770 		pr_warn("Warning: arp_interval module parameter (%d) specified without providing an arp_ip_target parameter, arp_interval was reset to 0\n",
5771 			arp_interval);
5772 		arp_interval = 0;
5773 	}
5774 
5775 	if (arp_validate) {
5776 		if (!arp_interval) {
5777 			pr_err("arp_validate requires arp_interval\n");
5778 			return -EINVAL;
5779 		}
5780 
5781 		bond_opt_initstr(&newval, arp_validate);
5782 		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_VALIDATE),
5783 					&newval);
5784 		if (!valptr) {
5785 			pr_err("Error: invalid arp_validate \"%s\"\n",
5786 			       arp_validate);
5787 			return -EINVAL;
5788 		}
5789 		arp_validate_value = valptr->value;
5790 	} else {
5791 		arp_validate_value = 0;
5792 	}
5793 
5794 	if (arp_all_targets) {
5795 		bond_opt_initstr(&newval, arp_all_targets);
5796 		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_ALL_TARGETS),
5797 					&newval);
5798 		if (!valptr) {
5799 			pr_err("Error: invalid arp_all_targets_value \"%s\"\n",
5800 			       arp_all_targets);
5801 			arp_all_targets_value = 0;
5802 		} else {
5803 			arp_all_targets_value = valptr->value;
5804 		}
5805 	}
5806 
5807 	if (miimon) {
5808 		pr_info("MII link monitoring set to %d ms\n", miimon);
5809 	} else if (arp_interval) {
5810 		valptr = bond_opt_get_val(BOND_OPT_ARP_VALIDATE,
5811 					  arp_validate_value);
5812 		pr_info("ARP monitoring set to %d ms, validate %s, with %d target(s):",
5813 			arp_interval, valptr->string, arp_ip_count);
5814 
5815 		for (i = 0; i < arp_ip_count; i++)
5816 			pr_cont(" %s", arp_ip_target[i]);
5817 
5818 		pr_cont("\n");
5819 
5820 	} else if (max_bonds) {
5821 		/* Neither miimon nor arp_interval is set; we need one so
5822 		 * things work as expected.  See bonding.txt for details.
5823 		 */
5824 		pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details\n");
5825 	}
5826 
5827 	if (primary && !bond_mode_uses_primary(bond_mode)) {
5828 		/* currently, using a primary only makes sense
5829 		 * in active backup, TLB or ALB modes
5830 		 */
5831 		pr_warn("Warning: %s primary device specified but has no effect in %s mode\n",
5832 			primary, bond_mode_name(bond_mode));
5833 		primary = NULL;
5834 	}
5835 
5836 	if (primary && primary_reselect) {
5837 		bond_opt_initstr(&newval, primary_reselect);
5838 		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_PRIMARY_RESELECT),
5839 					&newval);
5840 		if (!valptr) {
5841 			pr_err("Error: Invalid primary_reselect \"%s\"\n",
5842 			       primary_reselect);
5843 			return -EINVAL;
5844 		}
5845 		primary_reselect_value = valptr->value;
5846 	} else {
5847 		primary_reselect_value = BOND_PRI_RESELECT_ALWAYS;
5848 	}
5849 
5850 	if (fail_over_mac) {
5851 		bond_opt_initstr(&newval, fail_over_mac);
5852 		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_FAIL_OVER_MAC),
5853 					&newval);
5854 		if (!valptr) {
5855 			pr_err("Error: invalid fail_over_mac \"%s\"\n",
5856 			       fail_over_mac);
5857 			return -EINVAL;
5858 		}
5859 		fail_over_mac_value = valptr->value;
5860 		if (bond_mode != BOND_MODE_ACTIVEBACKUP)
5861 			pr_warn("Warning: fail_over_mac only affects active-backup mode\n");
5862 	} else {
5863 		fail_over_mac_value = BOND_FOM_NONE;
5864 	}
5865 
5866 	bond_opt_initstr(&newval, "default");
5867 	valptr = bond_opt_parse(
5868 				bond_opt_get(BOND_OPT_AD_ACTOR_SYS_PRIO),
5869 				&newval);
5870 	if (!valptr) {
5871 		pr_err("Error: No ad_actor_sys_prio default value");
5872 		return -EINVAL;
5873 	}
5874 	ad_actor_sys_prio = valptr->value;
5875 
5876 	valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_USER_PORT_KEY),
5877 				&newval);
5878 	if (!valptr) {
5879 		pr_err("Error: No ad_user_port_key default value");
5880 		return -EINVAL;
5881 	}
5882 	ad_user_port_key = valptr->value;
5883 
5884 	bond_opt_initstr(&newval, "default");
5885 	valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB), &newval);
5886 	if (!valptr) {
5887 		pr_err("Error: No tlb_dynamic_lb default value");
5888 		return -EINVAL;
5889 	}
5890 	tlb_dynamic_lb = valptr->value;
5891 
5892 	if (lp_interval == 0) {
5893 		pr_warn("Warning: ip_interval must be between 1 and %d, so it was reset to %d\n",
5894 			INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL);
5895 		lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
5896 	}
5897 
5898 	/* fill params struct with the proper values */
5899 	params->mode = bond_mode;
5900 	params->xmit_policy = xmit_hashtype;
5901 	params->miimon = miimon;
5902 	params->num_peer_notif = num_peer_notif;
5903 	params->arp_interval = arp_interval;
5904 	params->arp_validate = arp_validate_value;
5905 	params->arp_all_targets = arp_all_targets_value;
5906 	params->missed_max = 2;
5907 	params->updelay = updelay;
5908 	params->downdelay = downdelay;
5909 	params->peer_notif_delay = 0;
5910 	params->use_carrier = use_carrier;
5911 	params->lacp_active = 1;
5912 	params->lacp_fast = lacp_fast;
5913 	params->primary[0] = 0;
5914 	params->primary_reselect = primary_reselect_value;
5915 	params->fail_over_mac = fail_over_mac_value;
5916 	params->tx_queues = tx_queues;
5917 	params->all_slaves_active = all_slaves_active;
5918 	params->resend_igmp = resend_igmp;
5919 	params->min_links = min_links;
5920 	params->lp_interval = lp_interval;
5921 	params->packets_per_slave = packets_per_slave;
5922 	params->tlb_dynamic_lb = tlb_dynamic_lb;
5923 	params->ad_actor_sys_prio = ad_actor_sys_prio;
5924 	eth_zero_addr(params->ad_actor_system);
5925 	params->ad_user_port_key = ad_user_port_key;
5926 	if (packets_per_slave > 0) {
5927 		params->reciprocal_packets_per_slave =
5928 			reciprocal_value(packets_per_slave);
5929 	} else {
5930 		/* reciprocal_packets_per_slave is unused if
5931 		 * packets_per_slave is 0 or 1; just initialize it
5932 		 */
5933 		params->reciprocal_packets_per_slave =
5934 			(struct reciprocal_value) { 0 };
5935 	}
5936 
5937 	if (primary)
5938 		strscpy_pad(params->primary, primary, sizeof(params->primary));
5939 
5940 	memcpy(params->arp_targets, arp_target, sizeof(arp_target));
5941 
5942 	return 0;
5943 }
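
/* Editor's note (usage sketch; the parameter values are illustrative): the
 * module parameters validated above are supplied at load time, e.g.:
 *
 *	modprobe bonding mode=802.3ad miimon=100 lacp_rate=fast max_bonds=1
 *
 * As the checks above show, invalid strings are rejected with -EINVAL while
 * most out-of-range numeric values are reset to a safe default with a
 * warning.
 */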
5944 
5945 /* Called from registration process */
5946 static int bond_init(struct net_device *bond_dev)
5947 {
5948 	struct bonding *bond = netdev_priv(bond_dev);
5949 	struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
5950 
5951 	netdev_dbg(bond_dev, "Begin bond_init\n");
5952 
5953 	bond->wq = alloc_ordered_workqueue(bond_dev->name, WQ_MEM_RECLAIM);
5954 	if (!bond->wq)
5955 		return -ENOMEM;
5956 
5957 	if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN) {
5958 		bond->rr_tx_counter = alloc_percpu(u32);
5959 		if (!bond->rr_tx_counter) {
5960 			destroy_workqueue(bond->wq);
5961 			bond->wq = NULL;
5962 			return -ENOMEM;
5963 		}
5964 	}
5965 
5966 	spin_lock_init(&bond->stats_lock);
5967 	netdev_lockdep_set_classes(bond_dev);
5968 
5969 	list_add_tail(&bond->bond_list, &bn->dev_list);
5970 
5971 	bond_prepare_sysfs_group(bond);
5972 
5973 	bond_debug_register(bond);
5974 
5975 	/* Ensure valid dev_addr */
5976 	if (is_zero_ether_addr(bond_dev->dev_addr) &&
5977 	    bond_dev->addr_assign_type == NET_ADDR_PERM)
5978 		eth_hw_addr_random(bond_dev);
5979 
5980 	return 0;
5981 }
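
/* Editor's note: the per-cpu rr_tx_counter allocated in bond_init() above
 * feeds the round-robin slave rotation (including the packets_per_slave
 * logic); it is only needed, and therefore only allocated, in balance-rr
 * mode.
 */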
5982 
5983 unsigned int bond_get_num_tx_queues(void)
5984 {
5985 	return tx_queues;
5986 }
5987 
5988 /* Create a new bond based on the specified name and bonding parameters.
5989  * If name is NULL, obtain a suitable "bond%d" name for us.
5990  * Caller must NOT hold rtnl_lock; we need to release it here before we
5991  * set up our sysfs entries.
5992  */
5993 int bond_create(struct net *net, const char *name)
5994 {
5995 	struct net_device *bond_dev;
5996 	struct bonding *bond;
5997 	struct alb_bond_info *bond_info;
5998 	int res;
5999 
6000 	rtnl_lock();
6001 
6002 	bond_dev = alloc_netdev_mq(sizeof(struct bonding),
6003 				   name ? name : "bond%d", NET_NAME_UNKNOWN,
6004 				   bond_setup, tx_queues);
6005 	if (!bond_dev) {
6006 		pr_err("%s: eek! can't alloc netdev!\n", name);
6007 		rtnl_unlock();
6008 		return -ENOMEM;
6009 	}
6010 
6011 	/*
6012 	 * Initialize rx_hashtbl_used_head to RLB_NULL_INDEX.
6013 	 * The zeroed default is wrong, since 0 is a valid hash table index.
6014 	 */
6015 	bond = netdev_priv(bond_dev);
6016 	bond_info = &(BOND_ALB_INFO(bond));
6017 	bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX;
6018 
6019 	dev_net_set(bond_dev, net);
6020 	bond_dev->rtnl_link_ops = &bond_link_ops;
6021 
6022 	res = register_netdevice(bond_dev);
6023 	if (res < 0) {
6024 		free_netdev(bond_dev);
6025 		rtnl_unlock();
6026 
6027 		return res;
6028 	}
6029 
6030 	netif_carrier_off(bond_dev);
6031 
6032 	bond_work_init_all(bond);
6033 
6034 	rtnl_unlock();
6035 	return 0;
6036 }
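
/* Editor's note (usage sketch): besides the max_bonds instances created at
 * module load and the netlink path, bond_create() also backs the sysfs
 * interface, e.g.:
 *
 *	echo +bond1 > /sys/class/net/bonding_masters
 */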
6037 
6038 static int __net_init bond_net_init(struct net *net)
6039 {
6040 	struct bond_net *bn = net_generic(net, bond_net_id);
6041 
6042 	bn->net = net;
6043 	INIT_LIST_HEAD(&bn->dev_list);
6044 
6045 	bond_create_proc_dir(bn);
6046 	bond_create_sysfs(bn);
6047 
6048 	return 0;
6049 }
6050 
6051 static void __net_exit bond_net_exit(struct net *net)
6052 {
6053 	struct bond_net *bn = net_generic(net, bond_net_id);
6054 	struct bonding *bond, *tmp_bond;
6055 	LIST_HEAD(list);
6056 
6057 	bond_destroy_sysfs(bn);
6058 
6059 	/* Kill off any bonds created after unregistering bond rtnl ops */
6060 	rtnl_lock();
6061 	list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
6062 		unregister_netdevice_queue(bond->dev, &list);
6063 	unregister_netdevice_many(&list);
6064 	rtnl_unlock();
6065 
6066 	bond_destroy_proc_dir(bn);
6067 }
6068 
6069 static struct pernet_operations bond_net_ops = {
6070 	.init = bond_net_init,
6071 	.exit = bond_net_exit,
6072 	.id   = &bond_net_id,
6073 	.size = sizeof(struct bond_net),
6074 };
6075 
6076 static int __init bonding_init(void)
6077 {
6078 	int i;
6079 	int res;
6080 
6081 	res = bond_check_params(&bonding_defaults);
6082 	if (res)
6083 		goto out;
6084 
6085 	res = register_pernet_subsys(&bond_net_ops);
6086 	if (res)
6087 		goto out;
6088 
6089 	res = bond_netlink_init();
6090 	if (res)
6091 		goto err_link;
6092 
6093 	bond_create_debugfs();
6094 
6095 	for (i = 0; i < max_bonds; i++) {
6096 		res = bond_create(&init_net, NULL);
6097 		if (res)
6098 			goto err;
6099 	}
6100 
6101 	skb_flow_dissector_init(&flow_keys_bonding,
6102 				flow_keys_bonding_keys,
6103 				ARRAY_SIZE(flow_keys_bonding_keys));
6104 
6105 	register_netdevice_notifier(&bond_netdev_notifier);
6106 out:
6107 	return res;
6108 err:
6109 	bond_destroy_debugfs();
6110 	bond_netlink_fini();
6111 err_link:
6112 	unregister_pernet_subsys(&bond_net_ops);
6113 	goto out;
6115 }
6116 
6117 static void __exit bonding_exit(void)
6118 {
6119 	unregister_netdevice_notifier(&bond_netdev_notifier);
6120 
6121 	bond_destroy_debugfs();
6122 
6123 	bond_netlink_fini();
6124 	unregister_pernet_subsys(&bond_net_ops);
6125 
6126 #ifdef CONFIG_NET_POLL_CONTROLLER
6127 	/* Make sure we don't have an imbalance on our netpoll blocking */
6128 	WARN_ON(atomic_read(&netpoll_block_tx));
6129 #endif
6130 }
6131 
6132 module_init(bonding_init);
6133 module_exit(bonding_exit);
6134 MODULE_LICENSE("GPL");
6135 MODULE_DESCRIPTION(DRV_DESCRIPTION);
6136 MODULE_AUTHOR("Thomas Davis, tadavis@lbl.gov and many others");
6137