1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * drivers/net/team/team.c - Network team device driver
4  * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
5  */
6 
7 #include <linux/ethtool.h>
8 #include <linux/kernel.h>
9 #include <linux/types.h>
10 #include <linux/module.h>
11 #include <linux/init.h>
12 #include <linux/slab.h>
13 #include <linux/rcupdate.h>
14 #include <linux/errno.h>
15 #include <linux/ctype.h>
16 #include <linux/notifier.h>
17 #include <linux/netdevice.h>
18 #include <linux/netpoll.h>
19 #include <linux/if_vlan.h>
20 #include <linux/if_arp.h>
21 #include <linux/socket.h>
22 #include <linux/etherdevice.h>
23 #include <linux/rtnetlink.h>
24 #include <net/rtnetlink.h>
25 #include <net/genetlink.h>
26 #include <net/netlink.h>
27 #include <net/sch_generic.h>
28 #include <generated/utsrelease.h>
29 #include <linux/if_team.h>
30 
31 #define DRV_NAME "team"
32 
33 
34 /**********
35  * Helpers
36  **********/
37 
38 static struct team_port *team_port_get_rtnl(const struct net_device *dev)
39 {
40 	struct team_port *port = rtnl_dereference(dev->rx_handler_data);
41 
42 	return netif_is_team_port(dev) ? port : NULL;
43 }
44 
45 /*
46  * Since the ability to change the device address of an open port device is
47  * tested in team_port_add, this function can be called without checking the return value
48  */
49 static int __set_port_dev_addr(struct net_device *port_dev,
50 			       const unsigned char *dev_addr)
51 {
52 	struct sockaddr_storage addr;
53 
54 	memcpy(addr.__data, dev_addr, port_dev->addr_len);
55 	addr.ss_family = port_dev->type;
56 	return dev_set_mac_address(port_dev, (struct sockaddr *)&addr, NULL);
57 }
58 
59 static int team_port_set_orig_dev_addr(struct team_port *port)
60 {
61 	return __set_port_dev_addr(port->dev, port->orig.dev_addr);
62 }
63 
64 static int team_port_set_team_dev_addr(struct team *team,
65 				       struct team_port *port)
66 {
67 	return __set_port_dev_addr(port->dev, team->dev->dev_addr);
68 }
69 
70 int team_modeop_port_enter(struct team *team, struct team_port *port)
71 {
72 	return team_port_set_team_dev_addr(team, port);
73 }
74 EXPORT_SYMBOL(team_modeop_port_enter);
75 
76 void team_modeop_port_change_dev_addr(struct team *team,
77 				      struct team_port *port)
78 {
79 	team_port_set_team_dev_addr(team, port);
80 }
81 EXPORT_SYMBOL(team_modeop_port_change_dev_addr);
82 
83 static void team_lower_state_changed(struct team_port *port)
84 {
85 	struct netdev_lag_lower_state_info info;
86 
87 	info.link_up = port->linkup;
88 	info.tx_enabled = team_port_enabled(port);
89 	netdev_lower_state_changed(port->dev, &info);
90 }
91 
92 static void team_refresh_port_linkup(struct team_port *port)
93 {
94 	bool new_linkup = port->user.linkup_enabled ? port->user.linkup :
95 						      port->state.linkup;
96 
97 	if (port->linkup != new_linkup) {
98 		port->linkup = new_linkup;
99 		team_lower_state_changed(port);
100 	}
101 }
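
/*
 * Illustrative summary of the resolution above (restating the code, not
 * adding logic):
 *
 *	effective linkup = user.linkup_enabled ? user.linkup : state.linkup;
 *
 * Once userspace sets the per-port "user_linkup_enabled" option, the
 * kernel-observed carrier state (state.linkup) is ignored and the value of
 * the "user_linkup" option wins. This is what lets a daemon such as teamd
 * drive port link state from its own link monitors.
 */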
102 
103 
104 /*******************
105  * Options handling
106  *******************/
107 
108 struct team_option_inst { /* One for each option instance */
109 	struct list_head list;
110 	struct list_head tmp_list;
111 	struct team_option *option;
112 	struct team_option_inst_info info;
113 	bool changed;
114 	bool removed;
115 };
116 
117 static struct team_option *__team_find_option(struct team *team,
118 					      const char *opt_name)
119 {
120 	struct team_option *option;
121 
122 	list_for_each_entry(option, &team->option_list, list) {
123 		if (strcmp(option->name, opt_name) == 0)
124 			return option;
125 	}
126 	return NULL;
127 }
128 
129 static void __team_option_inst_del(struct team_option_inst *opt_inst)
130 {
131 	list_del(&opt_inst->list);
132 	kfree(opt_inst);
133 }
134 
135 static void __team_option_inst_del_option(struct team *team,
136 					  struct team_option *option)
137 {
138 	struct team_option_inst *opt_inst, *tmp;
139 
140 	list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
141 		if (opt_inst->option == option)
142 			__team_option_inst_del(opt_inst);
143 	}
144 }
145 
146 static int __team_option_inst_add(struct team *team, struct team_option *option,
147 				  struct team_port *port)
148 {
149 	struct team_option_inst *opt_inst;
150 	unsigned int array_size;
151 	unsigned int i;
152 	int err;
153 
154 	array_size = option->array_size;
155 	if (!array_size)
156 		array_size = 1; /* No array but still need one instance */
157 
158 	for (i = 0; i < array_size; i++) {
159 		opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL);
160 		if (!opt_inst)
161 			return -ENOMEM;
162 		opt_inst->option = option;
163 		opt_inst->info.port = port;
164 		opt_inst->info.array_index = i;
165 		opt_inst->changed = true;
166 		opt_inst->removed = false;
167 		list_add_tail(&opt_inst->list, &team->option_inst_list);
168 		if (option->init) {
169 			err = option->init(team, &opt_inst->info);
170 			if (err)
171 				return err;
172 		}
173 
174 	}
175 	return 0;
176 }
177 
178 static int __team_option_inst_add_option(struct team *team,
179 					 struct team_option *option)
180 {
181 	int err;
182 
183 	if (!option->per_port) {
184 		err = __team_option_inst_add(team, option, NULL);
185 		if (err)
186 			goto inst_del_option;
187 	}
188 	return 0;
189 
190 inst_del_option:
191 	__team_option_inst_del_option(team, option);
192 	return err;
193 }
194 
195 static void __team_option_inst_mark_removed_option(struct team *team,
196 						   struct team_option *option)
197 {
198 	struct team_option_inst *opt_inst;
199 
200 	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
201 		if (opt_inst->option == option) {
202 			opt_inst->changed = true;
203 			opt_inst->removed = true;
204 		}
205 	}
206 }
207 
208 static void __team_option_inst_del_port(struct team *team,
209 					struct team_port *port)
210 {
211 	struct team_option_inst *opt_inst, *tmp;
212 
213 	list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
214 		if (opt_inst->option->per_port &&
215 		    opt_inst->info.port == port)
216 			__team_option_inst_del(opt_inst);
217 	}
218 }
219 
220 static int __team_option_inst_add_port(struct team *team,
221 				       struct team_port *port)
222 {
223 	struct team_option *option;
224 	int err;
225 
226 	list_for_each_entry(option, &team->option_list, list) {
227 		if (!option->per_port)
228 			continue;
229 		err = __team_option_inst_add(team, option, port);
230 		if (err)
231 			goto inst_del_port;
232 	}
233 	return 0;
234 
235 inst_del_port:
236 	__team_option_inst_del_port(team, port);
237 	return err;
238 }
239 
240 static void __team_option_inst_mark_removed_port(struct team *team,
241 						 struct team_port *port)
242 {
243 	struct team_option_inst *opt_inst;
244 
245 	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
246 		if (opt_inst->info.port == port) {
247 			opt_inst->changed = true;
248 			opt_inst->removed = true;
249 		}
250 	}
251 }
252 
253 static int __team_options_register(struct team *team,
254 				   const struct team_option *option,
255 				   size_t option_count)
256 {
257 	int i;
258 	struct team_option **dst_opts;
259 	int err;
260 
261 	dst_opts = kcalloc(option_count, sizeof(struct team_option *),
262 			   GFP_KERNEL);
263 	if (!dst_opts)
264 		return -ENOMEM;
265 	for (i = 0; i < option_count; i++, option++) {
266 		if (__team_find_option(team, option->name)) {
267 			err = -EEXIST;
268 			goto alloc_rollback;
269 		}
270 		dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL);
271 		if (!dst_opts[i]) {
272 			err = -ENOMEM;
273 			goto alloc_rollback;
274 		}
275 	}
276 
277 	for (i = 0; i < option_count; i++) {
278 		err = __team_option_inst_add_option(team, dst_opts[i]);
279 		if (err)
280 			goto inst_rollback;
281 		list_add_tail(&dst_opts[i]->list, &team->option_list);
282 	}
283 
284 	kfree(dst_opts);
285 	return 0;
286 
287 inst_rollback:
288 	for (i--; i >= 0; i--)
289 		__team_option_inst_del_option(team, dst_opts[i]);
290 
291 	i = option_count;
292 alloc_rollback:
293 	for (i--; i >= 0; i--)
294 		kfree(dst_opts[i]);
295 
296 	kfree(dst_opts);
297 	return err;
298 }
299 
300 static void __team_options_mark_removed(struct team *team,
301 					const struct team_option *option,
302 					size_t option_count)
303 {
304 	int i;
305 
306 	for (i = 0; i < option_count; i++, option++) {
307 		struct team_option *del_opt;
308 
309 		del_opt = __team_find_option(team, option->name);
310 		if (del_opt)
311 			__team_option_inst_mark_removed_option(team, del_opt);
312 	}
313 }
314 
315 static void __team_options_unregister(struct team *team,
316 				      const struct team_option *option,
317 				      size_t option_count)
318 {
319 	int i;
320 
321 	for (i = 0; i < option_count; i++, option++) {
322 		struct team_option *del_opt;
323 
324 		del_opt = __team_find_option(team, option->name);
325 		if (del_opt) {
326 			__team_option_inst_del_option(team, del_opt);
327 			list_del(&del_opt->list);
328 			kfree(del_opt);
329 		}
330 	}
331 }
332 
333 static void __team_options_change_check(struct team *team);
334 
335 int team_options_register(struct team *team,
336 			  const struct team_option *option,
337 			  size_t option_count)
338 {
339 	int err;
340 
341 	err = __team_options_register(team, option, option_count);
342 	if (err)
343 		return err;
344 	__team_options_change_check(team);
345 	return 0;
346 }
347 EXPORT_SYMBOL(team_options_register);
348 
349 void team_options_unregister(struct team *team,
350 			     const struct team_option *option,
351 			     size_t option_count)
352 {
353 	__team_options_mark_removed(team, option, option_count);
354 	__team_options_change_check(team);
355 	__team_options_unregister(team, option, option_count);
356 }
357 EXPORT_SYMBOL(team_options_unregister);
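
/*
 * Usage sketch: a mode module typically registers its private options from
 * its init op and unregisters them again on exit. The names below are
 * illustrative only, loosely modeled on the active-backup mode, and are not
 * defined in this file:
 *
 *	static const struct team_option ab_options[] = {
 *		{
 *			.name = "activeport",
 *			.type = TEAM_OPTION_TYPE_U32,
 *			.getter = ab_active_port_get,
 *			.setter = ab_active_port_set,
 *		},
 *	};
 *
 *	static int ab_init(struct team *team)
 *	{
 *		return team_options_register(team, ab_options,
 *					     ARRAY_SIZE(ab_options));
 *	}
 *
 *	static void ab_exit(struct team *team)
 *	{
 *		team_options_unregister(team, ab_options,
 *					ARRAY_SIZE(ab_options));
 *	}
 */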
358 
359 static int team_option_get(struct team *team,
360 			   struct team_option_inst *opt_inst,
361 			   struct team_gsetter_ctx *ctx)
362 {
363 	if (!opt_inst->option->getter)
364 		return -EOPNOTSUPP;
365 	return opt_inst->option->getter(team, ctx);
366 }
367 
368 static int team_option_set(struct team *team,
369 			   struct team_option_inst *opt_inst,
370 			   struct team_gsetter_ctx *ctx)
371 {
372 	if (!opt_inst->option->setter)
373 		return -EOPNOTSUPP;
374 	return opt_inst->option->setter(team, ctx);
375 }
376 
377 void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info)
378 {
379 	struct team_option_inst *opt_inst;
380 
381 	opt_inst = container_of(opt_inst_info, struct team_option_inst, info);
382 	opt_inst->changed = true;
383 }
384 EXPORT_SYMBOL(team_option_inst_set_change);
385 
386 void team_options_change_check(struct team *team)
387 {
388 	__team_options_change_check(team);
389 }
390 EXPORT_SYMBOL(team_options_change_check);
391 
392 
393 /****************
394  * Mode handling
395  ****************/
396 
397 static LIST_HEAD(mode_list);
398 static DEFINE_SPINLOCK(mode_list_lock);
399 
400 struct team_mode_item {
401 	struct list_head list;
402 	const struct team_mode *mode;
403 };
404 
405 static struct team_mode_item *__find_mode(const char *kind)
406 {
407 	struct team_mode_item *mitem;
408 
409 	list_for_each_entry(mitem, &mode_list, list) {
410 		if (strcmp(mitem->mode->kind, kind) == 0)
411 			return mitem;
412 	}
413 	return NULL;
414 }
415 
416 static bool is_good_mode_name(const char *name)
417 {
418 	while (*name != '\0') {
419 		if (!isalpha(*name) && !isdigit(*name) && *name != '_')
420 			return false;
421 		name++;
422 	}
423 	return true;
424 }
425 
426 int team_mode_register(const struct team_mode *mode)
427 {
428 	int err = 0;
429 	struct team_mode_item *mitem;
430 
431 	if (!is_good_mode_name(mode->kind) ||
432 	    mode->priv_size > TEAM_MODE_PRIV_SIZE)
433 		return -EINVAL;
434 
435 	mitem = kmalloc(sizeof(*mitem), GFP_KERNEL);
436 	if (!mitem)
437 		return -ENOMEM;
438 
439 	spin_lock(&mode_list_lock);
440 	if (__find_mode(mode->kind)) {
441 		err = -EEXIST;
442 		kfree(mitem);
443 		goto unlock;
444 	}
445 	mitem->mode = mode;
446 	list_add_tail(&mitem->list, &mode_list);
447 unlock:
448 	spin_unlock(&mode_list_lock);
449 	return err;
450 }
451 EXPORT_SYMBOL(team_mode_register);
452 
453 void team_mode_unregister(const struct team_mode *mode)
454 {
455 	struct team_mode_item *mitem;
456 
457 	spin_lock(&mode_list_lock);
458 	mitem = __find_mode(mode->kind);
459 	if (mitem) {
460 		list_del_init(&mitem->list);
461 		kfree(mitem);
462 	}
463 	spin_unlock(&mode_list_lock);
464 }
465 EXPORT_SYMBOL(team_mode_unregister);
466 
467 static const struct team_mode *team_mode_get(const char *kind)
468 {
469 	struct team_mode_item *mitem;
470 	const struct team_mode *mode = NULL;
471 
472 	if (!try_module_get(THIS_MODULE))
473 		return NULL;
474 
475 	spin_lock(&mode_list_lock);
476 	mitem = __find_mode(kind);
477 	if (!mitem) {
478 		spin_unlock(&mode_list_lock);
479 		request_module("team-mode-%s", kind);
480 		spin_lock(&mode_list_lock);
481 		mitem = __find_mode(kind);
482 	}
483 	if (mitem) {
484 		mode = mitem->mode;
485 		if (!try_module_get(mode->owner))
486 			mode = NULL;
487 	}
488 
489 	spin_unlock(&mode_list_lock);
490 	module_put(THIS_MODULE);
491 	return mode;
492 }
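
/*
 * team_mode_get() depends on mode modules declaring a "team-mode-<kind>"
 * alias so the request_module() call above can autoload them. A mode module
 * would typically carry (sketch, using the helper from <linux/if_team.h>):
 *
 *	MODULE_ALIAS_TEAM_MODE("activebackup");
 */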
493 
494 static void team_mode_put(const struct team_mode *mode)
495 {
496 	module_put(mode->owner);
497 }
498 
499 static bool team_dummy_transmit(struct team *team, struct sk_buff *skb)
500 {
501 	dev_kfree_skb_any(skb);
502 	return false;
503 }
504 
505 static rx_handler_result_t team_dummy_receive(struct team *team,
506 					      struct team_port *port,
507 					      struct sk_buff *skb)
508 {
509 	return RX_HANDLER_ANOTHER;
510 }
511 
512 static const struct team_mode __team_no_mode = {
513 	.kind		= "*NOMODE*",
514 };
515 
516 static bool team_is_mode_set(struct team *team)
517 {
518 	return team->mode != &__team_no_mode;
519 }
520 
521 static void team_set_no_mode(struct team *team)
522 {
523 	team->user_carrier_enabled = false;
524 	team->mode = &__team_no_mode;
525 }
526 
527 static void team_adjust_ops(struct team *team)
528 {
529 	/*
530 	 * To avoid checks in rx/tx skb paths, ensure here that non-null and
531 	 * correct ops are always set.
532 	 */
533 
534 	if (!team->en_port_count || !team_is_mode_set(team) ||
535 	    !team->mode->ops->transmit)
536 		team->ops.transmit = team_dummy_transmit;
537 	else
538 		team->ops.transmit = team->mode->ops->transmit;
539 
540 	if (!team->en_port_count || !team_is_mode_set(team) ||
541 	    !team->mode->ops->receive)
542 		team->ops.receive = team_dummy_receive;
543 	else
544 		team->ops.receive = team->mode->ops->receive;
545 }
546 
547 /*
548  * We can benefit from the fact that it is guaranteed that no port is
549  * present at the time of mode change. Therefore no packets are in flight,
550  * so there's no need to set mode operations in any special way.
551  */
552 static int __team_change_mode(struct team *team,
553 			      const struct team_mode *new_mode)
554 {
555 	/* Check if mode was previously set and do cleanup if so */
556 	if (team_is_mode_set(team)) {
557 		void (*exit_op)(struct team *team) = team->ops.exit;
558 
559 		/* Clear ops area so no callback is called any longer */
560 		memset(&team->ops, 0, sizeof(struct team_mode_ops));
561 		team_adjust_ops(team);
562 
563 		if (exit_op)
564 			exit_op(team);
565 		team_mode_put(team->mode);
566 		team_set_no_mode(team);
567 		/* zero private data area */
568 		memset(&team->mode_priv, 0,
569 		       sizeof(struct team) - offsetof(struct team, mode_priv));
570 	}
571 
572 	if (!new_mode)
573 		return 0;
574 
575 	if (new_mode->ops->init) {
576 		int err;
577 
578 		err = new_mode->ops->init(team);
579 		if (err)
580 			return err;
581 	}
582 
583 	team->mode = new_mode;
584 	memcpy(&team->ops, new_mode->ops, sizeof(struct team_mode_ops));
585 	team_adjust_ops(team);
586 
587 	return 0;
588 }
589 
590 static int team_change_mode(struct team *team, const char *kind)
591 {
592 	const struct team_mode *new_mode;
593 	struct net_device *dev = team->dev;
594 	int err;
595 
596 	if (!list_empty(&team->port_list)) {
597 		netdev_err(dev, "No ports can be present during mode change\n");
598 		return -EBUSY;
599 	}
600 
601 	if (team_is_mode_set(team) && strcmp(team->mode->kind, kind) == 0) {
602 		netdev_err(dev, "Unable to change to the same mode the team is in\n");
603 		return -EINVAL;
604 	}
605 
606 	new_mode = team_mode_get(kind);
607 	if (!new_mode) {
608 		netdev_err(dev, "Mode \"%s\" not found\n", kind);
609 		return -EINVAL;
610 	}
611 
612 	err = __team_change_mode(team, new_mode);
613 	if (err) {
614 		netdev_err(dev, "Failed to change to mode \"%s\"\n", kind);
615 		team_mode_put(new_mode);
616 		return err;
617 	}
618 
619 	netdev_info(dev, "Mode changed to \"%s\"\n", kind);
620 	return 0;
621 }
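
/*
 * Usage sketch (userspace commands, shown for illustration only): the mode
 * is changed through the "mode" string option while the team has no ports,
 * e.g. with the libteam teamnl tool:
 *
 *	# ip link add team0 type team
 *	# teamnl team0 setoption mode activebackup
 */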
622 
623 
624 /*********************
625  * Peers notification
626  *********************/
627 
628 static void team_notify_peers_work(struct work_struct *work)
629 {
630 	struct team *team;
631 	int val;
632 
633 	team = container_of(work, struct team, notify_peers.dw.work);
634 
635 	if (!rtnl_trylock()) {
636 		schedule_delayed_work(&team->notify_peers.dw, 0);
637 		return;
638 	}
639 	val = atomic_dec_if_positive(&team->notify_peers.count_pending);
640 	if (val < 0) {
641 		rtnl_unlock();
642 		return;
643 	}
644 	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
645 	rtnl_unlock();
646 	if (val)
647 		schedule_delayed_work(&team->notify_peers.dw,
648 				      msecs_to_jiffies(team->notify_peers.interval));
649 }
650 
651 static void team_notify_peers(struct team *team)
652 {
653 	if (!team->notify_peers.count || !netif_running(team->dev))
654 		return;
655 	atomic_add(team->notify_peers.count, &team->notify_peers.count_pending);
656 	schedule_delayed_work(&team->notify_peers.dw, 0);
657 }
658 
659 static void team_notify_peers_init(struct team *team)
660 {
661 	INIT_DELAYED_WORK(&team->notify_peers.dw, team_notify_peers_work);
662 }
663 
664 static void team_notify_peers_fini(struct team *team)
665 {
666 	cancel_delayed_work_sync(&team->notify_peers.dw);
667 }
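
/*
 * NETDEV_NOTIFY_PEERS asks the stack to (re)advertise the device address to
 * peers, typically via gratuitous ARP / unsolicited neighbour
 * advertisements. The count/interval pair used above therefore produces
 * "count" notifications spaced "interval" milliseconds apart, which helps
 * peers update their caches quickly after a failover.
 */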
668 
669 
670 /*******************************
671  * Send multicast group rejoins
672  *******************************/
673 
674 static void team_mcast_rejoin_work(struct work_struct *work)
675 {
676 	struct team *team;
677 	int val;
678 
679 	team = container_of(work, struct team, mcast_rejoin.dw.work);
680 
681 	if (!rtnl_trylock()) {
682 		schedule_delayed_work(&team->mcast_rejoin.dw, 0);
683 		return;
684 	}
685 	val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending);
686 	if (val < 0) {
687 		rtnl_unlock();
688 		return;
689 	}
690 	call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
691 	rtnl_unlock();
692 	if (val)
693 		schedule_delayed_work(&team->mcast_rejoin.dw,
694 				      msecs_to_jiffies(team->mcast_rejoin.interval));
695 }
696 
697 static void team_mcast_rejoin(struct team *team)
698 {
699 	if (!team->mcast_rejoin.count || !netif_running(team->dev))
700 		return;
701 	atomic_add(team->mcast_rejoin.count, &team->mcast_rejoin.count_pending);
702 	schedule_delayed_work(&team->mcast_rejoin.dw, 0);
703 }
704 
705 static void team_mcast_rejoin_init(struct team *team)
706 {
707 	INIT_DELAYED_WORK(&team->mcast_rejoin.dw, team_mcast_rejoin_work);
708 }
709 
710 static void team_mcast_rejoin_fini(struct team *team)
711 {
712 	cancel_delayed_work_sync(&team->mcast_rejoin.dw);
713 }
714 
715 
716 /************************
717  * Rx path frame handler
718  ************************/
719 
720 /* note: already called with rcu_read_lock */
721 static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
722 {
723 	struct sk_buff *skb = *pskb;
724 	struct team_port *port;
725 	struct team *team;
726 	rx_handler_result_t res;
727 
728 	skb = skb_share_check(skb, GFP_ATOMIC);
729 	if (!skb)
730 		return RX_HANDLER_CONSUMED;
731 
732 	*pskb = skb;
733 
734 	port = team_port_get_rcu(skb->dev);
735 	team = port->team;
736 	if (!team_port_enabled(port)) {
737 		if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
738 			/* link-local packets are mostly useful when the stack receives them
739 			 * on the link they arrive on.
740 			 */
741 			return RX_HANDLER_PASS;
742 		/* allow exact match delivery for disabled ports */
743 		res = RX_HANDLER_EXACT;
744 	} else {
745 		res = team->ops.receive(team, port, skb);
746 	}
747 	if (res == RX_HANDLER_ANOTHER) {
748 		struct team_pcpu_stats *pcpu_stats;
749 
750 		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
751 		u64_stats_update_begin(&pcpu_stats->syncp);
752 		u64_stats_inc(&pcpu_stats->rx_packets);
753 		u64_stats_add(&pcpu_stats->rx_bytes, skb->len);
754 		if (skb->pkt_type == PACKET_MULTICAST)
755 			u64_stats_inc(&pcpu_stats->rx_multicast);
756 		u64_stats_update_end(&pcpu_stats->syncp);
757 
758 		skb->dev = team->dev;
759 	} else if (res == RX_HANDLER_EXACT) {
760 		this_cpu_inc(team->pcpu_stats->rx_nohandler);
761 	} else {
762 		this_cpu_inc(team->pcpu_stats->rx_dropped);
763 	}
764 
765 	return res;
766 }
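
/*
 * The return value steers __netif_receive_skb_core(): RX_HANDLER_ANOTHER
 * reruns receive processing with skb->dev rewritten to the team device
 * (normal aggregated delivery), RX_HANDLER_EXACT restricts delivery to
 * exact-match packet taps on the port device, and RX_HANDLER_PASS hands the
 * skb back to the stack unchanged.
 */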
767 
768 
769 /*************************************
770  * Multiqueue Tx port select override
771  *************************************/
772 
773 static int team_queue_override_init(struct team *team)
774 {
775 	struct list_head *listarr;
776 	unsigned int queue_cnt = team->dev->num_tx_queues - 1;
777 	unsigned int i;
778 
779 	if (!queue_cnt)
780 		return 0;
781 	listarr = kmalloc_array(queue_cnt, sizeof(struct list_head),
782 				GFP_KERNEL);
783 	if (!listarr)
784 		return -ENOMEM;
785 	team->qom_lists = listarr;
786 	for (i = 0; i < queue_cnt; i++)
787 		INIT_LIST_HEAD(listarr++);
788 	return 0;
789 }
790 
791 static void team_queue_override_fini(struct team *team)
792 {
793 	kfree(team->qom_lists);
794 }
795 
796 static struct list_head *__team_get_qom_list(struct team *team, u16 queue_id)
797 {
798 	return &team->qom_lists[queue_id - 1];
799 }
800 
801 /*
802  * note: already called with rcu_read_lock
803  */
804 static bool team_queue_override_transmit(struct team *team, struct sk_buff *skb)
805 {
806 	struct list_head *qom_list;
807 	struct team_port *port;
808 
809 	if (!team->queue_override_enabled || !skb->queue_mapping)
810 		return false;
811 	qom_list = __team_get_qom_list(team, skb->queue_mapping);
812 	list_for_each_entry_rcu(port, qom_list, qom_list) {
813 		if (!team_dev_queue_xmit(team, port, skb))
814 			return true;
815 	}
816 	return false;
817 }
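
/*
 * Illustrative flow: if an skb was classified to tx queue N > 0 (for
 * example by a tc skbedit action setting skb->queue_mapping), it is sent
 * out through a port whose "queue_id" option equals N, bypassing the mode's
 * transmit op. Queue 0 is never overridden, which is also why
 * __team_get_qom_list() indexes qom_lists[] with queue_id - 1.
 */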
818 
819 static void __team_queue_override_port_del(struct team *team,
820 					   struct team_port *port)
821 {
822 	if (!port->queue_id)
823 		return;
824 	list_del_rcu(&port->qom_list);
825 }
826 
827 static bool team_queue_override_port_has_gt_prio_than(struct team_port *port,
828 						      struct team_port *cur)
829 {
830 	if (port->priority < cur->priority)
831 		return true;
832 	if (port->priority > cur->priority)
833 		return false;
834 	if (port->index < cur->index)
835 		return true;
836 	return false;
837 }
838 
839 static void __team_queue_override_port_add(struct team *team,
840 					   struct team_port *port)
841 {
842 	struct team_port *cur;
843 	struct list_head *qom_list;
844 	struct list_head *node;
845 
846 	if (!port->queue_id)
847 		return;
848 	qom_list = __team_get_qom_list(team, port->queue_id);
849 	node = qom_list;
850 	list_for_each_entry(cur, qom_list, qom_list) {
851 		if (team_queue_override_port_has_gt_prio_than(port, cur))
852 			break;
853 		node = &cur->qom_list;
854 	}
855 	list_add_tail_rcu(&port->qom_list, node);
856 }
857 
858 static void __team_queue_override_enabled_check(struct team *team)
859 {
860 	struct team_port *port;
861 	bool enabled = false;
862 
863 	list_for_each_entry(port, &team->port_list, list) {
864 		if (port->queue_id) {
865 			enabled = true;
866 			break;
867 		}
868 	}
869 	if (enabled == team->queue_override_enabled)
870 		return;
871 	netdev_dbg(team->dev, "%s queue override\n",
872 		   enabled ? "Enabling" : "Disabling");
873 	team->queue_override_enabled = enabled;
874 }
875 
876 static void team_queue_override_port_prio_changed(struct team *team,
877 						  struct team_port *port)
878 {
879 	if (!port->queue_id || team_port_enabled(port))
880 		return;
881 	__team_queue_override_port_del(team, port);
882 	__team_queue_override_port_add(team, port);
883 	__team_queue_override_enabled_check(team);
884 }
885 
886 static void team_queue_override_port_change_queue_id(struct team *team,
887 						     struct team_port *port,
888 						     u16 new_queue_id)
889 {
890 	if (team_port_enabled(port)) {
891 		__team_queue_override_port_del(team, port);
892 		port->queue_id = new_queue_id;
893 		__team_queue_override_port_add(team, port);
894 		__team_queue_override_enabled_check(team);
895 	} else {
896 		port->queue_id = new_queue_id;
897 	}
898 }
899 
900 static void team_queue_override_port_add(struct team *team,
901 					 struct team_port *port)
902 {
903 	__team_queue_override_port_add(team, port);
904 	__team_queue_override_enabled_check(team);
905 }
906 
907 static void team_queue_override_port_del(struct team *team,
908 					 struct team_port *port)
909 {
910 	__team_queue_override_port_del(team, port);
911 	__team_queue_override_enabled_check(team);
912 }
913 
914 
915 /****************
916  * Port handling
917  ****************/
918 
919 static bool team_port_find(const struct team *team,
920 			   const struct team_port *port)
921 {
922 	struct team_port *cur;
923 
924 	list_for_each_entry(cur, &team->port_list, list)
925 		if (cur == port)
926 			return true;
927 	return false;
928 }
929 
930 /*
931  * Enable/disable a port by adding it to the enabled port hashlist and
932  * setting port->index (this might be racy, so a reader could see an
933  * incorrect index when processing an in-flight packet, but that is not a
934  * problem). Writes are guarded by team->lock.
935  */
936 static void team_port_enable(struct team *team,
937 			     struct team_port *port)
938 {
939 	if (team_port_enabled(port))
940 		return;
941 	port->index = team->en_port_count++;
942 	hlist_add_head_rcu(&port->hlist,
943 			   team_port_index_hash(team, port->index));
944 	team_adjust_ops(team);
945 	team_queue_override_port_add(team, port);
946 	if (team->ops.port_enabled)
947 		team->ops.port_enabled(team, port);
948 	team_notify_peers(team);
949 	team_mcast_rejoin(team);
950 	team_lower_state_changed(port);
951 }
952 
953 static void __reconstruct_port_hlist(struct team *team, int rm_index)
954 {
955 	int i;
956 	struct team_port *port;
957 
958 	for (i = rm_index + 1; i < team->en_port_count; i++) {
959 		port = team_get_port_by_index(team, i);
960 		hlist_del_rcu(&port->hlist);
961 		port->index--;
962 		hlist_add_head_rcu(&port->hlist,
963 				   team_port_index_hash(team, port->index));
964 	}
965 }
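
/*
 * Enabled ports always occupy the dense index range 0..en_port_count-1 so
 * that a mode can pick a port by index (e.g. a round-robin transmit doing
 * something like sent_packets++ % en_port_count, sketch only). The helper
 * above closes the hole left by a removed port by shifting higher-indexed
 * ports down by one.
 */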
966 
967 static void team_port_disable(struct team *team,
968 			      struct team_port *port)
969 {
970 	if (!team_port_enabled(port))
971 		return;
972 	if (team->ops.port_disabled)
973 		team->ops.port_disabled(team, port);
974 	hlist_del_rcu(&port->hlist);
975 	__reconstruct_port_hlist(team, port->index);
976 	port->index = -1;
977 	team->en_port_count--;
978 	team_queue_override_port_del(team, port);
979 	team_adjust_ops(team);
980 	team_lower_state_changed(port);
981 }
982 
983 #define TEAM_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
984 			    NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \
985 			    NETIF_F_HIGHDMA | NETIF_F_LRO)
986 
987 #define TEAM_ENC_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
988 				 NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE)
989 
990 static void __team_compute_features(struct team *team)
991 {
992 	struct team_port *port;
993 	netdev_features_t vlan_features = TEAM_VLAN_FEATURES &
994 					  NETIF_F_ALL_FOR_ALL;
995 	netdev_features_t enc_features  = TEAM_ENC_FEATURES;
996 	unsigned short max_hard_header_len = ETH_HLEN;
997 	unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
998 					IFF_XMIT_DST_RELEASE_PERM;
999 
1000 	rcu_read_lock();
1001 	list_for_each_entry_rcu(port, &team->port_list, list) {
1002 		vlan_features = netdev_increment_features(vlan_features,
1003 					port->dev->vlan_features,
1004 					TEAM_VLAN_FEATURES);
1005 		enc_features =
1006 			netdev_increment_features(enc_features,
1007 						  port->dev->hw_enc_features,
1008 						  TEAM_ENC_FEATURES);
1009 
1010 
1011 		dst_release_flag &= port->dev->priv_flags;
1012 		if (port->dev->hard_header_len > max_hard_header_len)
1013 			max_hard_header_len = port->dev->hard_header_len;
1014 	}
1015 	rcu_read_unlock();
1016 
1017 	team->dev->vlan_features = vlan_features;
1018 	team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
1019 				     NETIF_F_HW_VLAN_CTAG_TX |
1020 				     NETIF_F_HW_VLAN_STAG_TX;
1021 	team->dev->hard_header_len = max_hard_header_len;
1022 
1023 	team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
1024 	if (dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
1025 		team->dev->priv_flags |= IFF_XMIT_DST_RELEASE;
1026 }
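
/*
 * netdev_increment_features() folds each port's capabilities into the
 * running value so that ALL_FOR_ALL-style flags survive only if every port
 * provides them. The team device therefore ends up advertising roughly the
 * intersection of its ports' features and never offloads more than its
 * weakest port can handle.
 */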
1027 
1028 static void team_compute_features(struct team *team)
1029 {
1030 	__team_compute_features(team);
1031 	netdev_change_features(team->dev);
1032 }
1033 
1034 static int team_port_enter(struct team *team, struct team_port *port)
1035 {
1036 	int err = 0;
1037 
1038 	dev_hold(team->dev);
1039 	if (team->ops.port_enter) {
1040 		err = team->ops.port_enter(team, port);
1041 		if (err) {
1042 			netdev_err(team->dev, "Device %s failed to enter team mode\n",
1043 				   port->dev->name);
1044 			goto err_port_enter;
1045 		}
1046 	}
1047 
1048 	return 0;
1049 
1050 err_port_enter:
1051 	dev_put(team->dev);
1052 
1053 	return err;
1054 }
1055 
1056 static void team_port_leave(struct team *team, struct team_port *port)
1057 {
1058 	if (team->ops.port_leave)
1059 		team->ops.port_leave(team, port);
1060 	dev_put(team->dev);
1061 }
1062 
1063 #ifdef CONFIG_NET_POLL_CONTROLLER
1064 static int __team_port_enable_netpoll(struct team_port *port)
1065 {
1066 	struct netpoll *np;
1067 	int err;
1068 
1069 	np = kzalloc(sizeof(*np), GFP_KERNEL);
1070 	if (!np)
1071 		return -ENOMEM;
1072 
1073 	err = __netpoll_setup(np, port->dev);
1074 	if (err) {
1075 		kfree(np);
1076 		return err;
1077 	}
1078 	port->np = np;
1079 	return err;
1080 }
1081 
1082 static int team_port_enable_netpoll(struct team_port *port)
1083 {
1084 	if (!port->team->dev->npinfo)
1085 		return 0;
1086 
1087 	return __team_port_enable_netpoll(port);
1088 }
1089 
1090 static void team_port_disable_netpoll(struct team_port *port)
1091 {
1092 	struct netpoll *np = port->np;
1093 
1094 	if (!np)
1095 		return;
1096 	port->np = NULL;
1097 
1098 	__netpoll_free(np);
1099 }
1100 #else
1101 static int team_port_enable_netpoll(struct team_port *port)
1102 {
1103 	return 0;
1104 }
1105 static void team_port_disable_netpoll(struct team_port *port)
1106 {
1107 }
1108 #endif
1109 
1110 static int team_upper_dev_link(struct team *team, struct team_port *port,
1111 			       struct netlink_ext_ack *extack)
1112 {
1113 	struct netdev_lag_upper_info lag_upper_info;
1114 	int err;
1115 
1116 	lag_upper_info.tx_type = team->mode->lag_tx_type;
1117 	lag_upper_info.hash_type = NETDEV_LAG_HASH_UNKNOWN;
1118 	err = netdev_master_upper_dev_link(port->dev, team->dev, NULL,
1119 					   &lag_upper_info, extack);
1120 	if (err)
1121 		return err;
1122 	port->dev->priv_flags |= IFF_TEAM_PORT;
1123 	return 0;
1124 }
1125 
1126 static void team_upper_dev_unlink(struct team *team, struct team_port *port)
1127 {
1128 	netdev_upper_dev_unlink(port->dev, team->dev);
1129 	port->dev->priv_flags &= ~IFF_TEAM_PORT;
1130 }
1131 
1132 static void __team_port_change_port_added(struct team_port *port, bool linkup);
1133 static int team_dev_type_check_change(struct net_device *dev,
1134 				      struct net_device *port_dev);
1135 
1136 static int team_port_add(struct team *team, struct net_device *port_dev,
1137 			 struct netlink_ext_ack *extack)
1138 {
1139 	struct net_device *dev = team->dev;
1140 	struct team_port *port;
1141 	char *portname = port_dev->name;
1142 	int err;
1143 
1144 	if (port_dev->flags & IFF_LOOPBACK) {
1145 		NL_SET_ERR_MSG(extack, "Loopback device can't be added as a team port");
1146 		netdev_err(dev, "Device %s is loopback device. Loopback devices can't be added as a team port\n",
1147 			   portname);
1148 		return -EINVAL;
1149 	}
1150 
1151 	if (netif_is_team_port(port_dev)) {
1152 		NL_SET_ERR_MSG(extack, "Device is already a port of a team device");
1153 		netdev_err(dev, "Device %s is already a port "
1154 				"of a team device\n", portname);
1155 		return -EBUSY;
1156 	}
1157 
1158 	if (dev == port_dev) {
1159 		NL_SET_ERR_MSG(extack, "Cannot enslave team device to itself");
1160 		netdev_err(dev, "Cannot enslave team device to itself\n");
1161 		return -EINVAL;
1162 	}
1163 
1164 	if (netdev_has_upper_dev(dev, port_dev)) {
1165 		NL_SET_ERR_MSG(extack, "Device is already an upper device of the team interface");
1166 		netdev_err(dev, "Device %s is already an upper device of the team interface\n",
1167 			   portname);
1168 		return -EBUSY;
1169 	}
1170 
1171 	if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
1172 	    vlan_uses_dev(dev)) {
1173 		NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up");
1174 		netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",
1175 			   portname);
1176 		return -EPERM;
1177 	}
1178 
1179 	err = team_dev_type_check_change(dev, port_dev);
1180 	if (err)
1181 		return err;
1182 
1183 	if (port_dev->flags & IFF_UP) {
1184 		NL_SET_ERR_MSG(extack, "Device is up. Set it down before adding it as a team port");
1185 		netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
1186 			   portname);
1187 		return -EBUSY;
1188 	}
1189 
1190 	port = kzalloc(sizeof(struct team_port) + team->mode->port_priv_size,
1191 		       GFP_KERNEL);
1192 	if (!port)
1193 		return -ENOMEM;
1194 
1195 	port->dev = port_dev;
1196 	port->team = team;
1197 	INIT_LIST_HEAD(&port->qom_list);
1198 
1199 	port->orig.mtu = port_dev->mtu;
1200 	err = dev_set_mtu(port_dev, dev->mtu);
1201 	if (err) {
1202 		netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
1203 		goto err_set_mtu;
1204 	}
1205 
1206 	memcpy(port->orig.dev_addr, port_dev->dev_addr, port_dev->addr_len);
1207 
1208 	err = team_port_enter(team, port);
1209 	if (err) {
1210 		netdev_err(dev, "Device %s failed to enter team mode\n",
1211 			   portname);
1212 		goto err_port_enter;
1213 	}
1214 
1215 	err = dev_open(port_dev, extack);
1216 	if (err) {
1217 		netdev_dbg(dev, "Device %s opening failed\n",
1218 			   portname);
1219 		goto err_dev_open;
1220 	}
1221 
1222 	err = vlan_vids_add_by_dev(port_dev, dev);
1223 	if (err) {
1224 		netdev_err(dev, "Failed to add vlan ids to device %s\n",
1225 				portname);
1226 		goto err_vids_add;
1227 	}
1228 
1229 	err = team_port_enable_netpoll(port);
1230 	if (err) {
1231 		netdev_err(dev, "Failed to enable netpoll on device %s\n",
1232 			   portname);
1233 		goto err_enable_netpoll;
1234 	}
1235 
1236 	if (!(dev->features & NETIF_F_LRO))
1237 		dev_disable_lro(port_dev);
1238 
1239 	err = netdev_rx_handler_register(port_dev, team_handle_frame,
1240 					 port);
1241 	if (err) {
1242 		netdev_err(dev, "Device %s failed to register rx_handler\n",
1243 			   portname);
1244 		goto err_handler_register;
1245 	}
1246 
1247 	err = team_upper_dev_link(team, port, extack);
1248 	if (err) {
1249 		netdev_err(dev, "Device %s failed to set upper link\n",
1250 			   portname);
1251 		goto err_set_upper_link;
1252 	}
1253 
1254 	err = __team_option_inst_add_port(team, port);
1255 	if (err) {
1256 		netdev_err(dev, "Device %s failed to add per-port options\n",
1257 			   portname);
1258 		goto err_option_port_add;
1259 	}
1260 
1261 	/* set promiscuity level to new slave */
1262 	if (dev->flags & IFF_PROMISC) {
1263 		err = dev_set_promiscuity(port_dev, 1);
1264 		if (err)
1265 			goto err_set_slave_promisc;
1266 	}
1267 
1268 	/* set allmulti level to new slave */
1269 	if (dev->flags & IFF_ALLMULTI) {
1270 		err = dev_set_allmulti(port_dev, 1);
1271 		if (err) {
1272 			if (dev->flags & IFF_PROMISC)
1273 				dev_set_promiscuity(port_dev, -1);
1274 			goto err_set_slave_promisc;
1275 		}
1276 	}
1277 
1278 	if (dev->flags & IFF_UP) {
1279 		netif_addr_lock_bh(dev);
1280 		dev_uc_sync_multiple(port_dev, dev);
1281 		dev_mc_sync_multiple(port_dev, dev);
1282 		netif_addr_unlock_bh(dev);
1283 	}
1284 
1285 	port->index = -1;
1286 	list_add_tail_rcu(&port->list, &team->port_list);
1287 	team_port_enable(team, port);
1288 	__team_compute_features(team);
1289 	__team_port_change_port_added(port, !!netif_oper_up(port_dev));
1290 	__team_options_change_check(team);
1291 
1292 	netdev_info(dev, "Port device %s added\n", portname);
1293 
1294 	return 0;
1295 
1296 err_set_slave_promisc:
1297 	__team_option_inst_del_port(team, port);
1298 
1299 err_option_port_add:
1300 	team_upper_dev_unlink(team, port);
1301 
1302 err_set_upper_link:
1303 	netdev_rx_handler_unregister(port_dev);
1304 
1305 err_handler_register:
1306 	team_port_disable_netpoll(port);
1307 
1308 err_enable_netpoll:
1309 	vlan_vids_del_by_dev(port_dev, dev);
1310 
1311 err_vids_add:
1312 	dev_close(port_dev);
1313 
1314 err_dev_open:
1315 	team_port_leave(team, port);
1316 	team_port_set_orig_dev_addr(port);
1317 
1318 err_port_enter:
1319 	dev_set_mtu(port_dev, port->orig.mtu);
1320 
1321 err_set_mtu:
1322 	kfree(port);
1323 
1324 	return err;
1325 }
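
/*
 * Usage sketch (userspace commands, for illustration): this function is
 * reached via ndo_add_slave, i.e. typically:
 *
 *	# ip link set eth0 down
 *	# ip link set eth0 master team0
 *
 * The port device must be down first; that is the IFF_UP check near the
 * top of this function.
 */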
1326 
1327 static void __team_port_change_port_removed(struct team_port *port);
1328 
1329 static int team_port_del(struct team *team, struct net_device *port_dev)
1330 {
1331 	struct net_device *dev = team->dev;
1332 	struct team_port *port;
1333 	char *portname = port_dev->name;
1334 
1335 	port = team_port_get_rtnl(port_dev);
1336 	if (!port || !team_port_find(team, port)) {
1337 		netdev_err(dev, "Device %s does not act as a port of this team\n",
1338 			   portname);
1339 		return -ENOENT;
1340 	}
1341 
1342 	team_port_disable(team, port);
1343 	list_del_rcu(&port->list);
1344 
1345 	if (dev->flags & IFF_PROMISC)
1346 		dev_set_promiscuity(port_dev, -1);
1347 	if (dev->flags & IFF_ALLMULTI)
1348 		dev_set_allmulti(port_dev, -1);
1349 
1350 	team_upper_dev_unlink(team, port);
1351 	netdev_rx_handler_unregister(port_dev);
1352 	team_port_disable_netpoll(port);
1353 	vlan_vids_del_by_dev(port_dev, dev);
1354 	if (dev->flags & IFF_UP) {
1355 		dev_uc_unsync(port_dev, dev);
1356 		dev_mc_unsync(port_dev, dev);
1357 	}
1358 	dev_close(port_dev);
1359 	team_port_leave(team, port);
1360 
1361 	__team_option_inst_mark_removed_port(team, port);
1362 	__team_options_change_check(team);
1363 	__team_option_inst_del_port(team, port);
1364 	__team_port_change_port_removed(port);
1365 
1366 	team_port_set_orig_dev_addr(port);
1367 	dev_set_mtu(port_dev, port->orig.mtu);
1368 	kfree_rcu(port, rcu);
1369 	netdev_info(dev, "Port device %s removed\n", portname);
1370 	__team_compute_features(team);
1371 
1372 	return 0;
1373 }
1374 
1375 
1376 /*****************
1377  * Net device ops
1378  *****************/
1379 
1380 static int team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx)
1381 {
1382 	ctx->data.str_val = team->mode->kind;
1383 	return 0;
1384 }
1385 
1386 static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
1387 {
1388 	return team_change_mode(team, ctx->data.str_val);
1389 }
1390 
1391 static int team_notify_peers_count_get(struct team *team,
1392 				       struct team_gsetter_ctx *ctx)
1393 {
1394 	ctx->data.u32_val = team->notify_peers.count;
1395 	return 0;
1396 }
1397 
1398 static int team_notify_peers_count_set(struct team *team,
1399 				       struct team_gsetter_ctx *ctx)
1400 {
1401 	team->notify_peers.count = ctx->data.u32_val;
1402 	return 0;
1403 }
1404 
1405 static int team_notify_peers_interval_get(struct team *team,
1406 					  struct team_gsetter_ctx *ctx)
1407 {
1408 	ctx->data.u32_val = team->notify_peers.interval;
1409 	return 0;
1410 }
1411 
1412 static int team_notify_peers_interval_set(struct team *team,
1413 					  struct team_gsetter_ctx *ctx)
1414 {
1415 	team->notify_peers.interval = ctx->data.u32_val;
1416 	return 0;
1417 }
1418 
1419 static int team_mcast_rejoin_count_get(struct team *team,
1420 				       struct team_gsetter_ctx *ctx)
1421 {
1422 	ctx->data.u32_val = team->mcast_rejoin.count;
1423 	return 0;
1424 }
1425 
1426 static int team_mcast_rejoin_count_set(struct team *team,
1427 				       struct team_gsetter_ctx *ctx)
1428 {
1429 	team->mcast_rejoin.count = ctx->data.u32_val;
1430 	return 0;
1431 }
1432 
1433 static int team_mcast_rejoin_interval_get(struct team *team,
1434 					  struct team_gsetter_ctx *ctx)
1435 {
1436 	ctx->data.u32_val = team->mcast_rejoin.interval;
1437 	return 0;
1438 }
1439 
1440 static int team_mcast_rejoin_interval_set(struct team *team,
1441 					  struct team_gsetter_ctx *ctx)
1442 {
1443 	team->mcast_rejoin.interval = ctx->data.u32_val;
1444 	return 0;
1445 }
1446 
1447 static int team_port_en_option_get(struct team *team,
1448 				   struct team_gsetter_ctx *ctx)
1449 {
1450 	struct team_port *port = ctx->info->port;
1451 
1452 	ctx->data.bool_val = team_port_enabled(port);
1453 	return 0;
1454 }
1455 
1456 static int team_port_en_option_set(struct team *team,
1457 				   struct team_gsetter_ctx *ctx)
1458 {
1459 	struct team_port *port = ctx->info->port;
1460 
1461 	if (ctx->data.bool_val)
1462 		team_port_enable(team, port);
1463 	else
1464 		team_port_disable(team, port);
1465 	return 0;
1466 }
1467 
1468 static int team_user_linkup_option_get(struct team *team,
1469 				       struct team_gsetter_ctx *ctx)
1470 {
1471 	struct team_port *port = ctx->info->port;
1472 
1473 	ctx->data.bool_val = port->user.linkup;
1474 	return 0;
1475 }
1476 
1477 static void __team_carrier_check(struct team *team);
1478 
1479 static int team_user_linkup_option_set(struct team *team,
1480 				       struct team_gsetter_ctx *ctx)
1481 {
1482 	struct team_port *port = ctx->info->port;
1483 
1484 	port->user.linkup = ctx->data.bool_val;
1485 	team_refresh_port_linkup(port);
1486 	__team_carrier_check(port->team);
1487 	return 0;
1488 }
1489 
1490 static int team_user_linkup_en_option_get(struct team *team,
1491 					  struct team_gsetter_ctx *ctx)
1492 {
1493 	struct team_port *port = ctx->info->port;
1494 
1495 	ctx->data.bool_val = port->user.linkup_enabled;
1496 	return 0;
1497 }
1498 
1499 static int team_user_linkup_en_option_set(struct team *team,
1500 					  struct team_gsetter_ctx *ctx)
1501 {
1502 	struct team_port *port = ctx->info->port;
1503 
1504 	port->user.linkup_enabled = ctx->data.bool_val;
1505 	team_refresh_port_linkup(port);
1506 	__team_carrier_check(port->team);
1507 	return 0;
1508 }
1509 
1510 static int team_priority_option_get(struct team *team,
1511 				    struct team_gsetter_ctx *ctx)
1512 {
1513 	struct team_port *port = ctx->info->port;
1514 
1515 	ctx->data.s32_val = port->priority;
1516 	return 0;
1517 }
1518 
1519 static int team_priority_option_set(struct team *team,
1520 				    struct team_gsetter_ctx *ctx)
1521 {
1522 	struct team_port *port = ctx->info->port;
1523 	s32 priority = ctx->data.s32_val;
1524 
1525 	if (port->priority == priority)
1526 		return 0;
1527 	port->priority = priority;
1528 	team_queue_override_port_prio_changed(team, port);
1529 	return 0;
1530 }
1531 
1532 static int team_queue_id_option_get(struct team *team,
1533 				    struct team_gsetter_ctx *ctx)
1534 {
1535 	struct team_port *port = ctx->info->port;
1536 
1537 	ctx->data.u32_val = port->queue_id;
1538 	return 0;
1539 }
1540 
1541 static int team_queue_id_option_set(struct team *team,
1542 				    struct team_gsetter_ctx *ctx)
1543 {
1544 	struct team_port *port = ctx->info->port;
1545 	u16 new_queue_id = ctx->data.u32_val;
1546 
1547 	if (port->queue_id == new_queue_id)
1548 		return 0;
1549 	if (new_queue_id >= team->dev->real_num_tx_queues)
1550 		return -EINVAL;
1551 	team_queue_override_port_change_queue_id(team, port, new_queue_id);
1552 	return 0;
1553 }
1554 
1555 static const struct team_option team_options[] = {
1556 	{
1557 		.name = "mode",
1558 		.type = TEAM_OPTION_TYPE_STRING,
1559 		.getter = team_mode_option_get,
1560 		.setter = team_mode_option_set,
1561 	},
1562 	{
1563 		.name = "notify_peers_count",
1564 		.type = TEAM_OPTION_TYPE_U32,
1565 		.getter = team_notify_peers_count_get,
1566 		.setter = team_notify_peers_count_set,
1567 	},
1568 	{
1569 		.name = "notify_peers_interval",
1570 		.type = TEAM_OPTION_TYPE_U32,
1571 		.getter = team_notify_peers_interval_get,
1572 		.setter = team_notify_peers_interval_set,
1573 	},
1574 	{
1575 		.name = "mcast_rejoin_count",
1576 		.type = TEAM_OPTION_TYPE_U32,
1577 		.getter = team_mcast_rejoin_count_get,
1578 		.setter = team_mcast_rejoin_count_set,
1579 	},
1580 	{
1581 		.name = "mcast_rejoin_interval",
1582 		.type = TEAM_OPTION_TYPE_U32,
1583 		.getter = team_mcast_rejoin_interval_get,
1584 		.setter = team_mcast_rejoin_interval_set,
1585 	},
1586 	{
1587 		.name = "enabled",
1588 		.type = TEAM_OPTION_TYPE_BOOL,
1589 		.per_port = true,
1590 		.getter = team_port_en_option_get,
1591 		.setter = team_port_en_option_set,
1592 	},
1593 	{
1594 		.name = "user_linkup",
1595 		.type = TEAM_OPTION_TYPE_BOOL,
1596 		.per_port = true,
1597 		.getter = team_user_linkup_option_get,
1598 		.setter = team_user_linkup_option_set,
1599 	},
1600 	{
1601 		.name = "user_linkup_enabled",
1602 		.type = TEAM_OPTION_TYPE_BOOL,
1603 		.per_port = true,
1604 		.getter = team_user_linkup_en_option_get,
1605 		.setter = team_user_linkup_en_option_set,
1606 	},
1607 	{
1608 		.name = "priority",
1609 		.type = TEAM_OPTION_TYPE_S32,
1610 		.per_port = true,
1611 		.getter = team_priority_option_get,
1612 		.setter = team_priority_option_set,
1613 	},
1614 	{
1615 		.name = "queue_id",
1616 		.type = TEAM_OPTION_TYPE_U32,
1617 		.per_port = true,
1618 		.getter = team_queue_id_option_get,
1619 		.setter = team_queue_id_option_set,
1620 	},
1621 };
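
/*
 * These core options, together with any per-mode ones, are exposed to
 * userspace over the "team" generic netlink family rather than sysfs.
 * Usage sketch with the libteam teamnl tool (illustrative):
 *
 *	# teamnl team0 options
 *	# teamnl team0 setoption notify_peers_count 3
 */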
1622 
1623 
1624 static int team_init(struct net_device *dev)
1625 {
1626 	struct team *team = netdev_priv(dev);
1627 	int i;
1628 	int err;
1629 
1630 	team->dev = dev;
1631 	team_set_no_mode(team);
1632 	team->notifier_ctx = false;
1633 
1634 	team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats);
1635 	if (!team->pcpu_stats)
1636 		return -ENOMEM;
1637 
1638 	for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
1639 		INIT_HLIST_HEAD(&team->en_port_hlist[i]);
1640 	INIT_LIST_HEAD(&team->port_list);
1641 	err = team_queue_override_init(team);
1642 	if (err)
1643 		goto err_team_queue_override_init;
1644 
1645 	team_adjust_ops(team);
1646 
1647 	INIT_LIST_HEAD(&team->option_list);
1648 	INIT_LIST_HEAD(&team->option_inst_list);
1649 
1650 	team_notify_peers_init(team);
1651 	team_mcast_rejoin_init(team);
1652 
1653 	err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
1654 	if (err)
1655 		goto err_options_register;
1656 	netif_carrier_off(dev);
1657 
1658 	lockdep_register_key(&team->team_lock_key);
1659 	__mutex_init(&team->lock, "team->team_lock_key", &team->team_lock_key);
1660 	netdev_lockdep_set_classes(dev);
1661 
1662 	return 0;
1663 
1664 err_options_register:
1665 	team_mcast_rejoin_fini(team);
1666 	team_notify_peers_fini(team);
1667 	team_queue_override_fini(team);
1668 err_team_queue_override_init:
1669 	free_percpu(team->pcpu_stats);
1670 
1671 	return err;
1672 }
1673 
1674 static void team_uninit(struct net_device *dev)
1675 {
1676 	struct team *team = netdev_priv(dev);
1677 	struct team_port *port;
1678 	struct team_port *tmp;
1679 
1680 	mutex_lock(&team->lock);
1681 	list_for_each_entry_safe(port, tmp, &team->port_list, list)
1682 		team_port_del(team, port->dev);
1683 
1684 	__team_change_mode(team, NULL); /* cleanup */
1685 	__team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
1686 	team_mcast_rejoin_fini(team);
1687 	team_notify_peers_fini(team);
1688 	team_queue_override_fini(team);
1689 	mutex_unlock(&team->lock);
1690 	netdev_change_features(dev);
1691 	lockdep_unregister_key(&team->team_lock_key);
1692 }
1693 
1694 static void team_destructor(struct net_device *dev)
1695 {
1696 	struct team *team = netdev_priv(dev);
1697 
1698 	free_percpu(team->pcpu_stats);
1699 }
1700 
1701 static int team_open(struct net_device *dev)
1702 {
1703 	return 0;
1704 }
1705 
1706 static int team_close(struct net_device *dev)
1707 {
1708 	struct team *team = netdev_priv(dev);
1709 	struct team_port *port;
1710 
1711 	list_for_each_entry(port, &team->port_list, list) {
1712 		dev_uc_unsync(port->dev, dev);
1713 		dev_mc_unsync(port->dev, dev);
1714 	}
1715 
1716 	return 0;
1717 }
1718 
1719 /*
1720  * note: already called with rcu_read_lock
1721  */
1722 static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
1723 {
1724 	struct team *team = netdev_priv(dev);
1725 	bool tx_success;
1726 	unsigned int len = skb->len;
1727 
1728 	tx_success = team_queue_override_transmit(team, skb);
1729 	if (!tx_success)
1730 		tx_success = team->ops.transmit(team, skb);
1731 	if (tx_success) {
1732 		struct team_pcpu_stats *pcpu_stats;
1733 
1734 		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
1735 		u64_stats_update_begin(&pcpu_stats->syncp);
1736 		u64_stats_inc(&pcpu_stats->tx_packets);
1737 		u64_stats_add(&pcpu_stats->tx_bytes, len);
1738 		u64_stats_update_end(&pcpu_stats->syncp);
1739 	} else {
1740 		this_cpu_inc(team->pcpu_stats->tx_dropped);
1741 	}
1742 
1743 	return NETDEV_TX_OK;
1744 }
1745 
1746 static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
1747 			     struct net_device *sb_dev)
1748 {
1749 	/*
1750 	 * This helper function exists to help dev_pick_tx get the correct
1751 	 * destination queue.  Using a helper function skips a call to
1752 	 * skb_tx_hash and will put the skbs in the queue we expect on their
1753 	 * way down to the team driver.
1754 	 */
1755 	u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
1756 
1757 	/*
1758 	 * Save the original txq to restore before passing to the driver
1759 	 */
1760 	qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
1761 
1762 	if (unlikely(txq >= dev->real_num_tx_queues)) {
1763 		do {
1764 			txq -= dev->real_num_tx_queues;
1765 		} while (txq >= dev->real_num_tx_queues);
1766 	}
1767 	return txq;
1768 }
1769 
1770 static void team_change_rx_flags(struct net_device *dev, int change)
1771 {
1772 	struct team *team = netdev_priv(dev);
1773 	struct team_port *port;
1774 	int inc;
1775 
1776 	rcu_read_lock();
1777 	list_for_each_entry_rcu(port, &team->port_list, list) {
1778 		if (change & IFF_PROMISC) {
1779 			inc = dev->flags & IFF_PROMISC ? 1 : -1;
1780 			dev_set_promiscuity(port->dev, inc);
1781 		}
1782 		if (change & IFF_ALLMULTI) {
1783 			inc = dev->flags & IFF_ALLMULTI ? 1 : -1;
1784 			dev_set_allmulti(port->dev, inc);
1785 		}
1786 	}
1787 	rcu_read_unlock();
1788 }
1789 
1790 static void team_set_rx_mode(struct net_device *dev)
1791 {
1792 	struct team *team = netdev_priv(dev);
1793 	struct team_port *port;
1794 
1795 	rcu_read_lock();
1796 	list_for_each_entry_rcu(port, &team->port_list, list) {
1797 		dev_uc_sync_multiple(port->dev, dev);
1798 		dev_mc_sync_multiple(port->dev, dev);
1799 	}
1800 	rcu_read_unlock();
1801 }
1802 
1803 static int team_set_mac_address(struct net_device *dev, void *p)
1804 {
1805 	struct sockaddr *addr = p;
1806 	struct team *team = netdev_priv(dev);
1807 	struct team_port *port;
1808 
1809 	if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
1810 		return -EADDRNOTAVAIL;
1811 	dev_addr_set(dev, addr->sa_data);
1812 	mutex_lock(&team->lock);
1813 	list_for_each_entry(port, &team->port_list, list)
1814 		if (team->ops.port_change_dev_addr)
1815 			team->ops.port_change_dev_addr(team, port);
1816 	mutex_unlock(&team->lock);
1817 	return 0;
1818 }
1819 
1820 static int team_change_mtu(struct net_device *dev, int new_mtu)
1821 {
1822 	struct team *team = netdev_priv(dev);
1823 	struct team_port *port;
1824 	int err;
1825 
1826 	/*
1827 	 * Although this is a reader, it's guarded by the team lock. It's not
1828 	 * possible to traverse the list in reverse under rcu_read_lock.
1829 	 */
1830 	mutex_lock(&team->lock);
1831 	team->port_mtu_change_allowed = true;
1832 	list_for_each_entry(port, &team->port_list, list) {
1833 		err = dev_set_mtu(port->dev, new_mtu);
1834 		if (err) {
1835 			netdev_err(dev, "Device %s failed to change mtu",
1836 				   port->dev->name);
1837 			goto unwind;
1838 		}
1839 	}
1840 	team->port_mtu_change_allowed = false;
1841 	mutex_unlock(&team->lock);
1842 
1843 	dev->mtu = new_mtu;
1844 
1845 	return 0;
1846 
1847 unwind:
1848 	list_for_each_entry_continue_reverse(port, &team->port_list, list)
1849 		dev_set_mtu(port->dev, dev->mtu);
1850 	team->port_mtu_change_allowed = false;
1851 	mutex_unlock(&team->lock);
1852 
1853 	return err;
1854 }
1855 
1856 static void
1857 team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1858 {
1859 	struct team *team = netdev_priv(dev);
1860 	struct team_pcpu_stats *p;
1861 	u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
1862 	u32 rx_dropped = 0, tx_dropped = 0, rx_nohandler = 0;
1863 	unsigned int start;
1864 	int i;
1865 
1866 	for_each_possible_cpu(i) {
1867 		p = per_cpu_ptr(team->pcpu_stats, i);
1868 		do {
1869 			start = u64_stats_fetch_begin(&p->syncp);
1870 			rx_packets	= u64_stats_read(&p->rx_packets);
1871 			rx_bytes	= u64_stats_read(&p->rx_bytes);
1872 			rx_multicast	= u64_stats_read(&p->rx_multicast);
1873 			tx_packets	= u64_stats_read(&p->tx_packets);
1874 			tx_bytes	= u64_stats_read(&p->tx_bytes);
1875 		} while (u64_stats_fetch_retry(&p->syncp, start));
1876 
1877 		stats->rx_packets	+= rx_packets;
1878 		stats->rx_bytes		+= rx_bytes;
1879 		stats->multicast	+= rx_multicast;
1880 		stats->tx_packets	+= tx_packets;
1881 		stats->tx_bytes		+= tx_bytes;
1882 		/*
1883 		 * rx_dropped, tx_dropped & rx_nohandler are u32,
1884 		 * updated without syncp protection.
1885 		 */
1886 		rx_dropped	+= READ_ONCE(p->rx_dropped);
1887 		tx_dropped	+= READ_ONCE(p->tx_dropped);
1888 		rx_nohandler	+= READ_ONCE(p->rx_nohandler);
1889 	}
1890 	stats->rx_dropped	= rx_dropped;
1891 	stats->tx_dropped	= tx_dropped;
1892 	stats->rx_nohandler	= rx_nohandler;
1893 }
1894 
1895 static int team_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1896 {
1897 	struct team *team = netdev_priv(dev);
1898 	struct team_port *port;
1899 	int err;
1900 
1901 	/*
1902 	 * Although this is a reader, it's guarded by the team lock. It's not
1903 	 * possible to traverse the list in reverse under rcu_read_lock.
1904 	 */
1905 	mutex_lock(&team->lock);
1906 	list_for_each_entry(port, &team->port_list, list) {
1907 		err = vlan_vid_add(port->dev, proto, vid);
1908 		if (err)
1909 			goto unwind;
1910 	}
1911 	mutex_unlock(&team->lock);
1912 
1913 	return 0;
1914 
1915 unwind:
1916 	list_for_each_entry_continue_reverse(port, &team->port_list, list)
1917 		vlan_vid_del(port->dev, proto, vid);
1918 	mutex_unlock(&team->lock);
1919 
1920 	return err;
1921 }
1922 
1923 static int team_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1924 {
1925 	struct team *team = netdev_priv(dev);
1926 	struct team_port *port;
1927 
1928 	mutex_lock(&team->lock);
1929 	list_for_each_entry(port, &team->port_list, list)
1930 		vlan_vid_del(port->dev, proto, vid);
1931 	mutex_unlock(&team->lock);
1932 
1933 	return 0;
1934 }
1935 
1936 #ifdef CONFIG_NET_POLL_CONTROLLER
1937 static void team_poll_controller(struct net_device *dev)
1938 {
1939 }
1940 
1941 static void __team_netpoll_cleanup(struct team *team)
1942 {
1943 	struct team_port *port;
1944 
1945 	list_for_each_entry(port, &team->port_list, list)
1946 		team_port_disable_netpoll(port);
1947 }
1948 
1949 static void team_netpoll_cleanup(struct net_device *dev)
1950 {
1951 	struct team *team = netdev_priv(dev);
1952 
1953 	mutex_lock(&team->lock);
1954 	__team_netpoll_cleanup(team);
1955 	mutex_unlock(&team->lock);
1956 }
1957 
1958 static int team_netpoll_setup(struct net_device *dev,
1959 			      struct netpoll_info *npinfo)
1960 {
1961 	struct team *team = netdev_priv(dev);
1962 	struct team_port *port;
1963 	int err = 0;
1964 
1965 	mutex_lock(&team->lock);
1966 	list_for_each_entry(port, &team->port_list, list) {
1967 		err = __team_port_enable_netpoll(port);
1968 		if (err) {
1969 			__team_netpoll_cleanup(team);
1970 			break;
1971 		}
1972 	}
1973 	mutex_unlock(&team->lock);
1974 	return err;
1975 }
1976 #endif
1977 
1978 static int team_add_slave(struct net_device *dev, struct net_device *port_dev,
1979 			  struct netlink_ext_ack *extack)
1980 {
1981 	struct team *team = netdev_priv(dev);
1982 	int err;
1983 
1984 	mutex_lock(&team->lock);
1985 	err = team_port_add(team, port_dev, extack);
1986 	mutex_unlock(&team->lock);
1987 
1988 	if (!err)
1989 		netdev_change_features(dev);
1990 
1991 	return err;
1992 }
1993 
1994 static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
1995 {
1996 	struct team *team = netdev_priv(dev);
1997 	int err;
1998 
1999 	mutex_lock(&team->lock);
2000 	err = team_port_del(team, port_dev);
2001 	mutex_unlock(&team->lock);
2002 
2003 	if (err)
2004 		return err;
2005 
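	/* If the removed port was itself a team master (nested teams), recycle
	 * our lockdep key so lockdep no longer tracks the stale nesting class.
	 */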
2006 	if (netif_is_team_master(port_dev)) {
2007 		lockdep_unregister_key(&team->team_lock_key);
2008 		lockdep_register_key(&team->team_lock_key);
2009 		lockdep_set_class(&team->lock, &team->team_lock_key);
2010 	}
2011 	netdev_change_features(dev);
2012 
2013 	return err;
2014 }
2015 
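/* Recompute the team device's features from its ports: ONE_FOR_ALL bits
 * start cleared and are turned on if any port provides them, ALL_FOR_ALL
 * bits start set and are dropped if a port lacks them; TSO bits allowed by
 * the mask are re-added afterwards.
 */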
2016 static netdev_features_t team_fix_features(struct net_device *dev,
2017 					   netdev_features_t features)
2018 {
2019 	struct team_port *port;
2020 	struct team *team = netdev_priv(dev);
2021 	netdev_features_t mask;
2022 
2023 	mask = features;
2024 	features &= ~NETIF_F_ONE_FOR_ALL;
2025 	features |= NETIF_F_ALL_FOR_ALL;
2026 
2027 	rcu_read_lock();
2028 	list_for_each_entry_rcu(port, &team->port_list, list) {
2029 		features = netdev_increment_features(features,
2030 						     port->dev->features,
2031 						     mask);
2032 	}
2033 	rcu_read_unlock();
2034 
2035 	features = netdev_add_tso_features(features, mask);
2036 
2037 	return features;
2038 }
2039 
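/* ndo_change_carrier; lets userspace pin the carrier state (e.g.
 * "ip link set team0 carrier off"). Once used, automatic carrier tracking
 * in __team_carrier_check() is permanently disabled for this device.
 */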
2040 static int team_change_carrier(struct net_device *dev, bool new_carrier)
2041 {
2042 	struct team *team = netdev_priv(dev);
2043 
2044 	team->user_carrier_enabled = true;
2045 
2046 	if (new_carrier)
2047 		netif_carrier_on(dev);
2048 	else
2049 		netif_carrier_off(dev);
2050 	return 0;
2051 }
2052 
2053 static const struct net_device_ops team_netdev_ops = {
2054 	.ndo_init		= team_init,
2055 	.ndo_uninit		= team_uninit,
2056 	.ndo_open		= team_open,
2057 	.ndo_stop		= team_close,
2058 	.ndo_start_xmit		= team_xmit,
2059 	.ndo_select_queue	= team_select_queue,
2060 	.ndo_change_rx_flags	= team_change_rx_flags,
2061 	.ndo_set_rx_mode	= team_set_rx_mode,
2062 	.ndo_set_mac_address	= team_set_mac_address,
2063 	.ndo_change_mtu		= team_change_mtu,
2064 	.ndo_get_stats64	= team_get_stats64,
2065 	.ndo_vlan_rx_add_vid	= team_vlan_rx_add_vid,
2066 	.ndo_vlan_rx_kill_vid	= team_vlan_rx_kill_vid,
2067 #ifdef CONFIG_NET_POLL_CONTROLLER
2068 	.ndo_poll_controller	= team_poll_controller,
2069 	.ndo_netpoll_setup	= team_netpoll_setup,
2070 	.ndo_netpoll_cleanup	= team_netpoll_cleanup,
2071 #endif
2072 	.ndo_add_slave		= team_add_slave,
2073 	.ndo_del_slave		= team_del_slave,
2074 	.ndo_fix_features	= team_fix_features,
2075 	.ndo_change_carrier	= team_change_carrier,
2076 	.ndo_features_check	= passthru_features_check,
2077 };
2078 
2079 /***********************
2080  * ethtool interface
2081  ***********************/
2082 
2083 static void team_ethtool_get_drvinfo(struct net_device *dev,
2084 				     struct ethtool_drvinfo *drvinfo)
2085 {
2086 	strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
2087 	strscpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
2088 }
2089 
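/* Report aggregate link settings: speed is the sum of the speeds of all
 * txable ports, duplex is taken from the first txable port that reports
 * a known value.
 */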
2090 static int team_ethtool_get_link_ksettings(struct net_device *dev,
2091 					   struct ethtool_link_ksettings *cmd)
2092 {
2093 	struct team *team = netdev_priv(dev);
2094 	unsigned long speed = 0;
2095 	struct team_port *port;
2096 
2097 	cmd->base.duplex = DUPLEX_UNKNOWN;
2098 	cmd->base.port = PORT_OTHER;
2099 
2100 	rcu_read_lock();
2101 	list_for_each_entry_rcu(port, &team->port_list, list) {
2102 		if (team_port_txable(port)) {
2103 			if (port->state.speed != SPEED_UNKNOWN)
2104 				speed += port->state.speed;
2105 			if (cmd->base.duplex == DUPLEX_UNKNOWN &&
2106 			    port->state.duplex != DUPLEX_UNKNOWN)
2107 				cmd->base.duplex = port->state.duplex;
2108 		}
2109 	}
2110 	rcu_read_unlock();
2111 
2112 	cmd->base.speed = speed ? : SPEED_UNKNOWN;
2113 
2114 	return 0;
2115 }
2116 
2117 static const struct ethtool_ops team_ethtool_ops = {
2118 	.get_drvinfo		= team_ethtool_get_drvinfo,
2119 	.get_link		= ethtool_op_get_link,
2120 	.get_link_ksettings	= team_ethtool_get_link_ksettings,
2121 };
2122 
2123 /***********************
2124  * rt netlink interface
2125  ***********************/
2126 
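/* Inherit link-layer properties from the first enslaved port so a team of
 * non-Ethernet devices presents a matching type, header format, address
 * length and MTU.
 */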
2127 static void team_setup_by_port(struct net_device *dev,
2128 			       struct net_device *port_dev)
2129 {
2130 	dev->header_ops	= port_dev->header_ops;
2131 	dev->type = port_dev->type;
2132 	dev->hard_header_len = port_dev->hard_header_len;
2133 	dev->needed_headroom = port_dev->needed_headroom;
2134 	dev->addr_len = port_dev->addr_len;
2135 	dev->mtu = port_dev->mtu;
2136 	memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len);
2137 	eth_hw_addr_inherit(dev, port_dev);
2138 }
2139 
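/* The team device may switch its type to match a port of a different
 * link-layer type, but only while it has no ports enslaved; the notifier
 * chain may still veto the change.
 */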
2140 static int team_dev_type_check_change(struct net_device *dev,
2141 				      struct net_device *port_dev)
2142 {
2143 	struct team *team = netdev_priv(dev);
2144 	char *portname = port_dev->name;
2145 	int err;
2146 
2147 	if (dev->type == port_dev->type)
2148 		return 0;
2149 	if (!list_empty(&team->port_list)) {
2150 		netdev_err(dev, "Device %s is of different type\n", portname);
2151 		return -EBUSY;
2152 	}
2153 	err = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, dev);
2154 	err = notifier_to_errno(err);
2155 	if (err) {
2156 		netdev_err(dev, "Refused to change device type\n");
2157 		return err;
2158 	}
2159 	dev_uc_flush(dev);
2160 	dev_mc_flush(dev);
2161 	team_setup_by_port(dev, port_dev);
2162 	call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
2163 	return 0;
2164 }
2165 
2166 static void team_setup(struct net_device *dev)
2167 {
2168 	ether_setup(dev);
2169 	dev->max_mtu = ETH_MAX_MTU;
2170 
2171 	dev->netdev_ops = &team_netdev_ops;
2172 	dev->ethtool_ops = &team_ethtool_ops;
2173 	dev->needs_free_netdev = true;
2174 	dev->priv_destructor = team_destructor;
2175 	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
2176 	dev->priv_flags |= IFF_NO_QUEUE;
2177 	dev->priv_flags |= IFF_TEAM;
2178 
2179 	/*
2180 	 * Indicate we support unicast address filtering. That way the core
2181 	 * won't put us into promiscuous mode when a unicast address is added;
2182 	 * leave that up to the underlying drivers.
2183 	 */
2184 	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
2185 
2186 	dev->features |= NETIF_F_LLTX;
2187 	dev->features |= NETIF_F_GRO;
2188 
2189 	/* Don't allow team devices to change network namespaces. */
2190 	dev->features |= NETIF_F_NETNS_LOCAL;
2191 
2192 	dev->hw_features = TEAM_VLAN_FEATURES |
2193 			   NETIF_F_HW_VLAN_CTAG_RX |
2194 			   NETIF_F_HW_VLAN_CTAG_FILTER;
2195 
2196 	dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
2197 	dev->features |= dev->hw_features;
2198 	dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
2199 }
2200 
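/* Invoked through rtnl_newlink() when the device is created, e.g. by
 * "ip link add name team0 type team" (illustrative name).
 */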
2201 static int team_newlink(struct net *src_net, struct net_device *dev,
2202 			struct nlattr *tb[], struct nlattr *data[],
2203 			struct netlink_ext_ack *extack)
2204 {
2205 	if (tb[IFLA_ADDRESS] == NULL)
2206 		eth_hw_addr_random(dev);
2207 
2208 	return register_netdevice(dev);
2209 }
2210 
2211 static int team_validate(struct nlattr *tb[], struct nlattr *data[],
2212 			 struct netlink_ext_ack *extack)
2213 {
2214 	if (tb[IFLA_ADDRESS]) {
2215 		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
2216 			return -EINVAL;
2217 		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
2218 			return -EADDRNOTAVAIL;
2219 	}
2220 	return 0;
2221 }
2222 
2223 static unsigned int team_get_num_tx_queues(void)
2224 {
2225 	return TEAM_DEFAULT_NUM_TX_QUEUES;
2226 }
2227 
2228 static unsigned int team_get_num_rx_queues(void)
2229 {
2230 	return TEAM_DEFAULT_NUM_RX_QUEUES;
2231 }
2232 
2233 static struct rtnl_link_ops team_link_ops __read_mostly = {
2234 	.kind			= DRV_NAME,
2235 	.priv_size		= sizeof(struct team),
2236 	.setup			= team_setup,
2237 	.newlink		= team_newlink,
2238 	.validate		= team_validate,
2239 	.get_num_tx_queues	= team_get_num_tx_queues,
2240 	.get_num_rx_queues	= team_get_num_rx_queues,
2241 };
2242 
2243 
2244 /***********************************
2245  * Generic netlink custom interface
2246  ***********************************/
2247 
2248 static struct genl_family team_nl_family;
2249 
2250 static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = {
2251 	[TEAM_ATTR_UNSPEC]			= { .type = NLA_UNSPEC, },
2252 	[TEAM_ATTR_TEAM_IFINDEX]		= { .type = NLA_U32 },
2253 	[TEAM_ATTR_LIST_OPTION]			= { .type = NLA_NESTED },
2254 	[TEAM_ATTR_LIST_PORT]			= { .type = NLA_NESTED },
2255 };
2256 
2257 static const struct nla_policy
2258 team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
2259 	[TEAM_ATTR_OPTION_UNSPEC]		= { .type = NLA_UNSPEC, },
2260 	[TEAM_ATTR_OPTION_NAME] = {
2261 		.type = NLA_STRING,
2262 		.len = TEAM_STRING_MAX_LEN,
2263 	},
2264 	[TEAM_ATTR_OPTION_CHANGED]		= { .type = NLA_FLAG },
2265 	[TEAM_ATTR_OPTION_TYPE]			= { .type = NLA_U8 },
2266 	[TEAM_ATTR_OPTION_DATA]			= { .type = NLA_BINARY },
2267 	[TEAM_ATTR_OPTION_PORT_IFINDEX]		= { .type = NLA_U32 },
2268 	[TEAM_ATTR_OPTION_ARRAY_INDEX]		= { .type = NLA_U32 },
2269 };
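/* A minimal sketch of the nesting an OPTIONS_SET request is expected to
 * carry (attribute names come from <linux/if_team.h>; the option name and
 * value below are illustrative):
 *
 *   TEAM_ATTR_TEAM_IFINDEX (u32)
 *   TEAM_ATTR_LIST_OPTION (nested)
 *     TEAM_ATTR_ITEM_OPTION (nested)
 *       TEAM_ATTR_OPTION_NAME  ("mode")
 *       TEAM_ATTR_OPTION_TYPE  (NLA_STRING)
 *       TEAM_ATTR_OPTION_DATA  ("activebackup")
 */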
2270 
2271 static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
2272 {
2273 	struct sk_buff *msg;
2274 	void *hdr;
2275 	int err;
2276 
2277 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2278 	if (!msg)
2279 		return -ENOMEM;
2280 
2281 	hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
2282 			  &team_nl_family, 0, TEAM_CMD_NOOP);
2283 	if (!hdr) {
2284 		err = -EMSGSIZE;
2285 		goto err_msg_put;
2286 	}
2287 
2288 	genlmsg_end(msg, hdr);
2289 
2290 	return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
2291 
2292 err_msg_put:
2293 	nlmsg_free(msg);
2294 
2295 	return err;
2296 }
2297 
2298 /*
2299  * Netlink cmd functions should be surrounded by the following two
2300  * functions. Since the dev is held here, it cannot disappear in between.
2301  */
2302 static struct team *team_nl_team_get(struct genl_info *info)
2303 {
2304 	struct net *net = genl_info_net(info);
2305 	int ifindex;
2306 	struct net_device *dev;
2307 	struct team *team;
2308 
2309 	if (!info->attrs[TEAM_ATTR_TEAM_IFINDEX])
2310 		return NULL;
2311 
2312 	ifindex = nla_get_u32(info->attrs[TEAM_ATTR_TEAM_IFINDEX]);
2313 	dev = dev_get_by_index(net, ifindex);
2314 	if (!dev || dev->netdev_ops != &team_netdev_ops) {
2315 		if (dev)
2316 			dev_put(dev);
2317 		return NULL;
2318 	}
2319 
2320 	team = netdev_priv(dev);
2321 	mutex_lock(&team->lock);
2322 	return team;
2323 }
2324 
2325 static void team_nl_team_put(struct team *team)
2326 {
2327 	mutex_unlock(&team->lock);
2328 	dev_put(team->dev);
2329 }
2330 
2331 typedef int team_nl_send_func_t(struct sk_buff *skb,
2332 				struct team *team, u32 portid);
2333 
2334 static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 portid)
2335 {
2336 	return genlmsg_unicast(dev_net(team->dev), skb, portid);
2337 }
2338 
2339 static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
2340 				       struct team_option_inst *opt_inst)
2341 {
2342 	struct nlattr *option_item;
2343 	struct team_option *option = opt_inst->option;
2344 	struct team_option_inst_info *opt_inst_info = &opt_inst->info;
2345 	struct team_gsetter_ctx ctx;
2346 	int err;
2347 
2348 	ctx.info = opt_inst_info;
2349 	err = team_option_get(team, opt_inst, &ctx);
2350 	if (err)
2351 		return err;
2352 
2353 	option_item = nla_nest_start_noflag(skb, TEAM_ATTR_ITEM_OPTION);
2354 	if (!option_item)
2355 		return -EMSGSIZE;
2356 
2357 	if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name))
2358 		goto nest_cancel;
2359 	if (opt_inst_info->port &&
2360 	    nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
2361 			opt_inst_info->port->dev->ifindex))
2362 		goto nest_cancel;
2363 	if (opt_inst->option->array_size &&
2364 	    nla_put_u32(skb, TEAM_ATTR_OPTION_ARRAY_INDEX,
2365 			opt_inst_info->array_index))
2366 		goto nest_cancel;
2367 
2368 	switch (option->type) {
2369 	case TEAM_OPTION_TYPE_U32:
2370 		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
2371 			goto nest_cancel;
2372 		if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.u32_val))
2373 			goto nest_cancel;
2374 		break;
2375 	case TEAM_OPTION_TYPE_STRING:
2376 		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
2377 			goto nest_cancel;
2378 		if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
2379 				   ctx.data.str_val))
2380 			goto nest_cancel;
2381 		break;
2382 	case TEAM_OPTION_TYPE_BINARY:
2383 		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
2384 			goto nest_cancel;
2385 		if (nla_put(skb, TEAM_ATTR_OPTION_DATA, ctx.data.bin_val.len,
2386 			    ctx.data.bin_val.ptr))
2387 			goto nest_cancel;
2388 		break;
2389 	case TEAM_OPTION_TYPE_BOOL:
2390 		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
2391 			goto nest_cancel;
2392 		if (ctx.data.bool_val &&
2393 		    nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
2394 			goto nest_cancel;
2395 		break;
2396 	case TEAM_OPTION_TYPE_S32:
2397 		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_S32))
2398 			goto nest_cancel;
2399 		if (nla_put_s32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.s32_val))
2400 			goto nest_cancel;
2401 		break;
2402 	default:
2403 		BUG();
2404 	}
2405 	if (opt_inst->removed && nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
2406 		goto nest_cancel;
2407 	if (opt_inst->changed) {
2408 		if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED))
2409 			goto nest_cancel;
2410 		opt_inst->changed = false;
2411 	}
2412 	nla_nest_end(skb, option_item);
2413 	return 0;
2414 
2415 nest_cancel:
2416 	nla_nest_cancel(skb, option_item);
2417 	return -EMSGSIZE;
2418 }
2419 
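/* Flush the current skb (if any) through send_func and allocate a fresh
 * one; used to split dumps that exceed a single message into an
 * NLM_F_MULTI sequence.
 */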
2420 static int __send_and_alloc_skb(struct sk_buff **pskb,
2421 				struct team *team, u32 portid,
2422 				team_nl_send_func_t *send_func)
2423 {
2424 	int err;
2425 
2426 	if (*pskb) {
2427 		err = send_func(*pskb, team, portid);
2428 		if (err)
2429 			return err;
2430 	}
2431 	*pskb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
2432 	if (!*pskb)
2433 		return -ENOMEM;
2434 	return 0;
2435 }
2436 
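/* Emit TEAM_CMD_OPTIONS_GET messages for every option instance on
 * sel_opt_inst_list. When an option no longer fits (-EMSGSIZE), the
 * message is closed and filling resumes in a new skb from the same list
 * position; the sequence is terminated by NLMSG_DONE.
 */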
2437 static int team_nl_send_options_get(struct team *team, u32 portid, u32 seq,
2438 				    int flags, team_nl_send_func_t *send_func,
2439 				    struct list_head *sel_opt_inst_list)
2440 {
2441 	struct nlattr *option_list;
2442 	struct nlmsghdr *nlh;
2443 	void *hdr;
2444 	struct team_option_inst *opt_inst;
2445 	int err;
2446 	struct sk_buff *skb = NULL;
2447 	bool incomplete;
2448 	int i;
2449 
2450 	opt_inst = list_first_entry(sel_opt_inst_list,
2451 				    struct team_option_inst, tmp_list);
2452 
2453 start_again:
2454 	err = __send_and_alloc_skb(&skb, team, portid, send_func);
2455 	if (err)
2456 		return err;
2457 
2458 	hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
2459 			  TEAM_CMD_OPTIONS_GET);
2460 	if (!hdr) {
2461 		nlmsg_free(skb);
2462 		return -EMSGSIZE;
2463 	}
2464 
2465 	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
2466 		goto nla_put_failure;
2467 	option_list = nla_nest_start_noflag(skb, TEAM_ATTR_LIST_OPTION);
2468 	if (!option_list)
2469 		goto nla_put_failure;
2470 
2471 	i = 0;
2472 	incomplete = false;
2473 	list_for_each_entry_from(opt_inst, sel_opt_inst_list, tmp_list) {
2474 		err = team_nl_fill_one_option_get(skb, team, opt_inst);
2475 		if (err) {
2476 			if (err == -EMSGSIZE) {
2477 				if (!i)
2478 					goto errout;
2479 				incomplete = true;
2480 				break;
2481 			}
2482 			goto errout;
2483 		}
2484 		i++;
2485 	}
2486 
2487 	nla_nest_end(skb, option_list);
2488 	genlmsg_end(skb, hdr);
2489 	if (incomplete)
2490 		goto start_again;
2491 
2492 send_done:
2493 	nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
2494 	if (!nlh) {
2495 		err = __send_and_alloc_skb(&skb, team, portid, send_func);
2496 		if (err)
2497 			return err;
2498 		goto send_done;
2499 	}
2500 
2501 	return send_func(skb, team, portid);
2502 
2503 nla_put_failure:
2504 	err = -EMSGSIZE;
2505 errout:
2506 	nlmsg_free(skb);
2507 	return err;
2508 }
2509 
2510 static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
2511 {
2512 	struct team *team;
2513 	struct team_option_inst *opt_inst;
2514 	int err;
2515 	LIST_HEAD(sel_opt_inst_list);
2516 
2517 	team = team_nl_team_get(info);
2518 	if (!team)
2519 		return -EINVAL;
2520 
2521 	list_for_each_entry(opt_inst, &team->option_inst_list, list)
2522 		list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
2523 	err = team_nl_send_options_get(team, info->snd_portid, info->snd_seq,
2524 				       NLM_F_ACK, team_nl_send_unicast,
2525 				       &sel_opt_inst_list);
2526 
2527 	team_nl_team_put(team);
2528 
2529 	return err;
2530 }
2531 
2532 static int team_nl_send_event_options_get(struct team *team,
2533 					  struct list_head *sel_opt_inst_list);
2534 
2535 static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
2536 {
2537 	struct team *team;
2538 	int err = 0;
2539 	int i;
2540 	struct nlattr *nl_option;
2541 
2542 	rtnl_lock();
2543 
2544 	team = team_nl_team_get(info);
2545 	if (!team) {
2546 		err = -EINVAL;
2547 		goto rtnl_unlock;
2548 	}
2549 
2551 	if (!info->attrs[TEAM_ATTR_LIST_OPTION]) {
2552 		err = -EINVAL;
2553 		goto team_put;
2554 	}
2555 
2556 	nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
2557 		struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
2558 		struct nlattr *attr;
2559 		struct nlattr *attr_data;
2560 		LIST_HEAD(opt_inst_list);
2561 		enum team_option_type opt_type;
2562 		int opt_port_ifindex = 0; /* != 0 for per-port options */
2563 		u32 opt_array_index = 0;
2564 		bool opt_is_array = false;
2565 		struct team_option_inst *opt_inst;
2566 		char *opt_name;
2567 		bool opt_found = false;
2568 
2569 		if (nla_type(nl_option) != TEAM_ATTR_ITEM_OPTION) {
2570 			err = -EINVAL;
2571 			goto team_put;
2572 		}
2573 		err = nla_parse_nested_deprecated(opt_attrs,
2574 						  TEAM_ATTR_OPTION_MAX,
2575 						  nl_option,
2576 						  team_nl_option_policy,
2577 						  info->extack);
2578 		if (err)
2579 			goto team_put;
2580 		if (!opt_attrs[TEAM_ATTR_OPTION_NAME] ||
2581 		    !opt_attrs[TEAM_ATTR_OPTION_TYPE]) {
2582 			err = -EINVAL;
2583 			goto team_put;
2584 		}
2585 		switch (nla_get_u8(opt_attrs[TEAM_ATTR_OPTION_TYPE])) {
2586 		case NLA_U32:
2587 			opt_type = TEAM_OPTION_TYPE_U32;
2588 			break;
2589 		case NLA_STRING:
2590 			opt_type = TEAM_OPTION_TYPE_STRING;
2591 			break;
2592 		case NLA_BINARY:
2593 			opt_type = TEAM_OPTION_TYPE_BINARY;
2594 			break;
2595 		case NLA_FLAG:
2596 			opt_type = TEAM_OPTION_TYPE_BOOL;
2597 			break;
2598 		case NLA_S32:
2599 			opt_type = TEAM_OPTION_TYPE_S32;
2600 			break;
2601 		default:
			err = -EINVAL;
2602 			goto team_put;
2603 		}
2604 
2605 		attr_data = opt_attrs[TEAM_ATTR_OPTION_DATA];
2606 		if (opt_type != TEAM_OPTION_TYPE_BOOL && !attr_data) {
2607 			err = -EINVAL;
2608 			goto team_put;
2609 		}
2610 
2611 		opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]);
2612 		attr = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX];
2613 		if (attr)
2614 			opt_port_ifindex = nla_get_u32(attr);
2615 
2616 		attr = opt_attrs[TEAM_ATTR_OPTION_ARRAY_INDEX];
2617 		if (attr) {
2618 			opt_is_array = true;
2619 			opt_array_index = nla_get_u32(attr);
2620 		}
2621 
2622 		list_for_each_entry(opt_inst, &team->option_inst_list, list) {
2623 			struct team_option *option = opt_inst->option;
2624 			struct team_gsetter_ctx ctx;
2625 			struct team_option_inst_info *opt_inst_info;
2626 			int tmp_ifindex;
2627 
2628 			opt_inst_info = &opt_inst->info;
2629 			tmp_ifindex = opt_inst_info->port ?
2630 				      opt_inst_info->port->dev->ifindex : 0;
2631 			if (option->type != opt_type ||
2632 			    strcmp(option->name, opt_name) ||
2633 			    tmp_ifindex != opt_port_ifindex ||
2634 			    (option->array_size && !opt_is_array) ||
2635 			    opt_inst_info->array_index != opt_array_index)
2636 				continue;
2637 			opt_found = true;
2638 			ctx.info = opt_inst_info;
2639 			switch (opt_type) {
2640 			case TEAM_OPTION_TYPE_U32:
2641 				ctx.data.u32_val = nla_get_u32(attr_data);
2642 				break;
2643 			case TEAM_OPTION_TYPE_STRING:
2644 				if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) {
2645 					err = -EINVAL;
2646 					goto team_put;
2647 				}
2648 				ctx.data.str_val = nla_data(attr_data);
2649 				break;
2650 			case TEAM_OPTION_TYPE_BINARY:
2651 				ctx.data.bin_val.len = nla_len(attr_data);
2652 				ctx.data.bin_val.ptr = nla_data(attr_data);
2653 				break;
2654 			case TEAM_OPTION_TYPE_BOOL:
2655 				ctx.data.bool_val = !!attr_data;
2656 				break;
2657 			case TEAM_OPTION_TYPE_S32:
2658 				ctx.data.s32_val = nla_get_s32(attr_data);
2659 				break;
2660 			default:
2661 				BUG();
2662 			}
2663 			err = team_option_set(team, opt_inst, &ctx);
2664 			if (err)
2665 				goto team_put;
2666 			opt_inst->changed = true;
2667 			list_add(&opt_inst->tmp_list, &opt_inst_list);
2668 		}
2669 		if (!opt_found) {
2670 			err = -ENOENT;
2671 			goto team_put;
2672 		}
2673 
2674 		err = team_nl_send_event_options_get(team, &opt_inst_list);
2675 		if (err)
2676 			break;
2677 	}
2678 
2679 team_put:
2680 	team_nl_team_put(team);
2681 rtnl_unlock:
2682 	rtnl_unlock();
2683 	return err;
2684 }
2685 
2686 static int team_nl_fill_one_port_get(struct sk_buff *skb,
2687 				     struct team_port *port)
2688 {
2689 	struct nlattr *port_item;
2690 
2691 	port_item = nla_nest_start_noflag(skb, TEAM_ATTR_ITEM_PORT);
2692 	if (!port_item)
2693 		goto nest_cancel;
2694 	if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex))
2695 		goto nest_cancel;
2696 	if (port->changed) {
2697 		if (nla_put_flag(skb, TEAM_ATTR_PORT_CHANGED))
2698 			goto nest_cancel;
2699 		port->changed = false;
2700 	}
2701 	if ((port->removed &&
2702 	     nla_put_flag(skb, TEAM_ATTR_PORT_REMOVED)) ||
2703 	    (port->state.linkup &&
2704 	     nla_put_flag(skb, TEAM_ATTR_PORT_LINKUP)) ||
2705 	    nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) ||
2706 	    nla_put_u8(skb, TEAM_ATTR_PORT_DUPLEX, port->state.duplex))
2707 		goto nest_cancel;
2708 	nla_nest_end(skb, port_item);
2709 	return 0;
2710 
2711 nest_cancel:
2712 	nla_nest_cancel(skb, port_item);
2713 	return -EMSGSIZE;
2714 }
2715 
2716 static int team_nl_send_port_list_get(struct team *team, u32 portid, u32 seq,
2717 				      int flags, team_nl_send_func_t *send_func,
2718 				      struct team_port *one_port)
2719 {
2720 	struct nlattr *port_list;
2721 	struct nlmsghdr *nlh;
2722 	void *hdr;
2723 	struct team_port *port;
2724 	int err;
2725 	struct sk_buff *skb = NULL;
2726 	bool incomplete;
2727 	int i;
2728 
2729 	port = list_first_entry_or_null(&team->port_list,
2730 					struct team_port, list);
2731 
2732 start_again:
2733 	err = __send_and_alloc_skb(&skb, team, portid, send_func);
2734 	if (err)
2735 		return err;
2736 
2737 	hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
2738 			  TEAM_CMD_PORT_LIST_GET);
2739 	if (!hdr) {
2740 		nlmsg_free(skb);
2741 		return -EMSGSIZE;
2742 	}
2743 
2744 	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
2745 		goto nla_put_failure;
2746 	port_list = nla_nest_start_noflag(skb, TEAM_ATTR_LIST_PORT);
2747 	if (!port_list)
2748 		goto nla_put_failure;
2749 
2750 	i = 0;
2751 	incomplete = false;
2752 
2753 	/* If one port is selected, the caller wants a port list containing
2754 	 * only that port. Otherwise go through all listed ports and send all.
2755 	 */
2756 	if (one_port) {
2757 		err = team_nl_fill_one_port_get(skb, one_port);
2758 		if (err)
2759 			goto errout;
2760 	} else if (port) {
2761 		list_for_each_entry_from(port, &team->port_list, list) {
2762 			err = team_nl_fill_one_port_get(skb, port);
2763 			if (err) {
2764 				if (err == -EMSGSIZE) {
2765 					if (!i)
2766 						goto errout;
2767 					incomplete = true;
2768 					break;
2769 				}
2770 				goto errout;
2771 			}
2772 			i++;
2773 		}
2774 	}
2775 
2776 	nla_nest_end(skb, port_list);
2777 	genlmsg_end(skb, hdr);
2778 	if (incomplete)
2779 		goto start_again;
2780 
2781 send_done:
2782 	nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
2783 	if (!nlh) {
2784 		err = __send_and_alloc_skb(&skb, team, portid, send_func);
2785 		if (err)
2786 			return err;
2787 		goto send_done;
2788 	}
2789 
2790 	return send_func(skb, team, portid);
2791 
2792 nla_put_failure:
2793 	err = -EMSGSIZE;
2794 errout:
2795 	nlmsg_free(skb);
2796 	return err;
2797 }
2798 
2799 static int team_nl_cmd_port_list_get(struct sk_buff *skb,
2800 				     struct genl_info *info)
2801 {
2802 	struct team *team;
2803 	int err;
2804 
2805 	team = team_nl_team_get(info);
2806 	if (!team)
2807 		return -EINVAL;
2808 
2809 	err = team_nl_send_port_list_get(team, info->snd_portid, info->snd_seq,
2810 					 NLM_F_ACK, team_nl_send_unicast, NULL);
2811 
2812 	team_nl_team_put(team);
2813 
2814 	return err;
2815 }
2816 
2817 static const struct genl_small_ops team_nl_ops[] = {
2818 	{
2819 		.cmd = TEAM_CMD_NOOP,
2820 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2821 		.doit = team_nl_cmd_noop,
2822 	},
2823 	{
2824 		.cmd = TEAM_CMD_OPTIONS_SET,
2825 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2826 		.doit = team_nl_cmd_options_set,
2827 		.flags = GENL_ADMIN_PERM,
2828 	},
2829 	{
2830 		.cmd = TEAM_CMD_OPTIONS_GET,
2831 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2832 		.doit = team_nl_cmd_options_get,
2833 		.flags = GENL_ADMIN_PERM,
2834 	},
2835 	{
2836 		.cmd = TEAM_CMD_PORT_LIST_GET,
2837 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2838 		.doit = team_nl_cmd_port_list_get,
2839 		.flags = GENL_ADMIN_PERM,
2840 	},
2841 };
2842 
2843 static const struct genl_multicast_group team_nl_mcgrps[] = {
2844 	{ .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, },
2845 };
2846 
2847 static struct genl_family team_nl_family __ro_after_init = {
2848 	.name		= TEAM_GENL_NAME,
2849 	.version	= TEAM_GENL_VERSION,
2850 	.maxattr	= TEAM_ATTR_MAX,
2851 	.policy		= team_nl_policy,
2852 	.netnsok	= true,
2853 	.module		= THIS_MODULE,
2854 	.small_ops	= team_nl_ops,
2855 	.n_small_ops	= ARRAY_SIZE(team_nl_ops),
2856 	.resv_start_op	= TEAM_CMD_PORT_LIST_GET + 1,
2857 	.mcgrps		= team_nl_mcgrps,
2858 	.n_mcgrps	= ARRAY_SIZE(team_nl_mcgrps),
2859 };
2860 
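/* Userspace normally drives this genetlink family through libteam/teamd
 * rather than raw messages; illustrative commands (names per the libteam
 * utilities):
 *
 *   teamnl team0 options        # TEAM_CMD_OPTIONS_GET
 *   teamnl team0 ports          # TEAM_CMD_PORT_LIST_GET
 *   teamnl team0 setoption mode activebackup   # TEAM_CMD_OPTIONS_SET
 */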
2861 static int team_nl_send_multicast(struct sk_buff *skb,
2862 				  struct team *team, u32 portid)
2863 {
2864 	return genlmsg_multicast_netns(&team_nl_family, dev_net(team->dev),
2865 				       skb, 0, 0, GFP_KERNEL);
2866 }
2867 
2868 static int team_nl_send_event_options_get(struct team *team,
2869 					  struct list_head *sel_opt_inst_list)
2870 {
2871 	return team_nl_send_options_get(team, 0, 0, 0, team_nl_send_multicast,
2872 					sel_opt_inst_list);
2873 }
2874 
2875 static int team_nl_send_event_port_get(struct team *team,
2876 				       struct team_port *port)
2877 {
2878 	return team_nl_send_port_list_get(team, 0, 0, 0, team_nl_send_multicast,
2879 					  port);
2880 }
2881 
2882 static int __init team_nl_init(void)
2883 {
2884 	return genl_register_family(&team_nl_family);
2885 }
2886 
2887 static void team_nl_fini(void)
2888 {
2889 	genl_unregister_family(&team_nl_family);
2890 }
2891 
2892 
2893 /******************
2894  * Change checkers
2895  ******************/
2896 
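/* Collect every option instance marked changed and broadcast the batch to
 * the change-event multicast group; -ESRCH only means nobody is listening.
 */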
2897 static void __team_options_change_check(struct team *team)
2898 {
2899 	int err;
2900 	struct team_option_inst *opt_inst;
2901 	LIST_HEAD(sel_opt_inst_list);
2902 
2903 	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
2904 		if (opt_inst->changed)
2905 			list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
2906 	}
2907 	err = team_nl_send_event_options_get(team, &sel_opt_inst_list);
2908 	if (err && err != -ESRCH)
2909 		netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n",
2910 			    err);
2911 }
2912 
2913 /* rtnl lock is held */
2915 static void __team_port_change_send(struct team_port *port, bool linkup)
2916 {
2917 	int err;
2918 
2919 	port->changed = true;
2920 	port->state.linkup = linkup;
2921 	team_refresh_port_linkup(port);
2922 	if (linkup) {
2923 		struct ethtool_link_ksettings ecmd;
2924 
2925 		err = __ethtool_get_link_ksettings(port->dev, &ecmd);
2926 		if (!err) {
2927 			port->state.speed = ecmd.base.speed;
2928 			port->state.duplex = ecmd.base.duplex;
2929 			goto send_event;
2930 		}
2931 	}
2932 	port->state.speed = 0;
2933 	port->state.duplex = 0;
2934 
2935 send_event:
2936 	err = team_nl_send_event_port_get(port->team, port);
2937 	if (err && err != -ESRCH)
2938 		netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink (err %d)\n",
2939 			    port->dev->name, err);
2941 }
2942 
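/* The master's carrier follows its ports: up if at least one port has
 * link, unless userspace has taken over via ndo_change_carrier.
 */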
2943 static void __team_carrier_check(struct team *team)
2944 {
2945 	struct team_port *port;
2946 	bool team_linkup;
2947 
2948 	if (team->user_carrier_enabled)
2949 		return;
2950 
2951 	team_linkup = false;
2952 	list_for_each_entry(port, &team->port_list, list) {
2953 		if (port->linkup) {
2954 			team_linkup = true;
2955 			break;
2956 		}
2957 	}
2958 
2959 	if (team_linkup)
2960 		netif_carrier_on(team->dev);
2961 	else
2962 		netif_carrier_off(team->dev);
2963 }
2964 
2965 static void __team_port_change_check(struct team_port *port, bool linkup)
2966 {
2967 	if (port->state.linkup != linkup)
2968 		__team_port_change_send(port, linkup);
2969 	__team_carrier_check(port->team);
2970 }
2971 
2972 static void __team_port_change_port_added(struct team_port *port, bool linkup)
2973 {
2974 	__team_port_change_send(port, linkup);
2975 	__team_carrier_check(port->team);
2976 }
2977 
2978 static void __team_port_change_port_removed(struct team_port *port)
2979 {
2980 	port->removed = true;
2981 	__team_port_change_send(port, false);
2982 	__team_carrier_check(port->team);
2983 }
2984 
2985 static void team_port_change_check(struct team_port *port, bool linkup)
2986 {
2987 	struct team *team = port->team;
2988 
2989 	mutex_lock(&team->lock);
2990 	__team_port_change_check(port, linkup);
2991 	mutex_unlock(&team->lock);
2992 }
2993 
2994 
2995 /************************************
2996  * Net device notifier event handler
2997  ************************************/
2998 
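/* Reacts only to events on enslaved port devices (team_port_get_rtnl()
 * filters out everything else): link transitions update port state,
 * NETDEV_UNREGISTER removes the port from its team, MTU changes on ports
 * are vetoed unless the team itself initiated them, and type changes are
 * always refused.
 */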
2999 static int team_device_event(struct notifier_block *unused,
3000 			     unsigned long event, void *ptr)
3001 {
3002 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3003 	struct team_port *port;
3004 
3005 	port = team_port_get_rtnl(dev);
3006 	if (!port)
3007 		return NOTIFY_DONE;
3008 
3009 	switch (event) {
3010 	case NETDEV_UP:
3011 		if (netif_oper_up(dev))
3012 			team_port_change_check(port, true);
3013 		break;
3014 	case NETDEV_DOWN:
3015 		team_port_change_check(port, false);
3016 		break;
3017 	case NETDEV_CHANGE:
3018 		if (netif_running(port->dev))
3019 			team_port_change_check(port,
3020 					       !!netif_oper_up(port->dev));
3021 		break;
3022 	case NETDEV_UNREGISTER:
3023 		team_del_slave(port->team->dev, dev);
3024 		break;
3025 	case NETDEV_FEAT_CHANGE:
3026 		if (!port->team->notifier_ctx) {
3027 			port->team->notifier_ctx = true;
3028 			team_compute_features(port->team);
3029 			port->team->notifier_ctx = false;
3030 		}
3031 		break;
3032 	case NETDEV_PRECHANGEMTU:
3033 		/* Forbid changing the MTU of the underlying device */
3034 		if (!port->team->port_mtu_change_allowed)
3035 			return NOTIFY_BAD;
3036 		break;
3037 	case NETDEV_PRE_TYPE_CHANGE:
3038 		/* Forbid changing the type of the underlying device */
3039 		return NOTIFY_BAD;
3040 	case NETDEV_RESEND_IGMP:
3041 		/* Propagate to master device */
3042 		call_netdevice_notifiers(event, port->team->dev);
3043 		break;
3044 	}
3045 	return NOTIFY_DONE;
3046 }
3047 
3048 static struct notifier_block team_notifier_block __read_mostly = {
3049 	.notifier_call = team_device_event,
3050 };
3051 
3052 
3053 /***********************
3054  * Module init and exit
3055  ***********************/
3056 
3057 static int __init team_module_init(void)
3058 {
3059 	int err;
3060 
3061 	register_netdevice_notifier(&team_notifier_block);
3062 
3063 	err = rtnl_link_register(&team_link_ops);
3064 	if (err)
3065 		goto err_rtnl_reg;
3066 
3067 	err = team_nl_init();
3068 	if (err)
3069 		goto err_nl_init;
3070 
3071 	return 0;
3072 
3073 err_nl_init:
3074 	rtnl_link_unregister(&team_link_ops);
3075 
3076 err_rtnl_reg:
3077 	unregister_netdevice_notifier(&team_notifier_block);
3078 
3079 	return err;
3080 }
3081 
3082 static void __exit team_module_exit(void)
3083 {
3084 	team_nl_fini();
3085 	rtnl_link_unregister(&team_link_ops);
3086 	unregister_netdevice_notifier(&team_notifier_block);
3087 }
3088 
3089 module_init(team_module_init);
3090 module_exit(team_module_exit);
3091 
3092 MODULE_LICENSE("GPL v2");
3093 MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
3094 MODULE_DESCRIPTION("Ethernet team device driver");
3095 MODULE_ALIAS_RTNL_LINK(DRV_NAME);
3096