xref: /openbmc/linux/drivers/net/team/team.c (revision 52a4fd77)
1 /*
2  * drivers/net/team/team.c - Network team device driver
3  * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 2 of the License, or
8  * (at your option) any later version.
9  */
10 
11 #include <linux/kernel.h>
12 #include <linux/types.h>
13 #include <linux/module.h>
14 #include <linux/init.h>
15 #include <linux/slab.h>
16 #include <linux/rcupdate.h>
17 #include <linux/errno.h>
18 #include <linux/ctype.h>
19 #include <linux/notifier.h>
20 #include <linux/netdevice.h>
21 #include <linux/if_vlan.h>
22 #include <linux/if_arp.h>
23 #include <linux/socket.h>
24 #include <linux/etherdevice.h>
25 #include <linux/rtnetlink.h>
26 #include <net/rtnetlink.h>
27 #include <net/genetlink.h>
28 #include <net/netlink.h>
29 #include <linux/if_team.h>
30 
31 #define DRV_NAME "team"
32 
33 
34 /**********
35  * Helpers
36  **********/
37 
38 #define team_port_exists(dev) (dev->priv_flags & IFF_TEAM_PORT)
39 
40 static struct team_port *team_port_get_rcu(const struct net_device *dev)
41 {
42 	struct team_port *port = rcu_dereference(dev->rx_handler_data);
43 
44 	return team_port_exists(dev) ? port : NULL;
45 }
46 
47 static struct team_port *team_port_get_rtnl(const struct net_device *dev)
48 {
49 	struct team_port *port = rtnl_dereference(dev->rx_handler_data);
50 
51 	return team_port_exists(dev) ? port : NULL;
52 }
53 
54 /*
55  * The ability to change the mac address of an open port device is tested
56  * in team_port_add, so this can be called without checking the return value.
57  */
58 static int __set_port_mac(struct net_device *port_dev,
59 			  const unsigned char *dev_addr)
60 {
61 	struct sockaddr addr;
62 
63 	memcpy(addr.sa_data, dev_addr, ETH_ALEN);
64 	addr.sa_family = ARPHRD_ETHER;
65 	return dev_set_mac_address(port_dev, &addr);
66 }
67 
68 static int team_port_set_orig_mac(struct team_port *port)
69 {
70 	return __set_port_mac(port->dev, port->orig.dev_addr);
71 }
72 
73 int team_port_set_team_mac(struct team_port *port)
74 {
75 	return __set_port_mac(port->dev, port->team->dev->dev_addr);
76 }
77 EXPORT_SYMBOL(team_port_set_team_mac);
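
/*
 * Usage sketch (illustrative; "foo" is a hypothetical mode, not one shipped
 * with this driver): a mode that wants every port to carry the team's MAC
 * address would typically call team_port_set_team_mac() from its
 * port_enter callback:
 *
 *	static int foo_port_enter(struct team *team, struct team_port *port)
 *	{
 *		return team_port_set_team_mac(port);
 *	}
 */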
78 
79 static void team_refresh_port_linkup(struct team_port *port)
80 {
81 	port->linkup = port->user.linkup_enabled ? port->user.linkup :
82 						   port->state.linkup;
83 }
84 
85 
86 /*******************
87  * Options handling
88  *******************/
89 
90 struct team_option_inst { /* One for each option instance */
91 	struct list_head list;
92 	struct list_head tmp_list;
93 	struct team_option *option;
94 	struct team_option_inst_info info;
95 	bool changed;
96 	bool removed;
97 };
98 
99 static struct team_option *__team_find_option(struct team *team,
100 					      const char *opt_name)
101 {
102 	struct team_option *option;
103 
104 	list_for_each_entry(option, &team->option_list, list) {
105 		if (strcmp(option->name, opt_name) == 0)
106 			return option;
107 	}
108 	return NULL;
109 }
110 
111 static void __team_option_inst_del(struct team_option_inst *opt_inst)
112 {
113 	list_del(&opt_inst->list);
114 	kfree(opt_inst);
115 }
116 
117 static void __team_option_inst_del_option(struct team *team,
118 					  struct team_option *option)
119 {
120 	struct team_option_inst *opt_inst, *tmp;
121 
122 	list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
123 		if (opt_inst->option == option)
124 			__team_option_inst_del(opt_inst);
125 	}
126 }
127 
128 static int __team_option_inst_add(struct team *team, struct team_option *option,
129 				  struct team_port *port)
130 {
131 	struct team_option_inst *opt_inst;
132 	unsigned int array_size;
133 	unsigned int i;
134 	int err;
135 
136 	array_size = option->array_size;
137 	if (!array_size)
138 		array_size = 1; /* No array but still need one instance */
139 
140 	for (i = 0; i < array_size; i++) {
141 		opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL);
142 		if (!opt_inst)
143 			return -ENOMEM;
144 		opt_inst->option = option;
145 		opt_inst->info.port = port;
146 		opt_inst->info.array_index = i;
147 		opt_inst->changed = true;
148 		opt_inst->removed = false;
149 		list_add_tail(&opt_inst->list, &team->option_inst_list);
150 		if (option->init) {
151 			err = option->init(team, &opt_inst->info);
152 			if (err)
153 				return err;
154 		}
156 	}
157 	return 0;
158 }
159 
160 static int __team_option_inst_add_option(struct team *team,
161 					 struct team_option *option)
162 {
163 	struct team_port *port;
164 	int err;
165 
166 	if (!option->per_port) {
167 		err = __team_option_inst_add(team, option, NULL);
168 		if (err)
169 			goto inst_del_option;
170 	}
171 
172 	list_for_each_entry(port, &team->port_list, list) {
173 		err = __team_option_inst_add(team, option, port);
174 		if (err)
175 			goto inst_del_option;
176 	}
177 	return 0;
178 
179 inst_del_option:
180 	__team_option_inst_del_option(team, option);
181 	return err;
182 }
183 
184 static void __team_option_inst_mark_removed_option(struct team *team,
185 						   struct team_option *option)
186 {
187 	struct team_option_inst *opt_inst;
188 
189 	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
190 		if (opt_inst->option == option) {
191 			opt_inst->changed = true;
192 			opt_inst->removed = true;
193 		}
194 	}
195 }
196 
197 static void __team_option_inst_del_port(struct team *team,
198 					struct team_port *port)
199 {
200 	struct team_option_inst *opt_inst, *tmp;
201 
202 	list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
203 		if (opt_inst->option->per_port &&
204 		    opt_inst->info.port == port)
205 			__team_option_inst_del(opt_inst);
206 	}
207 }
208 
209 static int __team_option_inst_add_port(struct team *team,
210 				       struct team_port *port)
211 {
212 	struct team_option *option;
213 	int err;
214 
215 	list_for_each_entry(option, &team->option_list, list) {
216 		if (!option->per_port)
217 			continue;
218 		err = __team_option_inst_add(team, option, port);
219 		if (err)
220 			goto inst_del_port;
221 	}
222 	return 0;
223 
224 inst_del_port:
225 	__team_option_inst_del_port(team, port);
226 	return err;
227 }
228 
229 static void __team_option_inst_mark_removed_port(struct team *team,
230 						 struct team_port *port)
231 {
232 	struct team_option_inst *opt_inst;
233 
234 	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
235 		if (opt_inst->info.port == port) {
236 			opt_inst->changed = true;
237 			opt_inst->removed = true;
238 		}
239 	}
240 }
241 
242 static int __team_options_register(struct team *team,
243 				   const struct team_option *option,
244 				   size_t option_count)
245 {
246 	int i;
247 	struct team_option **dst_opts;
248 	int err;
249 
250 	dst_opts = kzalloc(sizeof(struct team_option *) * option_count,
251 			   GFP_KERNEL);
252 	if (!dst_opts)
253 		return -ENOMEM;
254 	for (i = 0; i < option_count; i++, option++) {
255 		if (__team_find_option(team, option->name)) {
256 			err = -EEXIST;
257 			goto alloc_rollback;
258 		}
259 		dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL);
260 		if (!dst_opts[i]) {
261 			err = -ENOMEM;
262 			goto alloc_rollback;
263 		}
264 	}
265 
266 	for (i = 0; i < option_count; i++) {
267 		err = __team_option_inst_add_option(team, dst_opts[i]);
268 		if (err)
269 			goto inst_rollback;
270 		list_add_tail(&dst_opts[i]->list, &team->option_list);
271 	}
272 
273 	kfree(dst_opts);
274 	return 0;
275 
276 inst_rollback:
277 	for (i--; i >= 0; i--)
278 		__team_option_inst_del_option(team, dst_opts[i]);
279 
280 	i = option_count;
281 alloc_rollback:
282 	for (i--; i >= 0; i--)
283 		kfree(dst_opts[i]);
284 
285 	kfree(dst_opts);
286 	return err;
287 }
288 
289 static void __team_options_mark_removed(struct team *team,
290 					const struct team_option *option,
291 					size_t option_count)
292 {
293 	int i;
294 
295 	for (i = 0; i < option_count; i++, option++) {
296 		struct team_option *del_opt;
297 
298 		del_opt = __team_find_option(team, option->name);
299 		if (del_opt)
300 			__team_option_inst_mark_removed_option(team, del_opt);
301 	}
302 }
303 
304 static void __team_options_unregister(struct team *team,
305 				      const struct team_option *option,
306 				      size_t option_count)
307 {
308 	int i;
309 
310 	for (i = 0; i < option_count; i++, option++) {
311 		struct team_option *del_opt;
312 
313 		del_opt = __team_find_option(team, option->name);
314 		if (del_opt) {
315 			__team_option_inst_del_option(team, del_opt);
316 			list_del(&del_opt->list);
317 			kfree(del_opt);
318 		}
319 	}
320 }
321 
322 static void __team_options_change_check(struct team *team);
323 
324 int team_options_register(struct team *team,
325 			  const struct team_option *option,
326 			  size_t option_count)
327 {
328 	int err;
329 
330 	err = __team_options_register(team, option, option_count);
331 	if (err)
332 		return err;
333 	__team_options_change_check(team);
334 	return 0;
335 }
336 EXPORT_SYMBOL(team_options_register);
337 
338 void team_options_unregister(struct team *team,
339 			     const struct team_option *option,
340 			     size_t option_count)
341 {
342 	__team_options_mark_removed(team, option, option_count);
343 	__team_options_change_check(team);
344 	__team_options_unregister(team, option, option_count);
345 }
346 EXPORT_SYMBOL(team_options_unregister);
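
/*
 * Usage sketch (hypothetical names): a mode registers its private options
 * from its init callback, e.g.:
 *
 *	static const struct team_option foo_options[] = {
 *		{
 *			.name	= "foo_knob",
 *			.type	= TEAM_OPTION_TYPE_U32,
 *			.getter	= foo_knob_get,
 *			.setter	= foo_knob_set,
 *		},
 *	};
 *
 *	err = team_options_register(team, foo_options,
 *				    ARRAY_SIZE(foo_options));
 *
 * team_options_unregister() with the same array undoes this and lets
 * userspace know the options went away.
 */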
347 
348 static int team_option_get(struct team *team,
349 			   struct team_option_inst *opt_inst,
350 			   struct team_gsetter_ctx *ctx)
351 {
352 	if (!opt_inst->option->getter)
353 		return -EOPNOTSUPP;
354 	return opt_inst->option->getter(team, ctx);
355 }
356 
357 static int team_option_set(struct team *team,
358 			   struct team_option_inst *opt_inst,
359 			   struct team_gsetter_ctx *ctx)
360 {
361 	if (!opt_inst->option->setter)
362 		return -EOPNOTSUPP;
363 	return opt_inst->option->setter(team, ctx);
364 }
365 
366 void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info)
367 {
368 	struct team_option_inst *opt_inst;
369 
370 	opt_inst = container_of(opt_inst_info, struct team_option_inst, info);
371 	opt_inst->changed = true;
372 }
373 EXPORT_SYMBOL(team_option_inst_set_change);
374 
375 void team_options_change_check(struct team *team)
376 {
377 	__team_options_change_check(team);
378 }
379 EXPORT_SYMBOL(team_options_change_check);
380 
381 
382 /****************
383  * Mode handling
384  ****************/
385 
386 static LIST_HEAD(mode_list);
387 static DEFINE_SPINLOCK(mode_list_lock);
388 
389 struct team_mode_item {
390 	struct list_head list;
391 	const struct team_mode *mode;
392 };
393 
394 static struct team_mode_item *__find_mode(const char *kind)
395 {
396 	struct team_mode_item *mitem;
397 
398 	list_for_each_entry(mitem, &mode_list, list) {
399 		if (strcmp(mitem->mode->kind, kind) == 0)
400 			return mitem;
401 	}
402 	return NULL;
403 }
404 
405 static bool is_good_mode_name(const char *name)
406 {
407 	while (*name != '\0') {
408 		if (!isalpha(*name) && !isdigit(*name) && *name != '_')
409 			return false;
410 		name++;
411 	}
412 	return true;
413 }
414 
415 int team_mode_register(const struct team_mode *mode)
416 {
417 	int err = 0;
418 	struct team_mode_item *mitem;
419 
420 	if (!is_good_mode_name(mode->kind) ||
421 	    mode->priv_size > TEAM_MODE_PRIV_SIZE)
422 		return -EINVAL;
423 
424 	mitem = kmalloc(sizeof(*mitem), GFP_KERNEL);
425 	if (!mitem)
426 		return -ENOMEM;
427 
428 	spin_lock(&mode_list_lock);
429 	if (__find_mode(mode->kind)) {
430 		err = -EEXIST;
431 		kfree(mitem);
432 		goto unlock;
433 	}
434 	mitem->mode = mode;
435 	list_add_tail(&mitem->list, &mode_list);
436 unlock:
437 	spin_unlock(&mode_list_lock);
438 	return err;
439 }
440 EXPORT_SYMBOL(team_mode_register);
441 
442 void team_mode_unregister(const struct team_mode *mode)
443 {
444 	struct team_mode_item *mitem;
445 
446 	spin_lock(&mode_list_lock);
447 	mitem = __find_mode(mode->kind);
448 	if (mitem) {
449 		list_del_init(&mitem->list);
450 		kfree(mitem);
451 	}
452 	spin_unlock(&mode_list_lock);
453 }
454 EXPORT_SYMBOL(team_mode_unregister);
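
/*
 * Usage sketch (hypothetical names, mirroring how a mode module is expected
 * to hook in): fill in a struct team_mode and register it on module init:
 *
 *	static const struct team_mode foo_mode = {
 *		.kind		= "foo",
 *		.owner		= THIS_MODULE,
 *		.priv_size	= sizeof(struct foo_priv),
 *		.ops		= &foo_mode_ops,
 *	};
 *
 *	static int __init foo_init_module(void)
 *	{
 *		return team_mode_register(&foo_mode);
 *	}
 *
 * Declaring MODULE_ALIAS("team-mode-foo") lets the request_module() call
 * in team_mode_get() below autoload the module on demand.
 */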
455 
456 static const struct team_mode *team_mode_get(const char *kind)
457 {
458 	struct team_mode_item *mitem;
459 	const struct team_mode *mode = NULL;
460 
461 	spin_lock(&mode_list_lock);
462 	mitem = __find_mode(kind);
463 	if (!mitem) {
464 		spin_unlock(&mode_list_lock);
465 		request_module("team-mode-%s", kind);
466 		spin_lock(&mode_list_lock);
467 		mitem = __find_mode(kind);
468 	}
469 	if (mitem) {
470 		mode = mitem->mode;
471 		if (!try_module_get(mode->owner))
472 			mode = NULL;
473 	}
474 
475 	spin_unlock(&mode_list_lock);
476 	return mode;
477 }
478 
479 static void team_mode_put(const struct team_mode *mode)
480 {
481 	module_put(mode->owner);
482 }
483 
484 static bool team_dummy_transmit(struct team *team, struct sk_buff *skb)
485 {
486 	dev_kfree_skb_any(skb);
487 	return false;
488 }
489 
490 static rx_handler_result_t team_dummy_receive(struct team *team,
491 					       struct team_port *port,
492 					       struct sk_buff *skb)
493 {
494 	return RX_HANDLER_ANOTHER;
495 }
496 
497 static const struct team_mode __team_no_mode = {
498 	.kind		= "*NOMODE*",
499 };
500 
501 static bool team_is_mode_set(struct team *team)
502 {
503 	return team->mode != &__team_no_mode;
504 }
505 
506 static void team_set_no_mode(struct team *team)
507 {
508 	team->mode = &__team_no_mode;
509 }
510 
511 static void __team_adjust_ops(struct team *team, int en_port_count)
512 {
513 	/*
514 	 * To avoid checks in rx/tx skb paths, ensure here that non-null and
515 	 * correct ops are always set.
516 	 */
517 
518 	if (!en_port_count || !team_is_mode_set(team) ||
519 	    !team->mode->ops->transmit)
520 		team->ops.transmit = team_dummy_transmit;
521 	else
522 		team->ops.transmit = team->mode->ops->transmit;
523 
524 	if (!en_port_count || !team_is_mode_set(team) ||
525 	    !team->mode->ops->receive)
526 		team->ops.receive = team_dummy_receive;
527 	else
528 		team->ops.receive = team->mode->ops->receive;
529 }
530 
531 static void team_adjust_ops(struct team *team)
532 {
533 	__team_adjust_ops(team, team->en_port_count);
534 }
535 
536 /*
537  * We can benefit from the fact that it is ensured no port is present at
538  * the time of mode change. Therefore no packets are in flight, so there
539  * is no need to set the mode operations in any special way.
540  */
541 static int __team_change_mode(struct team *team,
542 			      const struct team_mode *new_mode)
543 {
544 	/* Check if mode was previously set and do cleanup if so */
545 	if (team_is_mode_set(team)) {
546 		void (*exit_op)(struct team *team) = team->ops.exit;
547 
548 		/* Clear ops area so no callback is called any longer */
549 		memset(&team->ops, 0, sizeof(struct team_mode_ops));
550 		team_adjust_ops(team);
551 
552 		if (exit_op)
553 			exit_op(team);
554 		team_mode_put(team->mode);
555 		team_set_no_mode(team);
556 		/* zero private data area */
557 		memset(&team->mode_priv, 0,
558 		       sizeof(struct team) - offsetof(struct team, mode_priv));
559 	}
560 
561 	if (!new_mode)
562 		return 0;
563 
564 	if (new_mode->ops->init) {
565 		int err;
566 
567 		err = new_mode->ops->init(team);
568 		if (err)
569 			return err;
570 	}
571 
572 	team->mode = new_mode;
573 	memcpy(&team->ops, new_mode->ops, sizeof(struct team_mode_ops));
574 	team_adjust_ops(team);
575 
576 	return 0;
577 }
578 
579 static int team_change_mode(struct team *team, const char *kind)
580 {
581 	const struct team_mode *new_mode;
582 	struct net_device *dev = team->dev;
583 	int err;
584 
585 	if (!list_empty(&team->port_list)) {
586 		netdev_err(dev, "No ports can be present during mode change\n");
587 		return -EBUSY;
588 	}
589 
590 	if (team_is_mode_set(team) && strcmp(team->mode->kind, kind) == 0) {
591 		netdev_err(dev, "Unable to change to the same mode the team is in\n");
592 		return -EINVAL;
593 	}
594 
595 	new_mode = team_mode_get(kind);
596 	if (!new_mode) {
597 		netdev_err(dev, "Mode \"%s\" not found\n", kind);
598 		return -EINVAL;
599 	}
600 
601 	err = __team_change_mode(team, new_mode);
602 	if (err) {
603 		netdev_err(dev, "Failed to change to mode \"%s\"\n", kind);
604 		team_mode_put(new_mode);
605 		return err;
606 	}
607 
608 	netdev_info(dev, "Mode changed to \"%s\"\n", kind);
609 	return 0;
610 }
611 
612 
613 /************************
614  * Rx path frame handler
615  ************************/
616 
617 /* note: already called with rcu_read_lock */
618 static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
619 {
620 	struct sk_buff *skb = *pskb;
621 	struct team_port *port;
622 	struct team *team;
623 	rx_handler_result_t res;
624 
625 	skb = skb_share_check(skb, GFP_ATOMIC);
626 	if (!skb)
627 		return RX_HANDLER_CONSUMED;
628 
629 	*pskb = skb;
630 
631 	port = team_port_get_rcu(skb->dev);
632 	team = port->team;
633 	if (!team_port_enabled(port)) {
634 		/* allow exact match delivery for disabled ports */
635 		res = RX_HANDLER_EXACT;
636 	} else {
637 		res = team->ops.receive(team, port, skb);
638 	}
639 	if (res == RX_HANDLER_ANOTHER) {
640 		struct team_pcpu_stats *pcpu_stats;
641 
642 		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
643 		u64_stats_update_begin(&pcpu_stats->syncp);
644 		pcpu_stats->rx_packets++;
645 		pcpu_stats->rx_bytes += skb->len;
646 		if (skb->pkt_type == PACKET_MULTICAST)
647 			pcpu_stats->rx_multicast++;
648 		u64_stats_update_end(&pcpu_stats->syncp);
649 
650 		skb->dev = team->dev;
651 	} else {
652 		this_cpu_inc(team->pcpu_stats->rx_dropped);
653 	}
654 
655 	return res;
656 }
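
/*
 * For mode authors: returning RX_HANDLER_ANOTHER from the receive op makes
 * the code above account the packet and re-deliver it on the team device;
 * any other result bumps rx_dropped (RX_HANDLER_EXACT, used for disabled
 * ports, still allows exact-match delivery). A minimal pass-everything
 * receive hook is therefore just (sketch, hypothetical name):
 *
 *	static rx_handler_result_t foo_receive(struct team *team,
 *					       struct team_port *port,
 *					       struct sk_buff *skb)
 *	{
 *		return RX_HANDLER_ANOTHER;
 *	}
 */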
657 
658 
659 /****************
660  * Port handling
661  ****************/
662 
663 static bool team_port_find(const struct team *team,
664 			   const struct team_port *port)
665 {
666 	struct team_port *cur;
667 
668 	list_for_each_entry(cur, &team->port_list, list)
669 		if (cur == port)
670 			return true;
671 	return false;
672 }
673 
674 bool team_port_enabled(struct team_port *port)
675 {
676 	return port->index != -1;
677 }
678 EXPORT_SYMBOL(team_port_enabled);
679 
680 /*
681  * Enable/disable port by adding to enabled port hashlist and setting
682  * port->index (this might be racy, so a reader could see a stale index
683  * while processing an in-flight packet, but that is not a problem).
684  * Writes are guarded by team->lock.
685  */
686 static void team_port_enable(struct team *team,
687 			     struct team_port *port)
688 {
689 	if (team_port_enabled(port))
690 		return;
691 	port->index = team->en_port_count++;
692 	hlist_add_head_rcu(&port->hlist,
693 			   team_port_index_hash(team, port->index));
694 	team_adjust_ops(team);
695 	if (team->ops.port_enabled)
696 		team->ops.port_enabled(team, port);
697 }
698 
699 static void __reconstruct_port_hlist(struct team *team, int rm_index)
700 {
701 	int i;
702 	struct team_port *port;
703 
704 	for (i = rm_index + 1; i < team->en_port_count; i++) {
705 		port = team_get_port_by_index(team, i);
706 		hlist_del_rcu(&port->hlist);
707 		port->index--;
708 		hlist_add_head_rcu(&port->hlist,
709 				   team_port_index_hash(team, port->index));
710 	}
711 }
712 
713 static void team_port_disable(struct team *team,
714 			      struct team_port *port)
715 {
716 	if (!team_port_enabled(port))
717 		return;
718 	if (team->ops.port_disabled)
719 		team->ops.port_disabled(team, port);
720 	hlist_del_rcu(&port->hlist);
721 	__reconstruct_port_hlist(team, port->index);
722 	port->index = -1;
723 	__team_adjust_ops(team, team->en_port_count - 1);
724 	/*
725 	 * Wait until readers see adjusted ops. This ensures that
726 	 * readers never see team->en_port_count == 0
727 	 */
728 	synchronize_rcu();
729 	team->en_port_count--;
730 }
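
/*
 * The dense 0..en_port_count-1 indexing kept up by the two functions above
 * lets a mode map a flow hash or counter onto an active port cheaply from
 * a fast path, roughly (sketch; assumes the team_get_port_by_index_rcu()
 * helper from linux/if_team.h and that rcu_read_lock is held):
 *
 *	port = team_get_port_by_index_rcu(team,
 *					  hash % team->en_port_count);
 *
 * The synchronize_rcu() in team_port_disable() makes sure all such readers
 * run the adjusted ops before en_port_count drops, so the modulo above can
 * never see zero.
 */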
731 
732 #define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
733 			    NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
734 			    NETIF_F_HIGHDMA | NETIF_F_LRO)
735 
736 static void __team_compute_features(struct team *team)
737 {
738 	struct team_port *port;
739 	u32 vlan_features = TEAM_VLAN_FEATURES;
740 	unsigned short max_hard_header_len = ETH_HLEN;
741 
742 	list_for_each_entry(port, &team->port_list, list) {
743 		vlan_features = netdev_increment_features(vlan_features,
744 					port->dev->vlan_features,
745 					TEAM_VLAN_FEATURES);
746 
747 		if (port->dev->hard_header_len > max_hard_header_len)
748 			max_hard_header_len = port->dev->hard_header_len;
749 	}
750 
751 	team->dev->vlan_features = vlan_features;
752 	team->dev->hard_header_len = max_hard_header_len;
753 
754 	netdev_change_features(team->dev);
755 }
756 
757 static void team_compute_features(struct team *team)
758 {
759 	mutex_lock(&team->lock);
760 	__team_compute_features(team);
761 	mutex_unlock(&team->lock);
762 }
763 
764 static int team_port_enter(struct team *team, struct team_port *port)
765 {
766 	int err = 0;
767 
768 	dev_hold(team->dev);
769 	port->dev->priv_flags |= IFF_TEAM_PORT;
770 	if (team->ops.port_enter) {
771 		err = team->ops.port_enter(team, port);
772 		if (err) {
773 			netdev_err(team->dev, "Device %s failed to enter team mode\n",
774 				   port->dev->name);
775 			goto err_port_enter;
776 		}
777 	}
778 
779 	return 0;
780 
781 err_port_enter:
782 	port->dev->priv_flags &= ~IFF_TEAM_PORT;
783 	dev_put(team->dev);
784 
785 	return err;
786 }
787 
788 static void team_port_leave(struct team *team, struct team_port *port)
789 {
790 	if (team->ops.port_leave)
791 		team->ops.port_leave(team, port);
792 	port->dev->priv_flags &= ~IFF_TEAM_PORT;
793 	dev_put(team->dev);
794 }
795 
796 static void __team_port_change_check(struct team_port *port, bool linkup);
797 
798 static int team_port_add(struct team *team, struct net_device *port_dev)
799 {
800 	struct net_device *dev = team->dev;
801 	struct team_port *port;
802 	char *portname = port_dev->name;
803 	int err;
804 
805 	if (port_dev->flags & IFF_LOOPBACK ||
806 	    port_dev->type != ARPHRD_ETHER) {
807 		netdev_err(dev, "Device %s is of an unsupported type\n",
808 			   portname);
809 		return -EINVAL;
810 	}
811 
812 	if (team_port_exists(port_dev)) {
813 		netdev_err(dev, "Device %s is already a port "
814 				"of a team device\n", portname);
815 		return -EBUSY;
816 	}
817 
818 	if (port_dev->flags & IFF_UP) {
819 		netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
820 			   portname);
821 		return -EBUSY;
822 	}
823 
824 	port = kzalloc(sizeof(struct team_port) + team->mode->port_priv_size,
825 		       GFP_KERNEL);
826 	if (!port)
827 		return -ENOMEM;
828 
829 	port->dev = port_dev;
830 	port->team = team;
831 
832 	port->orig.mtu = port_dev->mtu;
833 	err = dev_set_mtu(port_dev, dev->mtu);
834 	if (err) {
835 		netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
836 		goto err_set_mtu;
837 	}
838 
839 	memcpy(port->orig.dev_addr, port_dev->dev_addr, ETH_ALEN);
840 
841 	err = team_port_enter(team, port);
842 	if (err) {
843 		netdev_err(dev, "Device %s failed to enter team mode\n",
844 			   portname);
845 		goto err_port_enter;
846 	}
847 
848 	err = dev_open(port_dev);
849 	if (err) {
850 		netdev_dbg(dev, "Device %s opening failed\n",
851 			   portname);
852 		goto err_dev_open;
853 	}
854 
855 	err = vlan_vids_add_by_dev(port_dev, dev);
856 	if (err) {
857 		netdev_err(dev, "Failed to add vlan ids to device %s\n",
858 				portname);
859 		goto err_vids_add;
860 	}
861 
862 	err = netdev_set_master(port_dev, dev);
863 	if (err) {
864 		netdev_err(dev, "Device %s failed to set master\n", portname);
865 		goto err_set_master;
866 	}
867 
868 	err = netdev_rx_handler_register(port_dev, team_handle_frame,
869 					 port);
870 	if (err) {
871 		netdev_err(dev, "Device %s failed to register rx_handler\n",
872 			   portname);
873 		goto err_handler_register;
874 	}
875 
876 	err = __team_option_inst_add_port(team, port);
877 	if (err) {
878 		netdev_err(dev, "Device %s failed to add per-port options\n",
879 			   portname);
880 		goto err_option_port_add;
881 	}
882 
883 	port->index = -1;
884 	team_port_enable(team, port);
885 	list_add_tail_rcu(&port->list, &team->port_list);
886 	__team_compute_features(team);
887 	__team_port_change_check(port, !!netif_carrier_ok(port_dev));
888 	__team_options_change_check(team);
889 
890 	netdev_info(dev, "Port device %s added\n", portname);
891 
892 	return 0;
893 
894 err_option_port_add:
895 	netdev_rx_handler_unregister(port_dev);
896 
897 err_handler_register:
898 	netdev_set_master(port_dev, NULL);
899 
900 err_set_master:
901 	vlan_vids_del_by_dev(port_dev, dev);
902 
903 err_vids_add:
904 	dev_close(port_dev);
905 
906 err_dev_open:
907 	team_port_leave(team, port);
908 	team_port_set_orig_mac(port);
909 
910 err_port_enter:
911 	dev_set_mtu(port_dev, port->orig.mtu);
912 
913 err_set_mtu:
914 	kfree(port);
915 
916 	return err;
917 }
918 
919 static int team_port_del(struct team *team, struct net_device *port_dev)
920 {
921 	struct net_device *dev = team->dev;
922 	struct team_port *port;
923 	char *portname = port_dev->name;
924 
925 	port = team_port_get_rtnl(port_dev);
926 	if (!port || !team_port_find(team, port)) {
927 		netdev_err(dev, "Device %s does not act as a port of this team\n",
928 			   portname);
929 		return -ENOENT;
930 	}
931 
932 	__team_option_inst_mark_removed_port(team, port);
933 	__team_options_change_check(team);
934 	__team_option_inst_del_port(team, port);
935 	port->removed = true;
936 	__team_port_change_check(port, false);
937 	team_port_disable(team, port);
938 	list_del_rcu(&port->list);
939 	netdev_rx_handler_unregister(port_dev);
940 	netdev_set_master(port_dev, NULL);
941 	vlan_vids_del_by_dev(port_dev, dev);
942 	dev_close(port_dev);
943 	team_port_leave(team, port);
944 	team_port_set_orig_mac(port);
945 	dev_set_mtu(port_dev, port->orig.mtu);
946 	synchronize_rcu();
947 	kfree(port);
948 	netdev_info(dev, "Port device %s removed\n", portname);
949 	__team_compute_features(team);
950 
951 	return 0;
952 }
953 
954 
955 /*****************
956  * Net device ops
957  *****************/
958 
959 static int team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx)
960 {
961 	ctx->data.str_val = team->mode->kind;
962 	return 0;
963 }
964 
965 static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
966 {
967 	return team_change_mode(team, ctx->data.str_val);
968 }
969 
970 static int team_port_en_option_get(struct team *team,
971 				   struct team_gsetter_ctx *ctx)
972 {
973 	struct team_port *port = ctx->info->port;
974 
975 	ctx->data.bool_val = team_port_enabled(port);
976 	return 0;
977 }
978 
979 static int team_port_en_option_set(struct team *team,
980 				   struct team_gsetter_ctx *ctx)
981 {
982 	struct team_port *port = ctx->info->port;
983 
984 	if (ctx->data.bool_val)
985 		team_port_enable(team, port);
986 	else
987 		team_port_disable(team, port);
988 	return 0;
989 }
990 
991 static int team_user_linkup_option_get(struct team *team,
992 				       struct team_gsetter_ctx *ctx)
993 {
994 	struct team_port *port = ctx->info->port;
995 
996 	ctx->data.bool_val = port->user.linkup;
997 	return 0;
998 }
999 
1000 static int team_user_linkup_option_set(struct team *team,
1001 				       struct team_gsetter_ctx *ctx)
1002 {
1003 	struct team_port *port = ctx->info->port;
1004 
1005 	port->user.linkup = ctx->data.bool_val;
1006 	team_refresh_port_linkup(port);
1007 	return 0;
1008 }
1009 
1010 static int team_user_linkup_en_option_get(struct team *team,
1011 					  struct team_gsetter_ctx *ctx)
1012 {
1013 	struct team_port *port = ctx->info->port;
1014 
1015 	ctx->data.bool_val = port->user.linkup_enabled;
1016 	return 0;
1017 }
1018 
1019 static int team_user_linkup_en_option_set(struct team *team,
1020 					  struct team_gsetter_ctx *ctx)
1021 {
1022 	struct team_port *port = ctx->info->port;
1023 
1024 	port->user.linkup_enabled = ctx->data.bool_val;
1025 	team_refresh_port_linkup(port);
1026 	return 0;
1027 }
1028 
1029 static const struct team_option team_options[] = {
1030 	{
1031 		.name = "mode",
1032 		.type = TEAM_OPTION_TYPE_STRING,
1033 		.getter = team_mode_option_get,
1034 		.setter = team_mode_option_set,
1035 	},
1036 	{
1037 		.name = "enabled",
1038 		.type = TEAM_OPTION_TYPE_BOOL,
1039 		.per_port = true,
1040 		.getter = team_port_en_option_get,
1041 		.setter = team_port_en_option_set,
1042 	},
1043 	{
1044 		.name = "user_linkup",
1045 		.type = TEAM_OPTION_TYPE_BOOL,
1046 		.per_port = true,
1047 		.getter = team_user_linkup_option_get,
1048 		.setter = team_user_linkup_option_set,
1049 	},
1050 	{
1051 		.name = "user_linkup_enabled",
1052 		.type = TEAM_OPTION_TYPE_BOOL,
1053 		.per_port = true,
1054 		.getter = team_user_linkup_en_option_get,
1055 		.setter = team_user_linkup_en_option_set,
1056 	},
1057 };
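
/*
 * These core options, together with any mode-private ones, are what
 * userspace reads and writes over the generic netlink interface below
 * (TEAM_CMD_OPTIONS_GET/TEAM_CMD_OPTIONS_SET). Setting the "mode" string
 * option, for example, ends up in team_mode_option_set() and thus
 * team_change_mode(). Tooling such as libteam/teamd wraps these messages,
 * so they are rarely crafted by hand.
 */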
1058 
1059 static int team_init(struct net_device *dev)
1060 {
1061 	struct team *team = netdev_priv(dev);
1062 	int i;
1063 	int err;
1064 
1065 	team->dev = dev;
1066 	mutex_init(&team->lock);
1067 	team_set_no_mode(team);
1068 
1069 	team->pcpu_stats = alloc_percpu(struct team_pcpu_stats);
1070 	if (!team->pcpu_stats)
1071 		return -ENOMEM;
1072 
1073 	for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
1074 		INIT_HLIST_HEAD(&team->en_port_hlist[i]);
1075 	INIT_LIST_HEAD(&team->port_list);
1076 
1077 	team_adjust_ops(team);
1078 
1079 	INIT_LIST_HEAD(&team->option_list);
1080 	INIT_LIST_HEAD(&team->option_inst_list);
1081 	err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
1082 	if (err)
1083 		goto err_options_register;
1084 	netif_carrier_off(dev);
1085 
1086 	return 0;
1087 
1088 err_options_register:
1089 	free_percpu(team->pcpu_stats);
1090 
1091 	return err;
1092 }
1093 
1094 static void team_uninit(struct net_device *dev)
1095 {
1096 	struct team *team = netdev_priv(dev);
1097 	struct team_port *port;
1098 	struct team_port *tmp;
1099 
1100 	mutex_lock(&team->lock);
1101 	list_for_each_entry_safe(port, tmp, &team->port_list, list)
1102 		team_port_del(team, port->dev);
1103 
1104 	__team_change_mode(team, NULL); /* cleanup */
1105 	__team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
1106 	mutex_unlock(&team->lock);
1107 }
1108 
1109 static void team_destructor(struct net_device *dev)
1110 {
1111 	struct team *team = netdev_priv(dev);
1112 
1113 	free_percpu(team->pcpu_stats);
1114 	free_netdev(dev);
1115 }
1116 
1117 static int team_open(struct net_device *dev)
1118 {
1119 	netif_carrier_on(dev);
1120 	return 0;
1121 }
1122 
1123 static int team_close(struct net_device *dev)
1124 {
1125 	netif_carrier_off(dev);
1126 	return 0;
1127 }
1128 
1129 /*
1130  * note: already called with rcu_read_lock
1131  */
1132 static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
1133 {
1134 	struct team *team = netdev_priv(dev);
1135 	bool tx_success = false;
1136 	unsigned int len = skb->len;
1137 
1138 	tx_success = team->ops.transmit(team, skb);
1139 	if (tx_success) {
1140 		struct team_pcpu_stats *pcpu_stats;
1141 
1142 		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
1143 		u64_stats_update_begin(&pcpu_stats->syncp);
1144 		pcpu_stats->tx_packets++;
1145 		pcpu_stats->tx_bytes += len;
1146 		u64_stats_update_end(&pcpu_stats->syncp);
1147 	} else {
1148 		this_cpu_inc(team->pcpu_stats->tx_dropped);
1149 	}
1150 
1151 	return NETDEV_TX_OK;
1152 }
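
/*
 * For mode authors: ops.transmit must consume the skb in all cases and
 * return true only if it was handed off to a port; false makes the code
 * above count the packet as tx_dropped. A round-robin style transmit looks
 * roughly like this (sketch; foo_pick_port() is hypothetical):
 *
 *	static bool foo_transmit(struct team *team, struct sk_buff *skb)
 *	{
 *		struct team_port *port = foo_pick_port(team);
 *
 *		if (!port || !team_port_enabled(port))
 *			goto drop;
 *		skb->dev = port->dev;
 *		if (dev_queue_xmit(skb))
 *			return false;
 *		return true;
 *	drop:
 *		dev_kfree_skb_any(skb);
 *		return false;
 *	}
 */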
1153 
1154 static void team_change_rx_flags(struct net_device *dev, int change)
1155 {
1156 	struct team *team = netdev_priv(dev);
1157 	struct team_port *port;
1158 	int inc;
1159 
1160 	rcu_read_lock();
1161 	list_for_each_entry_rcu(port, &team->port_list, list) {
1162 		if (change & IFF_PROMISC) {
1163 			inc = dev->flags & IFF_PROMISC ? 1 : -1;
1164 			dev_set_promiscuity(port->dev, inc);
1165 		}
1166 		if (change & IFF_ALLMULTI) {
1167 			inc = dev->flags & IFF_ALLMULTI ? 1 : -1;
1168 			dev_set_allmulti(port->dev, inc);
1169 		}
1170 	}
1171 	rcu_read_unlock();
1172 }
1173 
1174 static void team_set_rx_mode(struct net_device *dev)
1175 {
1176 	struct team *team = netdev_priv(dev);
1177 	struct team_port *port;
1178 
1179 	rcu_read_lock();
1180 	list_for_each_entry_rcu(port, &team->port_list, list) {
1181 		dev_uc_sync(port->dev, dev);
1182 		dev_mc_sync(port->dev, dev);
1183 	}
1184 	rcu_read_unlock();
1185 }
1186 
1187 static int team_set_mac_address(struct net_device *dev, void *p)
1188 {
1189 	struct team *team = netdev_priv(dev);
1190 	struct team_port *port;
1191 	struct sockaddr *addr = p;
1192 
1193 	dev->addr_assign_type &= ~NET_ADDR_RANDOM;
1194 	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
1195 	rcu_read_lock();
1196 	list_for_each_entry_rcu(port, &team->port_list, list)
1197 		if (team->ops.port_change_mac)
1198 			team->ops.port_change_mac(team, port);
1199 	rcu_read_unlock();
1200 	return 0;
1201 }
1202 
1203 static int team_change_mtu(struct net_device *dev, int new_mtu)
1204 {
1205 	struct team *team = netdev_priv(dev);
1206 	struct team_port *port;
1207 	int err;
1208 
1209 	/*
1210 	 * Although this is a reader, it's guarded by team->lock. It's not
1211 	 * possible to traverse the list in reverse under rcu_read_lock.
1212 	 */
1213 	mutex_lock(&team->lock);
1214 	list_for_each_entry(port, &team->port_list, list) {
1215 		err = dev_set_mtu(port->dev, new_mtu);
1216 		if (err) {
1217 			netdev_err(dev, "Device %s failed to change mtu\n",
1218 				   port->dev->name);
1219 			goto unwind;
1220 		}
1221 	}
1222 	mutex_unlock(&team->lock);
1223 
1224 	dev->mtu = new_mtu;
1225 
1226 	return 0;
1227 
1228 unwind:
1229 	list_for_each_entry_continue_reverse(port, &team->port_list, list)
1230 		dev_set_mtu(port->dev, dev->mtu);
1231 	mutex_unlock(&team->lock);
1232 
1233 	return err;
1234 }
1235 
1236 static struct rtnl_link_stats64 *
1237 team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1238 {
1239 	struct team *team = netdev_priv(dev);
1240 	struct team_pcpu_stats *p;
1241 	u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
1242 	u32 rx_dropped = 0, tx_dropped = 0;
1243 	unsigned int start;
1244 	int i;
1245 
1246 	for_each_possible_cpu(i) {
1247 		p = per_cpu_ptr(team->pcpu_stats, i);
1248 		do {
1249 			start = u64_stats_fetch_begin_bh(&p->syncp);
1250 			rx_packets	= p->rx_packets;
1251 			rx_bytes	= p->rx_bytes;
1252 			rx_multicast	= p->rx_multicast;
1253 			tx_packets	= p->tx_packets;
1254 			tx_bytes	= p->tx_bytes;
1255 		} while (u64_stats_fetch_retry_bh(&p->syncp, start));
1256 
1257 		stats->rx_packets	+= rx_packets;
1258 		stats->rx_bytes		+= rx_bytes;
1259 		stats->multicast	+= rx_multicast;
1260 		stats->tx_packets	+= tx_packets;
1261 		stats->tx_bytes		+= tx_bytes;
1262 		/*
1263 		 * rx_dropped & tx_dropped are u32, updated
1264 		 * without syncp protection.
1265 		 */
1266 		rx_dropped	+= p->rx_dropped;
1267 		tx_dropped	+= p->tx_dropped;
1268 	}
1269 	stats->rx_dropped	= rx_dropped;
1270 	stats->tx_dropped	= tx_dropped;
1271 	return stats;
1272 }
1273 
1274 static int team_vlan_rx_add_vid(struct net_device *dev, uint16_t vid)
1275 {
1276 	struct team *team = netdev_priv(dev);
1277 	struct team_port *port;
1278 	int err;
1279 
1280 	/*
1281 	 * Although this is a reader, it's guarded by team->lock. It's not
1282 	 * possible to traverse the list in reverse under rcu_read_lock.
1283 	 */
1284 	mutex_lock(&team->lock);
1285 	list_for_each_entry(port, &team->port_list, list) {
1286 		err = vlan_vid_add(port->dev, vid);
1287 		if (err)
1288 			goto unwind;
1289 	}
1290 	mutex_unlock(&team->lock);
1291 
1292 	return 0;
1293 
1294 unwind:
1295 	list_for_each_entry_continue_reverse(port, &team->port_list, list)
1296 		vlan_vid_del(port->dev, vid);
1297 	mutex_unlock(&team->lock);
1298 
1299 	return err;
1300 }
1301 
1302 static int team_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
1303 {
1304 	struct team *team = netdev_priv(dev);
1305 	struct team_port *port;
1306 
1307 	rcu_read_lock();
1308 	list_for_each_entry_rcu(port, &team->port_list, list)
1309 		vlan_vid_del(port->dev, vid);
1310 	rcu_read_unlock();
1311 
1312 	return 0;
1313 }
1314 
1315 static int team_add_slave(struct net_device *dev, struct net_device *port_dev)
1316 {
1317 	struct team *team = netdev_priv(dev);
1318 	int err;
1319 
1320 	mutex_lock(&team->lock);
1321 	err = team_port_add(team, port_dev);
1322 	mutex_unlock(&team->lock);
1323 	return err;
1324 }
1325 
1326 static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
1327 {
1328 	struct team *team = netdev_priv(dev);
1329 	int err;
1330 
1331 	mutex_lock(&team->lock);
1332 	err = team_port_del(team, port_dev);
1333 	mutex_unlock(&team->lock);
1334 	return err;
1335 }
1336 
1337 static netdev_features_t team_fix_features(struct net_device *dev,
1338 					   netdev_features_t features)
1339 {
1340 	struct team_port *port;
1341 	struct team *team = netdev_priv(dev);
1342 	netdev_features_t mask;
1343 
1344 	mask = features;
1345 	features &= ~NETIF_F_ONE_FOR_ALL;
1346 	features |= NETIF_F_ALL_FOR_ALL;
1347 
1348 	rcu_read_lock();
1349 	list_for_each_entry_rcu(port, &team->port_list, list) {
1350 		features = netdev_increment_features(features,
1351 						     port->dev->features,
1352 						     mask);
1353 	}
1354 	rcu_read_unlock();
1355 	return features;
1356 }
1357 
1358 static const struct net_device_ops team_netdev_ops = {
1359 	.ndo_init		= team_init,
1360 	.ndo_uninit		= team_uninit,
1361 	.ndo_open		= team_open,
1362 	.ndo_stop		= team_close,
1363 	.ndo_start_xmit		= team_xmit,
1364 	.ndo_change_rx_flags	= team_change_rx_flags,
1365 	.ndo_set_rx_mode	= team_set_rx_mode,
1366 	.ndo_set_mac_address	= team_set_mac_address,
1367 	.ndo_change_mtu		= team_change_mtu,
1368 	.ndo_get_stats64	= team_get_stats64,
1369 	.ndo_vlan_rx_add_vid	= team_vlan_rx_add_vid,
1370 	.ndo_vlan_rx_kill_vid	= team_vlan_rx_kill_vid,
1371 	.ndo_add_slave		= team_add_slave,
1372 	.ndo_del_slave		= team_del_slave,
1373 	.ndo_fix_features	= team_fix_features,
1374 };
1375 
1376 
1377 /***********************
1378  * rt netlink interface
1379  ***********************/
1380 
1381 static void team_setup(struct net_device *dev)
1382 {
1383 	ether_setup(dev);
1384 
1385 	dev->netdev_ops = &team_netdev_ops;
1386 	dev->destructor	= team_destructor;
1387 	dev->tx_queue_len = 0;
1388 	dev->flags |= IFF_MULTICAST;
1389 	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
1390 
1391 	/*
1392 	 * Indicate that we support unicast address filtering. That way the
1393 	 * core won't force us into promiscuous mode when a unicast address
1394 	 * is added; leave that decision to the underlying port drivers.
1395 	 */
1396 	dev->priv_flags |= IFF_UNICAST_FLT;
1397 
1398 	dev->features |= NETIF_F_LLTX;
1399 	dev->features |= NETIF_F_GRO;
1400 	dev->hw_features = NETIF_F_HW_VLAN_TX |
1401 			   NETIF_F_HW_VLAN_RX |
1402 			   NETIF_F_HW_VLAN_FILTER;
1403 
1404 	dev->features |= dev->hw_features;
1405 }
1406 
1407 static int team_newlink(struct net *src_net, struct net_device *dev,
1408 			struct nlattr *tb[], struct nlattr *data[])
1409 {
1410 	int err;
1411 
1412 	if (tb[IFLA_ADDRESS] == NULL)
1413 		eth_hw_addr_random(dev);
1414 
1415 	err = register_netdevice(dev);
1416 	if (err)
1417 		return err;
1418 
1419 	return 0;
1420 }
1421 
1422 static int team_validate(struct nlattr *tb[], struct nlattr *data[])
1423 {
1424 	if (tb[IFLA_ADDRESS]) {
1425 		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1426 			return -EINVAL;
1427 		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1428 			return -EADDRNOTAVAIL;
1429 	}
1430 	return 0;
1431 }
1432 
1433 static struct rtnl_link_ops team_link_ops __read_mostly = {
1434 	.kind		= DRV_NAME,
1435 	.priv_size	= sizeof(struct team),
1436 	.setup		= team_setup,
1437 	.newlink	= team_newlink,
1438 	.validate	= team_validate,
1439 };
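
/*
 * With these rtnl_link_ops registered, a team device is created like any
 * other link type, and ports attach through the standard master handling
 * that lands in ndo_add_slave (team_add_slave). Illustrative iproute2
 * invocations:
 *
 *	ip link add name team0 type team
 *	ip link set eth0 down
 *	ip link set eth0 master team0
 */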
1440 
1441 
1442 /***********************************
1443  * Generic netlink custom interface
1444  ***********************************/
1445 
1446 static struct genl_family team_nl_family = {
1447 	.id		= GENL_ID_GENERATE,
1448 	.name		= TEAM_GENL_NAME,
1449 	.version	= TEAM_GENL_VERSION,
1450 	.maxattr	= TEAM_ATTR_MAX,
1451 	.netnsok	= true,
1452 };
1453 
1454 static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = {
1455 	[TEAM_ATTR_UNSPEC]			= { .type = NLA_UNSPEC, },
1456 	[TEAM_ATTR_TEAM_IFINDEX]		= { .type = NLA_U32 },
1457 	[TEAM_ATTR_LIST_OPTION]			= { .type = NLA_NESTED },
1458 	[TEAM_ATTR_LIST_PORT]			= { .type = NLA_NESTED },
1459 };
1460 
1461 static const struct nla_policy
1462 team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
1463 	[TEAM_ATTR_OPTION_UNSPEC]		= { .type = NLA_UNSPEC, },
1464 	[TEAM_ATTR_OPTION_NAME] = {
1465 		.type = NLA_STRING,
1466 		.len = TEAM_STRING_MAX_LEN,
1467 	},
1468 	[TEAM_ATTR_OPTION_CHANGED]		= { .type = NLA_FLAG },
1469 	[TEAM_ATTR_OPTION_TYPE]			= { .type = NLA_U8 },
1470 	[TEAM_ATTR_OPTION_DATA]			= { .type = NLA_BINARY },
1471 };
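
/*
 * Putting the two policies together, a TEAM_CMD_OPTIONS_SET request is
 * laid out as follows (GET replies mirror the same nesting):
 *
 *	TEAM_ATTR_TEAM_IFINDEX (u32)
 *	TEAM_ATTR_LIST_OPTION (nested)
 *		TEAM_ATTR_ITEM_OPTION (nested, one per option)
 *			TEAM_ATTR_OPTION_NAME (string)
 *			TEAM_ATTR_OPTION_TYPE (u8: NLA_U32/NLA_STRING/...)
 *			TEAM_ATTR_OPTION_DATA (payload matching the type)
 *			TEAM_ATTR_OPTION_PORT_IFINDEX (u32, per-port options)
 *			TEAM_ATTR_OPTION_ARRAY_INDEX (u32, array options)
 *
 * See team_nl_cmd_options_set() below for the parsing side.
 */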
1472 
1473 static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
1474 {
1475 	struct sk_buff *msg;
1476 	void *hdr;
1477 	int err;
1478 
1479 	msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
1480 	if (!msg)
1481 		return -ENOMEM;
1482 
1483 	hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
1484 			  &team_nl_family, 0, TEAM_CMD_NOOP);
1485 	if (!hdr) {
1486 		err = -EMSGSIZE;
1487 		goto err_msg_put;
1488 	}
1489 
1490 	genlmsg_end(msg, hdr);
1491 
1492 	return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid);
1493 
1494 err_msg_put:
1495 	nlmsg_free(msg);
1496 
1497 	return err;
1498 }
1499 
1500 /*
1501  * Netlink cmd functions should be bracketed by the following two
1502  * functions. Since dev gets held here, it cannot disappear in between.
1503  */
1504 static struct team *team_nl_team_get(struct genl_info *info)
1505 {
1506 	struct net *net = genl_info_net(info);
1507 	int ifindex;
1508 	struct net_device *dev;
1509 	struct team *team;
1510 
1511 	if (!info->attrs[TEAM_ATTR_TEAM_IFINDEX])
1512 		return NULL;
1513 
1514 	ifindex = nla_get_u32(info->attrs[TEAM_ATTR_TEAM_IFINDEX]);
1515 	dev = dev_get_by_index(net, ifindex);
1516 	if (!dev || dev->netdev_ops != &team_netdev_ops) {
1517 		if (dev)
1518 			dev_put(dev);
1519 		return NULL;
1520 	}
1521 
1522 	team = netdev_priv(dev);
1523 	mutex_lock(&team->lock);
1524 	return team;
1525 }
1526 
1527 static void team_nl_team_put(struct team *team)
1528 {
1529 	mutex_unlock(&team->lock);
1530 	dev_put(team->dev);
1531 }
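
/*
 * Typical handler shape (see the cmd functions below): team_nl_team_get()
 * holds a reference on the underlying net_device and takes team->lock, so
 * the handler body can sleep and the device cannot vanish under it;
 * team_nl_team_put() drops both:
 *
 *	team = team_nl_team_get(info);
 *	if (!team)
 *		return -EINVAL;
 *	...
 *	team_nl_team_put(team);
 */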
1532 
1533 static int team_nl_send_generic(struct genl_info *info, struct team *team,
1534 				int (*fill_func)(struct sk_buff *skb,
1535 						 struct genl_info *info,
1536 						 int flags, struct team *team))
1537 {
1538 	struct sk_buff *skb;
1539 	int err;
1540 
1541 	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
1542 	if (!skb)
1543 		return -ENOMEM;
1544 
1545 	err = fill_func(skb, info, NLM_F_ACK, team);
1546 	if (err < 0)
1547 		goto err_fill;
1548 
1549 	err = genlmsg_unicast(genl_info_net(info), skb, info->snd_pid);
1550 	return err;
1551 
1552 err_fill:
1553 	nlmsg_free(skb);
1554 	return err;
1555 }
1556 
1557 typedef int team_nl_send_func_t(struct sk_buff *skb,
1558 				struct team *team, u32 pid);
1559 
1560 static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 pid)
1561 {
1562 	return genlmsg_unicast(dev_net(team->dev), skb, pid);
1563 }
1564 
1565 static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
1566 				       struct team_option_inst *opt_inst)
1567 {
1568 	struct nlattr *option_item;
1569 	struct team_option *option = opt_inst->option;
1570 	struct team_option_inst_info *opt_inst_info = &opt_inst->info;
1571 	struct team_gsetter_ctx ctx;
1572 	int err;
1573 
1574 	ctx.info = opt_inst_info;
1575 	err = team_option_get(team, opt_inst, &ctx);
1576 	if (err)
1577 		return err;
1578 
1579 	option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
1580 	if (!option_item)
1581 		return -EMSGSIZE;
1582 
1583 	if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name))
1584 		goto nest_cancel;
1585 	if (opt_inst_info->port &&
1586 	    nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
1587 			opt_inst_info->port->dev->ifindex))
1588 		goto nest_cancel;
1589 	if (opt_inst->option->array_size &&
1590 	    nla_put_u32(skb, TEAM_ATTR_OPTION_ARRAY_INDEX,
1591 			opt_inst_info->array_index))
1592 		goto nest_cancel;
1593 
1594 	switch (option->type) {
1595 	case TEAM_OPTION_TYPE_U32:
1596 		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
1597 			goto nest_cancel;
1598 		if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.u32_val))
1599 			goto nest_cancel;
1600 		break;
1601 	case TEAM_OPTION_TYPE_STRING:
1602 		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
1603 			goto nest_cancel;
1604 		if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
1605 				   ctx.data.str_val))
1606 			goto nest_cancel;
1607 		break;
1608 	case TEAM_OPTION_TYPE_BINARY:
1609 		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
1610 			goto nest_cancel;
1611 		if (nla_put(skb, TEAM_ATTR_OPTION_DATA, ctx.data.bin_val.len,
1612 			    ctx.data.bin_val.ptr))
1613 			goto nest_cancel;
1614 		break;
1615 	case TEAM_OPTION_TYPE_BOOL:
1616 		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
1617 			goto nest_cancel;
1618 		if (ctx.data.bool_val &&
1619 		    nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
1620 			goto nest_cancel;
1621 		break;
1622 	default:
1623 		BUG();
1624 	}
1625 	if (opt_inst->removed && nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
1626 		goto nest_cancel;
1627 	if (opt_inst->changed) {
1628 		if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED))
1629 			goto nest_cancel;
1630 		opt_inst->changed = false;
1631 	}
1632 	nla_nest_end(skb, option_item);
1633 	return 0;
1634 
1635 nest_cancel:
1636 	nla_nest_cancel(skb, option_item);
1637 	return -EMSGSIZE;
1638 }
1639 
1640 static int __send_and_alloc_skb(struct sk_buff **pskb,
1641 				struct team *team, u32 pid,
1642 				team_nl_send_func_t *send_func)
1643 {
1644 	int err;
1645 
1646 	if (*pskb) {
1647 		err = send_func(*pskb, team, pid);
1648 		if (err)
1649 			return err;
1650 	}
1651 	*pskb = genlmsg_new(NLMSG_DEFAULT_SIZE - GENL_HDRLEN, GFP_KERNEL);
1652 	if (!*pskb)
1653 		return -ENOMEM;
1654 	return 0;
1655 }
1656 
1657 static int team_nl_send_options_get(struct team *team, u32 pid, u32 seq,
1658 				    int flags, team_nl_send_func_t *send_func,
1659 				    struct list_head *sel_opt_inst_list)
1660 {
1661 	struct nlattr *option_list;
1662 	struct nlmsghdr *nlh;
1663 	void *hdr;
1664 	struct team_option_inst *opt_inst;
1665 	int err;
1666 	struct sk_buff *skb = NULL;
1667 	bool incomplete;
1668 	int i;
1669 
1670 	opt_inst = list_first_entry(sel_opt_inst_list,
1671 				    struct team_option_inst, tmp_list);
1672 
1673 start_again:
1674 	err = __send_and_alloc_skb(&skb, team, pid, send_func);
1675 	if (err)
1676 		return err;
1677 
1678 	hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags | NLM_F_MULTI,
1679 			  TEAM_CMD_OPTIONS_GET);
1680 	if (!hdr) {
		nlmsg_free(skb);
1681 		return -EMSGSIZE;
	}
1682 
1683 	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
1684 		goto nla_put_failure;
1685 	option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION);
1686 	if (!option_list)
1687 		goto nla_put_failure;
1688 
1689 	i = 0;
1690 	incomplete = false;
1691 	list_for_each_entry_from(opt_inst, sel_opt_inst_list, tmp_list) {
1692 		err = team_nl_fill_one_option_get(skb, team, opt_inst);
1693 		if (err) {
1694 			if (err == -EMSGSIZE) {
1695 				if (!i)
1696 					goto errout;
1697 				incomplete = true;
1698 				break;
1699 			}
1700 			goto errout;
1701 		}
1702 		i++;
1703 	}
1704 
1705 	nla_nest_end(skb, option_list);
1706 	genlmsg_end(skb, hdr);
1707 	if (incomplete)
1708 		goto start_again;
1709 
1710 send_done:
1711 	nlh = nlmsg_put(skb, pid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
1712 	if (!nlh) {
1713 		err = __send_and_alloc_skb(&skb, team, pid, send_func);
1714 		if (err)
1715 			goto errout;
1716 		goto send_done;
1717 	}
1718 
1719 	return send_func(skb, team, pid);
1720 
1721 nla_put_failure:
1722 	err = -EMSGSIZE;
1723 errout:
1724 	genlmsg_cancel(skb, hdr);
1725 	nlmsg_free(skb);
1726 	return err;
1727 }
1728 
1729 static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
1730 {
1731 	struct team *team;
1732 	struct team_option_inst *opt_inst;
1733 	int err;
1734 	LIST_HEAD(sel_opt_inst_list);
1735 
1736 	team = team_nl_team_get(info);
1737 	if (!team)
1738 		return -EINVAL;
1739 
1740 	list_for_each_entry(opt_inst, &team->option_inst_list, list)
1741 		list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
1742 	err = team_nl_send_options_get(team, info->snd_pid, info->snd_seq,
1743 				       NLM_F_ACK, team_nl_send_unicast,
1744 				       &sel_opt_inst_list);
1745 
1746 	team_nl_team_put(team);
1747 
1748 	return err;
1749 }
1750 
1751 static int team_nl_send_event_options_get(struct team *team,
1752 					  struct list_head *sel_opt_inst_list);
1753 
1754 static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
1755 {
1756 	struct team *team;
1757 	int err = 0;
1758 	int i;
1759 	struct nlattr *nl_option;
1760 	LIST_HEAD(opt_inst_list);
1761 
1762 	team = team_nl_team_get(info);
1763 	if (!team)
1764 		return -EINVAL;
1765 
1767 	if (!info->attrs[TEAM_ATTR_LIST_OPTION]) {
1768 		err = -EINVAL;
1769 		goto team_put;
1770 	}
1771 
1772 	nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
1773 		struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
1774 		struct nlattr *attr;
1775 		struct nlattr *attr_data;
1776 		enum team_option_type opt_type;
1777 		int opt_port_ifindex = 0; /* != 0 for per-port options */
1778 		u32 opt_array_index = 0;
1779 		bool opt_is_array = false;
1780 		struct team_option_inst *opt_inst;
1781 		char *opt_name;
1782 		bool opt_found = false;
1783 
1784 		if (nla_type(nl_option) != TEAM_ATTR_ITEM_OPTION) {
1785 			err = -EINVAL;
1786 			goto team_put;
1787 		}
1788 		err = nla_parse_nested(opt_attrs, TEAM_ATTR_OPTION_MAX,
1789 				       nl_option, team_nl_option_policy);
1790 		if (err)
1791 			goto team_put;
1792 		if (!opt_attrs[TEAM_ATTR_OPTION_NAME] ||
1793 		    !opt_attrs[TEAM_ATTR_OPTION_TYPE]) {
1794 			err = -EINVAL;
1795 			goto team_put;
1796 		}
1797 		switch (nla_get_u8(opt_attrs[TEAM_ATTR_OPTION_TYPE])) {
1798 		case NLA_U32:
1799 			opt_type = TEAM_OPTION_TYPE_U32;
1800 			break;
1801 		case NLA_STRING:
1802 			opt_type = TEAM_OPTION_TYPE_STRING;
1803 			break;
1804 		case NLA_BINARY:
1805 			opt_type = TEAM_OPTION_TYPE_BINARY;
1806 			break;
1807 		case NLA_FLAG:
1808 			opt_type = TEAM_OPTION_TYPE_BOOL;
1809 			break;
1810 		default:
			err = -EINVAL;
1811 			goto team_put;
1812 		}
1813 
1814 		attr_data = opt_attrs[TEAM_ATTR_OPTION_DATA];
1815 		if (opt_type != TEAM_OPTION_TYPE_BOOL && !attr_data) {
1816 			err = -EINVAL;
1817 			goto team_put;
1818 		}
1819 
1820 		opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]);
1821 		attr = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX];
1822 		if (attr)
1823 			opt_port_ifindex = nla_get_u32(attr);
1824 
1825 		attr = opt_attrs[TEAM_ATTR_OPTION_ARRAY_INDEX];
1826 		if (attr) {
1827 			opt_is_array = true;
1828 			opt_array_index = nla_get_u32(attr);
1829 		}
1830 
1831 		list_for_each_entry(opt_inst, &team->option_inst_list, list) {
1832 			struct team_option *option = opt_inst->option;
1833 			struct team_gsetter_ctx ctx;
1834 			struct team_option_inst_info *opt_inst_info;
1835 			int tmp_ifindex;
1836 
1837 			opt_inst_info = &opt_inst->info;
1838 			tmp_ifindex = opt_inst_info->port ?
1839 				      opt_inst_info->port->dev->ifindex : 0;
1840 			if (option->type != opt_type ||
1841 			    strcmp(option->name, opt_name) ||
1842 			    tmp_ifindex != opt_port_ifindex ||
1843 			    (option->array_size && !opt_is_array) ||
1844 			    opt_inst_info->array_index != opt_array_index)
1845 				continue;
1846 			opt_found = true;
1847 			ctx.info = opt_inst_info;
1848 			switch (opt_type) {
1849 			case TEAM_OPTION_TYPE_U32:
1850 				ctx.data.u32_val = nla_get_u32(attr_data);
1851 				break;
1852 			case TEAM_OPTION_TYPE_STRING:
1853 				if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) {
1854 					err = -EINVAL;
1855 					goto team_put;
1856 				}
1857 				ctx.data.str_val = nla_data(attr_data);
1858 				break;
1859 			case TEAM_OPTION_TYPE_BINARY:
1860 				ctx.data.bin_val.len = nla_len(attr_data);
1861 				ctx.data.bin_val.ptr = nla_data(attr_data);
1862 				break;
1863 			case TEAM_OPTION_TYPE_BOOL:
1864 				ctx.data.bool_val = attr_data ? true : false;
1865 				break;
1866 			default:
1867 				BUG();
1868 			}
1869 			err = team_option_set(team, opt_inst, &ctx);
1870 			if (err)
1871 				goto team_put;
1872 			opt_inst->changed = true;
1873 			list_add(&opt_inst->tmp_list, &opt_inst_list);
1874 		}
1875 		if (!opt_found) {
1876 			err = -ENOENT;
1877 			goto team_put;
1878 		}
1879 	}
1880 
1881 	err = team_nl_send_event_options_get(team, &opt_inst_list);
1882 
1883 team_put:
1884 	team_nl_team_put(team);
1885 
1886 	return err;
1887 }
1888 
1889 static int team_nl_fill_port_list_get(struct sk_buff *skb,
1890 				      u32 pid, u32 seq, int flags,
1891 				      struct team *team,
1892 				      bool fillall)
1893 {
1894 	struct nlattr *port_list;
1895 	void *hdr;
1896 	struct team_port *port;
1897 
1898 	hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
1899 			  TEAM_CMD_PORT_LIST_GET);
1900 	if (!hdr)
1901 		return -EMSGSIZE;
1902 
1903 	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
1904 		goto nla_put_failure;
1905 	port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT);
1906 	if (!port_list)
1907 		goto nla_put_failure;
1908 
1909 	list_for_each_entry(port, &team->port_list, list) {
1910 		struct nlattr *port_item;
1911 
1912 		/* Include only changed ports if fill all mode is not on */
1913 		if (!fillall && !port->changed)
1914 			continue;
1915 		port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT);
1916 		if (!port_item)
1917 			goto nla_put_failure;
1918 		if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex))
1919 			goto nla_put_failure;
1920 		if (port->changed) {
1921 			if (nla_put_flag(skb, TEAM_ATTR_PORT_CHANGED))
1922 				goto nla_put_failure;
1923 			port->changed = false;
1924 		}
1925 		if ((port->removed &&
1926 		     nla_put_flag(skb, TEAM_ATTR_PORT_REMOVED)) ||
1927 		    (port->state.linkup &&
1928 		     nla_put_flag(skb, TEAM_ATTR_PORT_LINKUP)) ||
1929 		    nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) ||
1930 		    nla_put_u8(skb, TEAM_ATTR_PORT_DUPLEX, port->state.duplex))
1931 			goto nla_put_failure;
1932 		nla_nest_end(skb, port_item);
1933 	}
1934 
1935 	nla_nest_end(skb, port_list);
1936 	return genlmsg_end(skb, hdr);
1937 
1938 nla_put_failure:
1939 	genlmsg_cancel(skb, hdr);
1940 	return -EMSGSIZE;
1941 }
1942 
1943 static int team_nl_fill_port_list_get_all(struct sk_buff *skb,
1944 					  struct genl_info *info, int flags,
1945 					  struct team *team)
1946 {
1947 	return team_nl_fill_port_list_get(skb, info->snd_pid,
1948 					  info->snd_seq, NLM_F_ACK,
1949 					  team, true);
1950 }
1951 
1952 static int team_nl_cmd_port_list_get(struct sk_buff *skb,
1953 				     struct genl_info *info)
1954 {
1955 	struct team *team;
1956 	int err;
1957 
1958 	team = team_nl_team_get(info);
1959 	if (!team)
1960 		return -EINVAL;
1961 
1962 	err = team_nl_send_generic(info, team, team_nl_fill_port_list_get_all);
1963 
1964 	team_nl_team_put(team);
1965 
1966 	return err;
1967 }
1968 
1969 static struct genl_ops team_nl_ops[] = {
1970 	{
1971 		.cmd = TEAM_CMD_NOOP,
1972 		.doit = team_nl_cmd_noop,
1973 		.policy = team_nl_policy,
1974 	},
1975 	{
1976 		.cmd = TEAM_CMD_OPTIONS_SET,
1977 		.doit = team_nl_cmd_options_set,
1978 		.policy = team_nl_policy,
1979 		.flags = GENL_ADMIN_PERM,
1980 	},
1981 	{
1982 		.cmd = TEAM_CMD_OPTIONS_GET,
1983 		.doit = team_nl_cmd_options_get,
1984 		.policy = team_nl_policy,
1985 		.flags = GENL_ADMIN_PERM,
1986 	},
1987 	{
1988 		.cmd = TEAM_CMD_PORT_LIST_GET,
1989 		.doit = team_nl_cmd_port_list_get,
1990 		.policy = team_nl_policy,
1991 		.flags = GENL_ADMIN_PERM,
1992 	},
1993 };
1994 
1995 static struct genl_multicast_group team_change_event_mcgrp = {
1996 	.name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME,
1997 };
1998 
1999 static int team_nl_send_multicast(struct sk_buff *skb,
2000 				  struct team *team, u32 pid)
2001 {
2002 	return genlmsg_multicast_netns(dev_net(team->dev), skb, 0,
2003 				       team_change_event_mcgrp.id, GFP_KERNEL);
2004 }
2005 
2006 static int team_nl_send_event_options_get(struct team *team,
2007 					  struct list_head *sel_opt_inst_list)
2008 {
2009 	return team_nl_send_options_get(team, 0, 0, 0, team_nl_send_multicast,
2010 					sel_opt_inst_list);
2011 }
2012 
2013 static int team_nl_send_event_port_list_get(struct team *team)
2014 {
2015 	struct sk_buff *skb;
2016 	int err;
2017 	struct net *net = dev_net(team->dev);
2018 
2019 	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
2020 	if (!skb)
2021 		return -ENOMEM;
2022 
2023 	err = team_nl_fill_port_list_get(skb, 0, 0, 0, team, false);
2024 	if (err < 0)
2025 		goto err_fill;
2026 
2027 	err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id,
2028 				      GFP_KERNEL);
2029 	return err;
2030 
2031 err_fill:
2032 	nlmsg_free(skb);
2033 	return err;
2034 }
2035 
2036 static int team_nl_init(void)
2037 {
2038 	int err;
2039 
2040 	err = genl_register_family_with_ops(&team_nl_family, team_nl_ops,
2041 					    ARRAY_SIZE(team_nl_ops));
2042 	if (err)
2043 		return err;
2044 
2045 	err = genl_register_mc_group(&team_nl_family, &team_change_event_mcgrp);
2046 	if (err)
2047 		goto err_change_event_grp_reg;
2048 
2049 	return 0;
2050 
2051 err_change_event_grp_reg:
2052 	genl_unregister_family(&team_nl_family);
2053 
2054 	return err;
2055 }
2056 
2057 static void team_nl_fini(void)
2058 {
2059 	genl_unregister_family(&team_nl_family);
2060 }
2061 
2062 
2063 /******************
2064  * Change checkers
2065  ******************/
2066 
2067 static void __team_options_change_check(struct team *team)
2068 {
2069 	int err;
2070 	struct team_option_inst *opt_inst;
2071 	LIST_HEAD(sel_opt_inst_list);
2072 
2073 	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
2074 		if (opt_inst->changed)
2075 			list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
2076 	}
2077 	err = team_nl_send_event_options_get(team, &sel_opt_inst_list);
2078 	if (err)
2079 		netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n",
2080 			    err);
2081 }
2082 
2083 /* rtnl lock is held */
2084 static void __team_port_change_check(struct team_port *port, bool linkup)
2085 {
2086 	int err;
2087 
2088 	if (!port->removed && port->state.linkup == linkup)
2089 		return;
2090 
2091 	port->changed = true;
2092 	port->state.linkup = linkup;
2093 	team_refresh_port_linkup(port);
2094 	if (linkup) {
2095 		struct ethtool_cmd ecmd;
2096 
2097 		err = __ethtool_get_settings(port->dev, &ecmd);
2098 		if (!err) {
2099 			port->state.speed = ethtool_cmd_speed(&ecmd);
2100 			port->state.duplex = ecmd.duplex;
2101 			goto send_event;
2102 		}
2103 	}
2104 	port->state.speed = 0;
2105 	port->state.duplex = 0;
2106 
2107 send_event:
2108 	err = team_nl_send_event_port_list_get(port->team);
2109 	if (err)
2110 		netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink\n",
2111 			    port->dev->name);
2113 }
2114 
2115 static void team_port_change_check(struct team_port *port, bool linkup)
2116 {
2117 	struct team *team = port->team;
2118 
2119 	mutex_lock(&team->lock);
2120 	__team_port_change_check(port, linkup);
2121 	mutex_unlock(&team->lock);
2122 }
2123 
2124 
2125 /************************************
2126  * Net device notifier event handler
2127  ************************************/
2128 
2129 static int team_device_event(struct notifier_block *unused,
2130 			     unsigned long event, void *ptr)
2131 {
2132 	struct net_device *dev = (struct net_device *) ptr;
2133 	struct team_port *port;
2134 
2135 	port = team_port_get_rtnl(dev);
2136 	if (!port)
2137 		return NOTIFY_DONE;
2138 
2139 	switch (event) {
2140 	case NETDEV_UP:
2141 		if (netif_carrier_ok(dev))
2142 			team_port_change_check(port, true);
		break;
2143 	case NETDEV_DOWN:
2144 		team_port_change_check(port, false);
		break;
2145 	case NETDEV_CHANGE:
2146 		if (netif_running(port->dev))
2147 			team_port_change_check(port,
2148 					       !!netif_carrier_ok(port->dev));
2149 		break;
2150 	case NETDEV_UNREGISTER:
2151 		team_del_slave(port->team->dev, dev);
2152 		break;
2153 	case NETDEV_FEAT_CHANGE:
2154 		team_compute_features(port->team);
2155 		break;
2156 	case NETDEV_CHANGEMTU:
2157 		/* Forbid changing the mtu of the underlying device */
2158 		return NOTIFY_BAD;
2159 	case NETDEV_PRE_TYPE_CHANGE:
2160 		/* Forbid changing the type of the underlying device */
2161 		return NOTIFY_BAD;
2162 	}
2163 	return NOTIFY_DONE;
2164 }
2165 
2166 static struct notifier_block team_notifier_block __read_mostly = {
2167 	.notifier_call = team_device_event,
2168 };
2169 
2170 
2171 /***********************
2172  * Module init and exit
2173  ***********************/
2174 
2175 static int __init team_module_init(void)
2176 {
2177 	int err;
2178 
2179 	register_netdevice_notifier(&team_notifier_block);
2180 
2181 	err = rtnl_link_register(&team_link_ops);
2182 	if (err)
2183 		goto err_rtnl_reg;
2184 
2185 	err = team_nl_init();
2186 	if (err)
2187 		goto err_nl_init;
2188 
2189 	return 0;
2190 
2191 err_nl_init:
2192 	rtnl_link_unregister(&team_link_ops);
2193 
2194 err_rtnl_reg:
2195 	unregister_netdevice_notifier(&team_notifier_block);
2196 
2197 	return err;
2198 }
2199 
2200 static void __exit team_module_exit(void)
2201 {
2202 	team_nl_fini();
2203 	rtnl_link_unregister(&team_link_ops);
2204 	unregister_netdevice_notifier(&team_notifier_block);
2205 }
2206 
2207 module_init(team_module_init);
2208 module_exit(team_module_exit);
2209 
2210 MODULE_LICENSE("GPL v2");
2211 MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
2212 MODULE_DESCRIPTION("Ethernet team device driver");
2213 MODULE_ALIAS_RTNL_LINK(DRV_NAME);
2214