1 /*
2  * drivers/net/team/team.c - Network team device driver
3  * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 2 of the License, or
8  * (at your option) any later version.
9  */
10 
11 #include <linux/kernel.h>
12 #include <linux/types.h>
13 #include <linux/module.h>
14 #include <linux/init.h>
15 #include <linux/slab.h>
16 #include <linux/rcupdate.h>
17 #include <linux/errno.h>
18 #include <linux/ctype.h>
19 #include <linux/notifier.h>
20 #include <linux/netdevice.h>
21 #include <linux/if_vlan.h>
22 #include <linux/if_arp.h>
23 #include <linux/socket.h>
24 #include <linux/etherdevice.h>
25 #include <linux/rtnetlink.h>
26 #include <net/rtnetlink.h>
27 #include <net/genetlink.h>
28 #include <net/netlink.h>
29 #include <linux/if_team.h>
30 
31 #define DRV_NAME "team"
32 
33 
34 /**********
35  * Helpers
36  **********/
37 
38 #define team_port_exists(dev) ((dev)->priv_flags & IFF_TEAM_PORT)
39 
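/*
 * A port device's rx_handler_data points back at its struct team_port.
 * These helpers fetch it under RCU or RTNL protection and use the
 * IFF_TEAM_PORT flag to confirm the device really is an enslaved port.
 */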
40 static struct team_port *team_port_get_rcu(const struct net_device *dev)
41 {
42 	struct team_port *port = rcu_dereference(dev->rx_handler_data);
43 
44 	return team_port_exists(dev) ? port : NULL;
45 }
46 
47 static struct team_port *team_port_get_rtnl(const struct net_device *dev)
48 {
49 	struct team_port *port = rtnl_dereference(dev->rx_handler_data);
50 
51 	return team_port_exists(dev) ? port : NULL;
52 }
53 
54 /*
55  * team_port_add() already verifies that a port device's MAC address can be
56  * changed while the device is open, so callers may ignore the return value.
57  */
58 static int __set_port_mac(struct net_device *port_dev,
59 			  const unsigned char *dev_addr)
60 {
61 	struct sockaddr addr;
62 
63 	memcpy(addr.sa_data, dev_addr, ETH_ALEN);
64 	addr.sa_family = ARPHRD_ETHER;
65 	return dev_set_mac_address(port_dev, &addr);
66 }
67 
68 static int team_port_set_orig_mac(struct team_port *port)
69 {
70 	return __set_port_mac(port->dev, port->orig.dev_addr);
71 }
72 
73 int team_port_set_team_mac(struct team_port *port)
74 {
75 	return __set_port_mac(port->dev, port->team->dev->dev_addr);
76 }
77 EXPORT_SYMBOL(team_port_set_team_mac);
78 
79 static void team_refresh_port_linkup(struct team_port *port)
80 {
81 	port->linkup = port->user.linkup_enabled ? port->user.linkup :
82 						   port->state.linkup;
83 }
84 
85 
86 /*******************
87  * Options handling
88  *******************/
89 
90 struct team_option_inst { /* One for each option instance */
91 	struct list_head list;
92 	struct list_head tmp_list;
93 	struct team_option *option;
94 	struct team_option_inst_info info;
95 	bool changed;
96 	bool removed;
97 };
98 
99 static struct team_option *__team_find_option(struct team *team,
100 					      const char *opt_name)
101 {
102 	struct team_option *option;
103 
104 	list_for_each_entry(option, &team->option_list, list) {
105 		if (strcmp(option->name, opt_name) == 0)
106 			return option;
107 	}
108 	return NULL;
109 }
110 
111 static void __team_option_inst_del(struct team_option_inst *opt_inst)
112 {
113 	list_del(&opt_inst->list);
114 	kfree(opt_inst);
115 }
116 
117 static void __team_option_inst_del_option(struct team *team,
118 					  struct team_option *option)
119 {
120 	struct team_option_inst *opt_inst, *tmp;
121 
122 	list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
123 		if (opt_inst->option == option)
124 			__team_option_inst_del(opt_inst);
125 	}
126 }
127 
128 static int __team_option_inst_add(struct team *team, struct team_option *option,
129 				  struct team_port *port)
130 {
131 	struct team_option_inst *opt_inst;
132 	unsigned int array_size;
133 	unsigned int i;
134 	int err;
135 
136 	array_size = option->array_size;
137 	if (!array_size)
138 		array_size = 1; /* No array but still need one instance */
139 
140 	for (i = 0; i < array_size; i++) {
141 		opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL);
142 		if (!opt_inst)
143 			return -ENOMEM;
144 		opt_inst->option = option;
145 		opt_inst->info.port = port;
146 		opt_inst->info.array_index = i;
147 		opt_inst->changed = true;
148 		opt_inst->removed = false;
149 		list_add_tail(&opt_inst->list, &team->option_inst_list);
150 		if (option->init) {
151 			err = option->init(team, &opt_inst->info);
152 			if (err)
153 				return err;
154 		}
155 
156 	}
157 	return 0;
158 }
159 
160 static int __team_option_inst_add_option(struct team *team,
161 					 struct team_option *option)
162 {
163 	struct team_port *port;
164 	int err;
165 
166 	if (!option->per_port) {
167 		err = __team_option_inst_add(team, option, NULL);
168 		if (err)
169 			goto inst_del_option;
170 	}
171 
172 	list_for_each_entry(port, &team->port_list, list) {
173 		err = __team_option_inst_add(team, option, port);
174 		if (err)
175 			goto inst_del_option;
176 	}
177 	return 0;
178 
179 inst_del_option:
180 	__team_option_inst_del_option(team, option);
181 	return err;
182 }
183 
184 static void __team_option_inst_mark_removed_option(struct team *team,
185 						   struct team_option *option)
186 {
187 	struct team_option_inst *opt_inst;
188 
189 	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
190 		if (opt_inst->option == option) {
191 			opt_inst->changed = true;
192 			opt_inst->removed = true;
193 		}
194 	}
195 }
196 
197 static void __team_option_inst_del_port(struct team *team,
198 					struct team_port *port)
199 {
200 	struct team_option_inst *opt_inst, *tmp;
201 
202 	list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
203 		if (opt_inst->option->per_port &&
204 		    opt_inst->info.port == port)
205 			__team_option_inst_del(opt_inst);
206 	}
207 }
208 
209 static int __team_option_inst_add_port(struct team *team,
210 				       struct team_port *port)
211 {
212 	struct team_option *option;
213 	int err;
214 
215 	list_for_each_entry(option, &team->option_list, list) {
216 		if (!option->per_port)
217 			continue;
218 		err = __team_option_inst_add(team, option, port);
219 		if (err)
220 			goto inst_del_port;
221 	}
222 	return 0;
223 
224 inst_del_port:
225 	__team_option_inst_del_port(team, port);
226 	return err;
227 }
228 
229 static void __team_option_inst_mark_removed_port(struct team *team,
230 						 struct team_port *port)
231 {
232 	struct team_option_inst *opt_inst;
233 
234 	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
235 		if (opt_inst->info.port == port) {
236 			opt_inst->changed = true;
237 			opt_inst->removed = true;
238 		}
239 	}
240 }
241 
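/*
 * Registration runs in two phases: every option is first duplicated with
 * kmemdup() (also catching duplicate names via __team_find_option()), and
 * only then are per-team/per-port instances created and the options linked
 * into team->option_list. Each phase rolls back all prior work on failure.
 */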
242 static int __team_options_register(struct team *team,
243 				   const struct team_option *option,
244 				   size_t option_count)
245 {
246 	int i;
247 	struct team_option **dst_opts;
248 	int err;
249 
250 	dst_opts = kcalloc(option_count, sizeof(struct team_option *),
251 			   GFP_KERNEL);
252 	if (!dst_opts)
253 		return -ENOMEM;
254 	for (i = 0; i < option_count; i++, option++) {
255 		if (__team_find_option(team, option->name)) {
256 			err = -EEXIST;
257 			goto alloc_rollback;
258 		}
259 		dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL);
260 		if (!dst_opts[i]) {
261 			err = -ENOMEM;
262 			goto alloc_rollback;
263 		}
264 	}
265 
266 	for (i = 0; i < option_count; i++) {
267 		err = __team_option_inst_add_option(team, dst_opts[i]);
268 		if (err)
269 			goto inst_rollback;
270 		list_add_tail(&dst_opts[i]->list, &team->option_list);
271 	}
272 
273 	kfree(dst_opts);
274 	return 0;
275 
276 inst_rollback:
277 	for (i--; i >= 0; i--)
278 		__team_option_inst_del_option(team, dst_opts[i]);
279 
280 	i = option_count; /* free all duplicated options in the loop below */
281 alloc_rollback:
282 	for (i--; i >= 0; i--)
283 		kfree(dst_opts[i]);
284 
285 	kfree(dst_opts);
286 	return err;
287 }
288 
289 static void __team_options_mark_removed(struct team *team,
290 					const struct team_option *option,
291 					size_t option_count)
292 {
293 	int i;
294 
295 	for (i = 0; i < option_count; i++, option++) {
296 		struct team_option *del_opt;
297 
298 		del_opt = __team_find_option(team, option->name);
299 		if (del_opt)
300 			__team_option_inst_mark_removed_option(team, del_opt);
301 	}
302 }
303 
304 static void __team_options_unregister(struct team *team,
305 				      const struct team_option *option,
306 				      size_t option_count)
307 {
308 	int i;
309 
310 	for (i = 0; i < option_count; i++, option++) {
311 		struct team_option *del_opt;
312 
313 		del_opt = __team_find_option(team, option->name);
314 		if (del_opt) {
315 			__team_option_inst_del_option(team, del_opt);
316 			list_del(&del_opt->list);
317 			kfree(del_opt);
318 		}
319 	}
320 }
321 
322 static void __team_options_change_check(struct team *team);
323 static void __team_option_inst_change(struct team *team,
324 				      struct team_option_inst *opt_inst);
325 
326 int team_options_register(struct team *team,
327 			  const struct team_option *option,
328 			  size_t option_count)
329 {
330 	int err;
331 
332 	err = __team_options_register(team, option, option_count);
333 	if (err)
334 		return err;
335 	__team_options_change_check(team);
336 	return 0;
337 }
338 EXPORT_SYMBOL(team_options_register);
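
/*
 * Usage sketch for a mode module (hypothetical option and callbacks, shown
 * for illustration only):
 *
 *	static const struct team_option ab_options[] = {
 *		{
 *			.name	= "active_port",
 *			.type	= TEAM_OPTION_TYPE_U32,
 *			.getter	= ab_active_port_get,
 *			.setter	= ab_active_port_set,
 *		},
 *	};
 *
 *	err = team_options_register(team, ab_options, ARRAY_SIZE(ab_options));
 */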
339 
340 void team_options_unregister(struct team *team,
341 			     const struct team_option *option,
342 			     size_t option_count)
343 {
344 	__team_options_mark_removed(team, option, option_count);
345 	__team_options_change_check(team);
346 	__team_options_unregister(team, option, option_count);
347 }
348 EXPORT_SYMBOL(team_options_unregister);
349 
350 static int team_option_port_add(struct team *team, struct team_port *port)
351 {
352 	int err;
353 
354 	err = __team_option_inst_add_port(team, port);
355 	if (err)
356 		return err;
357 	__team_options_change_check(team);
358 	return 0;
359 }
360 
361 static void team_option_port_del(struct team *team, struct team_port *port)
362 {
363 	__team_option_inst_mark_removed_port(team, port);
364 	__team_options_change_check(team);
365 	__team_option_inst_del_port(team, port);
366 }
367 
368 static int team_option_get(struct team *team,
369 			   struct team_option_inst *opt_inst,
370 			   struct team_gsetter_ctx *ctx)
371 {
372 	if (!opt_inst->option->getter)
373 		return -EOPNOTSUPP;
374 	return opt_inst->option->getter(team, ctx);
375 }
376 
377 static int team_option_set(struct team *team,
378 			   struct team_option_inst *opt_inst,
379 			   struct team_gsetter_ctx *ctx)
380 {
381 	int err;
382 
383 	if (!opt_inst->option->setter)
384 		return -EOPNOTSUPP;
385 	err = opt_inst->option->setter(team, ctx);
386 	if (err)
387 		return err;
388 
389 	__team_option_inst_change(team, opt_inst);
390 	return err;
391 }
392 
393 void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info)
394 {
395 	struct team_option_inst *opt_inst;
396 
397 	opt_inst = container_of(opt_inst_info, struct team_option_inst, info);
398 	opt_inst->changed = true;
399 }
400 EXPORT_SYMBOL(team_option_inst_set_change);
401 
402 void team_options_change_check(struct team *team)
403 {
404 	__team_options_change_check(team);
405 }
406 EXPORT_SYMBOL(team_options_change_check);
407 
408 
409 /****************
410  * Mode handling
411  ****************/
412 
413 static LIST_HEAD(mode_list);
414 static DEFINE_SPINLOCK(mode_list_lock);
415 
416 struct team_mode_item {
417 	struct list_head list;
418 	const struct team_mode *mode;
419 };
420 
421 static struct team_mode_item *__find_mode(const char *kind)
422 {
423 	struct team_mode_item *mitem;
424 
425 	list_for_each_entry(mitem, &mode_list, list) {
426 		if (strcmp(mitem->mode->kind, kind) == 0)
427 			return mitem;
428 	}
429 	return NULL;
430 }
431 
432 static bool is_good_mode_name(const char *name)
433 {
434 	while (*name != '\0') {
435 		if (!isalpha(*name) && !isdigit(*name) && *name != '_')
436 			return false;
437 		name++;
438 	}
439 	return true;
440 }
441 
442 int team_mode_register(const struct team_mode *mode)
443 {
444 	int err = 0;
445 	struct team_mode_item *mitem;
446 
447 	if (!is_good_mode_name(mode->kind) ||
448 	    mode->priv_size > TEAM_MODE_PRIV_SIZE)
449 		return -EINVAL;
450 
451 	mitem = kmalloc(sizeof(*mitem), GFP_KERNEL);
452 	if (!mitem)
453 		return -ENOMEM;
454 
455 	spin_lock(&mode_list_lock);
456 	if (__find_mode(mode->kind)) {
457 		err = -EEXIST;
458 		kfree(mitem);
459 		goto unlock;
460 	}
461 	mitem->mode = mode;
462 	list_add_tail(&mitem->list, &mode_list);
463 unlock:
464 	spin_unlock(&mode_list_lock);
465 	return err;
466 }
467 EXPORT_SYMBOL(team_mode_register);
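
/*
 * A mode module would typically register itself from its module init
 * (a sketch; the struct fields shown are the ones this file uses, while
 * the "ab" names are illustrative):
 *
 *	static const struct team_mode ab_mode = {
 *		.kind		= "activebackup",
 *		.owner		= THIS_MODULE,
 *		.priv_size	= sizeof(struct ab_priv),
 *		.ops		= &ab_mode_ops,
 *	};
 *
 *	return team_mode_register(&ab_mode);
 */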
468 
469 void team_mode_unregister(const struct team_mode *mode)
470 {
471 	struct team_mode_item *mitem;
472 
473 	spin_lock(&mode_list_lock);
474 	mitem = __find_mode(mode->kind);
475 	if (mitem) {
476 		list_del_init(&mitem->list);
477 		kfree(mitem);
478 	}
479 	spin_unlock(&mode_list_lock);
480 }
481 EXPORT_SYMBOL(team_mode_unregister);
482 
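/*
 * team_mode_get() autoloads modes on demand: if the requested kind is not
 * registered, the list lock is dropped, request_module("team-mode-<kind>")
 * is tried, and the lookup is retried. A reference on the owning module is
 * taken so it cannot be unloaded while a team instance uses the mode.
 */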
483 static const struct team_mode *team_mode_get(const char *kind)
484 {
485 	struct team_mode_item *mitem;
486 	const struct team_mode *mode = NULL;
487 
488 	spin_lock(&mode_list_lock);
489 	mitem = __find_mode(kind);
490 	if (!mitem) {
491 		spin_unlock(&mode_list_lock);
492 		request_module("team-mode-%s", kind);
493 		spin_lock(&mode_list_lock);
494 		mitem = __find_mode(kind);
495 	}
496 	if (mitem) {
497 		mode = mitem->mode;
498 		if (!try_module_get(mode->owner))
499 			mode = NULL;
500 	}
501 
502 	spin_unlock(&mode_list_lock);
503 	return mode;
504 }
505 
506 static void team_mode_put(const struct team_mode *mode)
507 {
508 	module_put(mode->owner);
509 }
510 
511 static bool team_dummy_transmit(struct team *team, struct sk_buff *skb)
512 {
513 	dev_kfree_skb_any(skb);
514 	return false;
515 }
516 
517 static rx_handler_result_t team_dummy_receive(struct team *team,
518 					       struct team_port *port,
519 					       struct sk_buff *skb)
520 {
521 	return RX_HANDLER_ANOTHER;
522 }
523 
524 static const struct team_mode __team_no_mode = {
525 	.kind		= "*NOMODE*",
526 };
527 
528 static bool team_is_mode_set(struct team *team)
529 {
530 	return team->mode != &__team_no_mode;
531 }
532 
533 static void team_set_no_mode(struct team *team)
534 {
535 	team->mode = &__team_no_mode;
536 }
537 
538 static void team_adjust_ops(struct team *team)
539 {
540 	/*
541 	 * To avoid checks in rx/tx skb paths, ensure here that non-null and
542 	 * correct ops are always set.
543 	 */
544 
545 	if (list_empty(&team->port_list) ||
546 	    !team_is_mode_set(team) || !team->mode->ops->transmit)
547 		team->ops.transmit = team_dummy_transmit;
548 	else
549 		team->ops.transmit = team->mode->ops->transmit;
550 
551 	if (list_empty(&team->port_list) ||
552 	    !team_is_mode_set(team) || !team->mode->ops->receive)
553 		team->ops.receive = team_dummy_receive;
554 	else
555 		team->ops.receive = team->mode->ops->receive;
556 }
557 
558 /*
559  * We can rely on the fact that no port is present at the time of a mode
560  * change; therefore no packets are in flight and the mode operations need
561  * not be swapped in any special way.
562  */
563 static int __team_change_mode(struct team *team,
564 			      const struct team_mode *new_mode)
565 {
566 	/* Check if mode was previously set and do cleanup if so */
567 	if (team_is_mode_set(team)) {
568 		void (*exit_op)(struct team *team) = team->ops.exit;
569 
570 		/* Clear ops area so no callback is called any longer */
571 		memset(&team->ops, 0, sizeof(struct team_mode_ops));
572 		team_adjust_ops(team);
573 
574 		if (exit_op)
575 			exit_op(team);
576 		team_mode_put(team->mode);
577 		team_set_no_mode(team);
578 		/* zero private data area */
579 		memset(&team->mode_priv, 0,
580 		       sizeof(struct team) - offsetof(struct team, mode_priv));
581 	}
582 
583 	if (!new_mode)
584 		return 0;
585 
586 	if (new_mode->ops->init) {
587 		int err;
588 
589 		err = new_mode->ops->init(team);
590 		if (err)
591 			return err;
592 	}
593 
594 	team->mode = new_mode;
595 	memcpy(&team->ops, new_mode->ops, sizeof(struct team_mode_ops));
596 	team_adjust_ops(team);
597 
598 	return 0;
599 }
600 
601 static int team_change_mode(struct team *team, const char *kind)
602 {
603 	const struct team_mode *new_mode;
604 	struct net_device *dev = team->dev;
605 	int err;
606 
607 	if (!list_empty(&team->port_list)) {
608 		netdev_err(dev, "No ports can be present during mode change\n");
609 		return -EBUSY;
610 	}
611 
612 	if (team_is_mode_set(team) && strcmp(team->mode->kind, kind) == 0) {
613 		netdev_err(dev, "Unable to change to the same mode the team is in\n");
614 		return -EINVAL;
615 	}
616 
617 	new_mode = team_mode_get(kind);
618 	if (!new_mode) {
619 		netdev_err(dev, "Mode \"%s\" not found\n", kind);
620 		return -EINVAL;
621 	}
622 
623 	err = __team_change_mode(team, new_mode);
624 	if (err) {
625 		netdev_err(dev, "Failed to change to mode \"%s\"\n", kind);
626 		team_mode_put(new_mode);
627 		return err;
628 	}
629 
630 	netdev_info(dev, "Mode changed to \"%s\"\n", kind);
631 	return 0;
632 }
633 
634 
635 /************************
636  * Rx path frame handler
637  ************************/
638 
639 static bool team_port_enabled(struct team_port *port);
640 
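/*
 * The mode's receive hook decides the verdict. RX_HANDLER_ANOTHER makes the
 * core re-run delivery with skb->dev retargeted to the team device and is
 * accounted as team rx; any other verdict leaves the skb on the port and is
 * counted as an rx drop for the team.
 */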
641 /* note: already called with rcu_read_lock */
642 static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
643 {
644 	struct sk_buff *skb = *pskb;
645 	struct team_port *port;
646 	struct team *team;
647 	rx_handler_result_t res;
648 
649 	skb = skb_share_check(skb, GFP_ATOMIC);
650 	if (!skb)
651 		return RX_HANDLER_CONSUMED;
652 
653 	*pskb = skb;
654 
655 	port = team_port_get_rcu(skb->dev);
656 	team = port->team;
657 	if (!team_port_enabled(port)) {
658 		/* allow exact match delivery for disabled ports */
659 		res = RX_HANDLER_EXACT;
660 	} else {
661 		res = team->ops.receive(team, port, skb);
662 	}
663 	if (res == RX_HANDLER_ANOTHER) {
664 		struct team_pcpu_stats *pcpu_stats;
665 
666 		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
667 		u64_stats_update_begin(&pcpu_stats->syncp);
668 		pcpu_stats->rx_packets++;
669 		pcpu_stats->rx_bytes += skb->len;
670 		if (skb->pkt_type == PACKET_MULTICAST)
671 			pcpu_stats->rx_multicast++;
672 		u64_stats_update_end(&pcpu_stats->syncp);
673 
674 		skb->dev = team->dev;
675 	} else {
676 		this_cpu_inc(team->pcpu_stats->rx_dropped);
677 	}
678 
679 	return res;
680 }
681 
682 
683 /****************
684  * Port handling
685  ****************/
686 
687 static bool team_port_find(const struct team *team,
688 			   const struct team_port *port)
689 {
690 	struct team_port *cur;
691 
692 	list_for_each_entry(cur, &team->port_list, list)
693 		if (cur == port)
694 			return true;
695 	return false;
696 }
697 
698 static bool team_port_enabled(struct team_port *port)
699 {
700 	return port->index != -1;
701 }
702 
703 /*
704  * Enable/disable a port by adding it to the enabled-port hashlist and
705  * setting port->index (this may race with readers, which could see a stale
706  * index while processing an in-flight packet, but that is harmless).
707  * Writes are guarded by team->lock.
708  */
709 static void team_port_enable(struct team *team,
710 			     struct team_port *port)
711 {
712 	if (team_port_enabled(port))
713 		return;
714 	port->index = team->en_port_count++;
715 	hlist_add_head_rcu(&port->hlist,
716 			   team_port_index_hash(team, port->index));
717 }
718 
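/*
 * After a port is removed from the enabled hashlist, every enabled port
 * with a higher index is shifted down by one and rehashed so that indexes
 * stay dense in the range [0, en_port_count).
 */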
719 static void __reconstruct_port_hlist(struct team *team, int rm_index)
720 {
721 	int i;
722 	struct team_port *port;
723 
724 	for (i = rm_index + 1; i < team->en_port_count; i++) {
725 		port = team_get_port_by_index(team, i);
726 		hlist_del_rcu(&port->hlist);
727 		port->index--;
728 		hlist_add_head_rcu(&port->hlist,
729 				   team_port_index_hash(team, port->index));
730 	}
731 }
732 
733 static void team_port_disable(struct team *team,
734 			      struct team_port *port)
735 {
736 	int rm_index = port->index;
737 
738 	if (!team_port_enabled(port))
739 		return;
740 	hlist_del_rcu(&port->hlist);
741 	__reconstruct_port_hlist(team, rm_index);
742 	team->en_port_count--;
743 	port->index = -1;
744 }
745 
746 #define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
747 			    NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
748 			    NETIF_F_HIGHDMA | NETIF_F_LRO)
749 
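/*
 * Recompute the team device's vlan_features by folding in each port's
 * vlan_features via netdev_increment_features(), take the maximum
 * hard_header_len over all ports, and let the core propagate the change
 * through netdev_change_features().
 */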
750 static void __team_compute_features(struct team *team)
751 {
752 	struct team_port *port;
753 	u32 vlan_features = TEAM_VLAN_FEATURES;
754 	unsigned short max_hard_header_len = ETH_HLEN;
755 
756 	list_for_each_entry(port, &team->port_list, list) {
757 		vlan_features = netdev_increment_features(vlan_features,
758 					port->dev->vlan_features,
759 					TEAM_VLAN_FEATURES);
760 
761 		if (port->dev->hard_header_len > max_hard_header_len)
762 			max_hard_header_len = port->dev->hard_header_len;
763 	}
764 
765 	team->dev->vlan_features = vlan_features;
766 	team->dev->hard_header_len = max_hard_header_len;
767 
768 	netdev_change_features(team->dev);
769 }
770 
771 static void team_compute_features(struct team *team)
772 {
773 	mutex_lock(&team->lock);
774 	__team_compute_features(team);
775 	mutex_unlock(&team->lock);
776 }
777 
778 static int team_port_enter(struct team *team, struct team_port *port)
779 {
780 	int err = 0;
781 
782 	dev_hold(team->dev);
783 	port->dev->priv_flags |= IFF_TEAM_PORT;
784 	if (team->ops.port_enter) {
785 		err = team->ops.port_enter(team, port);
786 		if (err) {
787 			netdev_err(team->dev, "Device %s failed to enter team mode\n",
788 				   port->dev->name);
789 			goto err_port_enter;
790 		}
791 	}
792 
793 	return 0;
794 
795 err_port_enter:
796 	port->dev->priv_flags &= ~IFF_TEAM_PORT;
797 	dev_put(team->dev);
798 
799 	return err;
800 }
801 
802 static void team_port_leave(struct team *team, struct team_port *port)
803 {
804 	if (team->ops.port_leave)
805 		team->ops.port_leave(team, port);
806 	port->dev->priv_flags &= ~IFF_TEAM_PORT;
807 	dev_put(team->dev);
808 }
809 
810 static void __team_port_change_check(struct team_port *port, bool linkup);
811 
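/*
 * Enslave sequence: sanity checks (ethernet type, not loopback, not already
 * a port, administratively down), then mtu sync, the mode's port_enter hook,
 * dev_open(), vlan id propagation, master linkage, rx_handler registration
 * and per-port option instances. Every step unwinds all previous ones on
 * failure.
 */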
812 static int team_port_add(struct team *team, struct net_device *port_dev)
813 {
814 	struct net_device *dev = team->dev;
815 	struct team_port *port;
816 	char *portname = port_dev->name;
817 	int err;
818 
819 	if (port_dev->flags & IFF_LOOPBACK ||
820 	    port_dev->type != ARPHRD_ETHER) {
821 		netdev_err(dev, "Device %s is of an unsupported type\n",
822 			   portname);
823 		return -EINVAL;
824 	}
825 
826 	if (team_port_exists(port_dev)) {
827 		netdev_err(dev, "Device %s is already a port of a team device\n",
828 				portname);
829 		return -EBUSY;
830 	}
831 
832 	if (port_dev->flags & IFF_UP) {
833 		netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
834 			   portname);
835 		return -EBUSY;
836 	}
837 
838 	port = kzalloc(sizeof(struct team_port) + team->mode->port_priv_size,
839 		       GFP_KERNEL);
840 	if (!port)
841 		return -ENOMEM;
842 
843 	port->dev = port_dev;
844 	port->team = team;
845 
846 	port->orig.mtu = port_dev->mtu;
847 	err = dev_set_mtu(port_dev, dev->mtu);
848 	if (err) {
849 		netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
850 		goto err_set_mtu;
851 	}
852 
853 	memcpy(port->orig.dev_addr, port_dev->dev_addr, ETH_ALEN);
854 
855 	err = team_port_enter(team, port);
856 	if (err) {
857 		netdev_err(dev, "Device %s failed to enter team mode\n",
858 			   portname);
859 		goto err_port_enter;
860 	}
861 
862 	err = dev_open(port_dev);
863 	if (err) {
864 		netdev_dbg(dev, "Device %s opening failed\n",
865 			   portname);
866 		goto err_dev_open;
867 	}
868 
869 	err = vlan_vids_add_by_dev(port_dev, dev);
870 	if (err) {
871 		netdev_err(dev, "Failed to add vlan ids to device %s\n",
872 				portname);
873 		goto err_vids_add;
874 	}
875 
876 	err = netdev_set_master(port_dev, dev);
877 	if (err) {
878 		netdev_err(dev, "Device %s failed to set master\n", portname);
879 		goto err_set_master;
880 	}
881 
882 	err = netdev_rx_handler_register(port_dev, team_handle_frame,
883 					 port);
884 	if (err) {
885 		netdev_err(dev, "Device %s failed to register rx_handler\n",
886 			   portname);
887 		goto err_handler_register;
888 	}
889 
890 	err = team_option_port_add(team, port);
891 	if (err) {
892 		netdev_err(dev, "Device %s failed to add per-port options\n",
893 			   portname);
894 		goto err_option_port_add;
895 	}
896 
897 	port->index = -1;
898 	team_port_enable(team, port);
899 	list_add_tail_rcu(&port->list, &team->port_list);
900 	team_adjust_ops(team);
901 	__team_compute_features(team);
902 	__team_port_change_check(port, !!netif_carrier_ok(port_dev));
903 
904 	netdev_info(dev, "Port device %s added\n", portname);
905 
906 	return 0;
907 
908 err_option_port_add:
909 	netdev_rx_handler_unregister(port_dev);
910 
911 err_handler_register:
912 	netdev_set_master(port_dev, NULL);
913 
914 err_set_master:
915 	vlan_vids_del_by_dev(port_dev, dev);
916 
917 err_vids_add:
918 	dev_close(port_dev);
919 
920 err_dev_open:
921 	team_port_leave(team, port);
922 	team_port_set_orig_mac(port);
923 
924 err_port_enter:
925 	dev_set_mtu(port_dev, port->orig.mtu);
926 
927 err_set_mtu:
928 	kfree(port);
929 
930 	return err;
931 }
932 
933 static int team_port_del(struct team *team, struct net_device *port_dev)
934 {
935 	struct net_device *dev = team->dev;
936 	struct team_port *port;
937 	char *portname = port_dev->name;
938 
939 	port = team_port_get_rtnl(port_dev);
940 	if (!port || !team_port_find(team, port)) {
941 		netdev_err(dev, "Device %s does not act as a port of this team\n",
942 			   portname);
943 		return -ENOENT;
944 	}
945 
946 	port->removed = true;
947 	__team_port_change_check(port, false);
948 	team_port_disable(team, port);
949 	list_del_rcu(&port->list);
950 	team_adjust_ops(team);
951 	team_option_port_del(team, port);
952 	netdev_rx_handler_unregister(port_dev);
953 	netdev_set_master(port_dev, NULL);
954 	vlan_vids_del_by_dev(port_dev, dev);
955 	dev_close(port_dev);
956 	team_port_leave(team, port);
957 	team_port_set_orig_mac(port);
958 	dev_set_mtu(port_dev, port->orig.mtu);
959 	synchronize_rcu();
960 	kfree(port);
961 	netdev_info(dev, "Port device %s removed\n", portname);
962 	__team_compute_features(team);
963 
964 	return 0;
965 }
966 
967 
968 /*****************
969  * Net device ops
970  *****************/
971 
972 static int team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx)
973 {
974 	ctx->data.str_val = team->mode->kind;
975 	return 0;
976 }
977 
978 static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
979 {
980 	return team_change_mode(team, ctx->data.str_val);
981 }
982 
983 static int team_port_en_option_get(struct team *team,
984 				   struct team_gsetter_ctx *ctx)
985 {
986 	struct team_port *port = ctx->info->port;
987 
988 	ctx->data.bool_val = team_port_enabled(port);
989 	return 0;
990 }
991 
992 static int team_port_en_option_set(struct team *team,
993 				   struct team_gsetter_ctx *ctx)
994 {
995 	struct team_port *port = ctx->info->port;
996 
997 	if (ctx->data.bool_val)
998 		team_port_enable(team, port);
999 	else
1000 		team_port_disable(team, port);
1001 	return 0;
1002 }
1003 
1004 static int team_user_linkup_option_get(struct team *team,
1005 				       struct team_gsetter_ctx *ctx)
1006 {
1007 	struct team_port *port = ctx->info->port;
1008 
1009 	ctx->data.bool_val = port->user.linkup;
1010 	return 0;
1011 }
1012 
1013 static int team_user_linkup_option_set(struct team *team,
1014 				       struct team_gsetter_ctx *ctx)
1015 {
1016 	struct team_port *port = ctx->info->port;
1017 
1018 	port->user.linkup = ctx->data.bool_val;
1019 	team_refresh_port_linkup(port);
1020 	return 0;
1021 }
1022 
1023 static int team_user_linkup_en_option_get(struct team *team,
1024 					  struct team_gsetter_ctx *ctx)
1025 {
1026 	struct team_port *port = ctx->info->port;
1027 
1028 	ctx->data.bool_val = port->user.linkup_enabled;
1029 	return 0;
1030 }
1031 
1032 static int team_user_linkup_en_option_set(struct team *team,
1033 					  struct team_gsetter_ctx *ctx)
1034 {
1035 	struct team_port *port = ctx->info->port;
1036 
1037 	port->user.linkup_enabled = ctx->data.bool_val;
1038 	team_refresh_port_linkup(port);
1039 	return 0;
1040 }
1041 
1042 static const struct team_option team_options[] = {
1043 	{
1044 		.name = "mode",
1045 		.type = TEAM_OPTION_TYPE_STRING,
1046 		.getter = team_mode_option_get,
1047 		.setter = team_mode_option_set,
1048 	},
1049 	{
1050 		.name = "enabled",
1051 		.type = TEAM_OPTION_TYPE_BOOL,
1052 		.per_port = true,
1053 		.getter = team_port_en_option_get,
1054 		.setter = team_port_en_option_set,
1055 	},
1056 	{
1057 		.name = "user_linkup",
1058 		.type = TEAM_OPTION_TYPE_BOOL,
1059 		.per_port = true,
1060 		.getter = team_user_linkup_option_get,
1061 		.setter = team_user_linkup_option_set,
1062 	},
1063 	{
1064 		.name = "user_linkup_enabled",
1065 		.type = TEAM_OPTION_TYPE_BOOL,
1066 		.per_port = true,
1067 		.getter = team_user_linkup_en_option_get,
1068 		.setter = team_user_linkup_en_option_set,
1069 	},
1070 };
1071 
1072 static int team_init(struct net_device *dev)
1073 {
1074 	struct team *team = netdev_priv(dev);
1075 	int i;
1076 	int err;
1077 
1078 	team->dev = dev;
1079 	mutex_init(&team->lock);
1080 	team_set_no_mode(team);
1081 
1082 	team->pcpu_stats = alloc_percpu(struct team_pcpu_stats);
1083 	if (!team->pcpu_stats)
1084 		return -ENOMEM;
1085 
1086 	for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
1087 		INIT_HLIST_HEAD(&team->en_port_hlist[i]);
1088 	INIT_LIST_HEAD(&team->port_list);
1089 
1090 	team_adjust_ops(team);
1091 
1092 	INIT_LIST_HEAD(&team->option_list);
1093 	INIT_LIST_HEAD(&team->option_inst_list);
1094 	err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
1095 	if (err)
1096 		goto err_options_register;
1097 	netif_carrier_off(dev);
1098 
1099 	return 0;
1100 
1101 err_options_register:
1102 	free_percpu(team->pcpu_stats);
1103 
1104 	return err;
1105 }
1106 
1107 static void team_uninit(struct net_device *dev)
1108 {
1109 	struct team *team = netdev_priv(dev);
1110 	struct team_port *port;
1111 	struct team_port *tmp;
1112 
1113 	mutex_lock(&team->lock);
1114 	list_for_each_entry_safe(port, tmp, &team->port_list, list)
1115 		team_port_del(team, port->dev);
1116 
1117 	__team_change_mode(team, NULL); /* cleanup */
1118 	__team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
1119 	mutex_unlock(&team->lock);
1120 }
1121 
1122 static void team_destructor(struct net_device *dev)
1123 {
1124 	struct team *team = netdev_priv(dev);
1125 
1126 	free_percpu(team->pcpu_stats);
1127 	free_netdev(dev);
1128 }
1129 
1130 static int team_open(struct net_device *dev)
1131 {
1132 	netif_carrier_on(dev);
1133 	return 0;
1134 }
1135 
1136 static int team_close(struct net_device *dev)
1137 {
1138 	netif_carrier_off(dev);
1139 	return 0;
1140 }
1141 
1142 /*
1143  * note: already called with rcu_read_lock
1144  */
1145 static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
1146 {
1147 	struct team *team = netdev_priv(dev);
1148 	bool tx_success = false;
1149 	unsigned int len = skb->len;
1150 
1151 	tx_success = team->ops.transmit(team, skb);
1152 	if (tx_success) {
1153 		struct team_pcpu_stats *pcpu_stats;
1154 
1155 		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
1156 		u64_stats_update_begin(&pcpu_stats->syncp);
1157 		pcpu_stats->tx_packets++;
1158 		pcpu_stats->tx_bytes += len;
1159 		u64_stats_update_end(&pcpu_stats->syncp);
1160 	} else {
1161 		this_cpu_inc(team->pcpu_stats->tx_dropped);
1162 	}
1163 
1164 	return NETDEV_TX_OK;
1165 }
1166 
1167 static void team_change_rx_flags(struct net_device *dev, int change)
1168 {
1169 	struct team *team = netdev_priv(dev);
1170 	struct team_port *port;
1171 	int inc;
1172 
1173 	rcu_read_lock();
1174 	list_for_each_entry_rcu(port, &team->port_list, list) {
1175 		if (change & IFF_PROMISC) {
1176 			inc = dev->flags & IFF_PROMISC ? 1 : -1;
1177 			dev_set_promiscuity(port->dev, inc);
1178 		}
1179 		if (change & IFF_ALLMULTI) {
1180 			inc = dev->flags & IFF_ALLMULTI ? 1 : -1;
1181 			dev_set_allmulti(port->dev, inc);
1182 		}
1183 	}
1184 	rcu_read_unlock();
1185 }
1186 
1187 static void team_set_rx_mode(struct net_device *dev)
1188 {
1189 	struct team *team = netdev_priv(dev);
1190 	struct team_port *port;
1191 
1192 	rcu_read_lock();
1193 	list_for_each_entry_rcu(port, &team->port_list, list) {
1194 		dev_uc_sync(port->dev, dev);
1195 		dev_mc_sync(port->dev, dev);
1196 	}
1197 	rcu_read_unlock();
1198 }
1199 
1200 static int team_set_mac_address(struct net_device *dev, void *p)
1201 {
1202 	struct team *team = netdev_priv(dev);
1203 	struct team_port *port;
1204 	struct sockaddr *addr = p;
1205 
1206 	dev->addr_assign_type &= ~NET_ADDR_RANDOM;
1207 	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
1208 	rcu_read_lock();
1209 	list_for_each_entry_rcu(port, &team->port_list, list)
1210 		if (team->ops.port_change_mac)
1211 			team->ops.port_change_mac(team, port);
1212 	rcu_read_unlock();
1213 	return 0;
1214 }
1215 
1216 static int team_change_mtu(struct net_device *dev, int new_mtu)
1217 {
1218 	struct team *team = netdev_priv(dev);
1219 	struct team_port *port;
1220 	int err;
1221 
1222 	/*
1223 	 * Although this is a reader, it's guarded by team->lock. It's not
1224 	 * possible to traverse the list in reverse under rcu_read_lock.
1225 	 */
1226 	mutex_lock(&team->lock);
1227 	list_for_each_entry(port, &team->port_list, list) {
1228 		err = dev_set_mtu(port->dev, new_mtu);
1229 		if (err) {
1230 			netdev_err(dev, "Device %s failed to change mtu\n",
1231 				   port->dev->name);
1232 			goto unwind;
1233 		}
1234 	}
1235 	mutex_unlock(&team->lock);
1236 
1237 	dev->mtu = new_mtu;
1238 
1239 	return 0;
1240 
1241 unwind:
1242 	list_for_each_entry_continue_reverse(port, &team->port_list, list)
1243 		dev_set_mtu(port->dev, dev->mtu);
1244 	mutex_unlock(&team->lock);
1245 
1246 	return err;
1247 }
1248 
1249 static struct rtnl_link_stats64 *
1250 team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1251 {
1252 	struct team *team = netdev_priv(dev);
1253 	struct team_pcpu_stats *p;
1254 	u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
1255 	u32 rx_dropped = 0, tx_dropped = 0;
1256 	unsigned int start;
1257 	int i;
1258 
1259 	for_each_possible_cpu(i) {
1260 		p = per_cpu_ptr(team->pcpu_stats, i);
1261 		do {
1262 			start = u64_stats_fetch_begin_bh(&p->syncp);
1263 			rx_packets	= p->rx_packets;
1264 			rx_bytes	= p->rx_bytes;
1265 			rx_multicast	= p->rx_multicast;
1266 			tx_packets	= p->tx_packets;
1267 			tx_bytes	= p->tx_bytes;
1268 		} while (u64_stats_fetch_retry_bh(&p->syncp, start));
1269 
1270 		stats->rx_packets	+= rx_packets;
1271 		stats->rx_bytes		+= rx_bytes;
1272 		stats->multicast	+= rx_multicast;
1273 		stats->tx_packets	+= tx_packets;
1274 		stats->tx_bytes		+= tx_bytes;
1275 		/*
1276 		 * rx_dropped & tx_dropped are u32, updated
1277 		 * without syncp protection.
1278 		 */
1279 		rx_dropped	+= p->rx_dropped;
1280 		tx_dropped	+= p->tx_dropped;
1281 	}
1282 	stats->rx_dropped	= rx_dropped;
1283 	stats->tx_dropped	= tx_dropped;
1284 	return stats;
1285 }
1286 
1287 static int team_vlan_rx_add_vid(struct net_device *dev, uint16_t vid)
1288 {
1289 	struct team *team = netdev_priv(dev);
1290 	struct team_port *port;
1291 	int err;
1292 
1293 	/*
1294 	 * Although this is a reader, it's guarded by team->lock. It's not
1295 	 * possible to traverse the list in reverse under rcu_read_lock.
1296 	 */
1297 	mutex_lock(&team->lock);
1298 	list_for_each_entry(port, &team->port_list, list) {
1299 		err = vlan_vid_add(port->dev, vid);
1300 		if (err)
1301 			goto unwind;
1302 	}
1303 	mutex_unlock(&team->lock);
1304 
1305 	return 0;
1306 
1307 unwind:
1308 	list_for_each_entry_continue_reverse(port, &team->port_list, list)
1309 		vlan_vid_del(port->dev, vid);
1310 	mutex_unlock(&team->lock);
1311 
1312 	return err;
1313 }
1314 
1315 static int team_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
1316 {
1317 	struct team *team = netdev_priv(dev);
1318 	struct team_port *port;
1319 
1320 	rcu_read_lock();
1321 	list_for_each_entry_rcu(port, &team->port_list, list)
1322 		vlan_vid_del(port->dev, vid);
1323 	rcu_read_unlock();
1324 
1325 	return 0;
1326 }
1327 
1328 static int team_add_slave(struct net_device *dev, struct net_device *port_dev)
1329 {
1330 	struct team *team = netdev_priv(dev);
1331 	int err;
1332 
1333 	mutex_lock(&team->lock);
1334 	err = team_port_add(team, port_dev);
1335 	mutex_unlock(&team->lock);
1336 	return err;
1337 }
1338 
1339 static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
1340 {
1341 	struct team *team = netdev_priv(dev);
1342 	int err;
1343 
1344 	mutex_lock(&team->lock);
1345 	err = team_port_del(team, port_dev);
1346 	mutex_unlock(&team->lock);
1347 	return err;
1348 }
1349 
1350 static netdev_features_t team_fix_features(struct net_device *dev,
1351 					   netdev_features_t features)
1352 {
1353 	struct team_port *port;
1354 	struct team *team = netdev_priv(dev);
1355 	netdev_features_t mask;
1356 
1357 	mask = features;
1358 	features &= ~NETIF_F_ONE_FOR_ALL;
1359 	features |= NETIF_F_ALL_FOR_ALL;
1360 
1361 	rcu_read_lock();
1362 	list_for_each_entry_rcu(port, &team->port_list, list) {
1363 		features = netdev_increment_features(features,
1364 						     port->dev->features,
1365 						     mask);
1366 	}
1367 	rcu_read_unlock();
1368 	return features;
1369 }
1370 
1371 static const struct net_device_ops team_netdev_ops = {
1372 	.ndo_init		= team_init,
1373 	.ndo_uninit		= team_uninit,
1374 	.ndo_open		= team_open,
1375 	.ndo_stop		= team_close,
1376 	.ndo_start_xmit		= team_xmit,
1377 	.ndo_change_rx_flags	= team_change_rx_flags,
1378 	.ndo_set_rx_mode	= team_set_rx_mode,
1379 	.ndo_set_mac_address	= team_set_mac_address,
1380 	.ndo_change_mtu		= team_change_mtu,
1381 	.ndo_get_stats64	= team_get_stats64,
1382 	.ndo_vlan_rx_add_vid	= team_vlan_rx_add_vid,
1383 	.ndo_vlan_rx_kill_vid	= team_vlan_rx_kill_vid,
1384 	.ndo_add_slave		= team_add_slave,
1385 	.ndo_del_slave		= team_del_slave,
1386 	.ndo_fix_features	= team_fix_features,
1387 };
1388 
1389 
1390 /***********************
1391  * rt netlink interface
1392  ***********************/
1393 
1394 static void team_setup(struct net_device *dev)
1395 {
1396 	ether_setup(dev);
1397 
1398 	dev->netdev_ops = &team_netdev_ops;
1399 	dev->destructor	= team_destructor;
1400 	dev->tx_queue_len = 0;
1401 	dev->flags |= IFF_MULTICAST;
1402 	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
1403 
1404 	/*
1405 	 * Indicate that we support unicast address filtering. That way the
1406 	 * core won't force us into promiscuous mode when a unicast address
1407 	 * is added; we leave that decision to the underlying port drivers.
1408 	 */
1409 	dev->priv_flags |= IFF_UNICAST_FLT;
1410 
1411 	dev->features |= NETIF_F_LLTX;
1412 	dev->features |= NETIF_F_GRO;
1413 	dev->hw_features = NETIF_F_HW_VLAN_TX |
1414 			   NETIF_F_HW_VLAN_RX |
1415 			   NETIF_F_HW_VLAN_FILTER;
1416 
1417 	dev->features |= dev->hw_features;
1418 }
1419 
1420 static int team_newlink(struct net *src_net, struct net_device *dev,
1421 			struct nlattr *tb[], struct nlattr *data[])
1422 {
1423 	int err;
1424 
1425 	if (tb[IFLA_ADDRESS] == NULL)
1426 		eth_hw_addr_random(dev);
1427 
1428 	err = register_netdevice(dev);
1429 	if (err)
1430 		return err;
1431 
1432 	return 0;
1433 }
1434 
1435 static int team_validate(struct nlattr *tb[], struct nlattr *data[])
1436 {
1437 	if (tb[IFLA_ADDRESS]) {
1438 		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1439 			return -EINVAL;
1440 		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1441 			return -EADDRNOTAVAIL;
1442 	}
1443 	return 0;
1444 }
1445 
1446 static struct rtnl_link_ops team_link_ops __read_mostly = {
1447 	.kind		= DRV_NAME,
1448 	.priv_size	= sizeof(struct team),
1449 	.setup		= team_setup,
1450 	.newlink	= team_newlink,
1451 	.validate	= team_validate,
1452 };
1453 
1454 
1455 /***********************************
1456  * Generic netlink custom interface
1457  ***********************************/
1458 
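/*
 * The custom genetlink interface exposes four commands (NOOP, OPTIONS_GET,
 * OPTIONS_SET and PORT_LIST_GET, see team_nl_ops below) and broadcasts
 * option and port state changes on the TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME
 * multicast group.
 */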
1459 static struct genl_family team_nl_family = {
1460 	.id		= GENL_ID_GENERATE,
1461 	.name		= TEAM_GENL_NAME,
1462 	.version	= TEAM_GENL_VERSION,
1463 	.maxattr	= TEAM_ATTR_MAX,
1464 	.netnsok	= true,
1465 };
1466 
1467 static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = {
1468 	[TEAM_ATTR_UNSPEC]			= { .type = NLA_UNSPEC, },
1469 	[TEAM_ATTR_TEAM_IFINDEX]		= { .type = NLA_U32 },
1470 	[TEAM_ATTR_LIST_OPTION]			= { .type = NLA_NESTED },
1471 	[TEAM_ATTR_LIST_PORT]			= { .type = NLA_NESTED },
1472 };
1473 
1474 static const struct nla_policy
1475 team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
1476 	[TEAM_ATTR_OPTION_UNSPEC]		= { .type = NLA_UNSPEC, },
1477 	[TEAM_ATTR_OPTION_NAME] = {
1478 		.type = NLA_STRING,
1479 		.len = TEAM_STRING_MAX_LEN,
1480 	},
1481 	[TEAM_ATTR_OPTION_CHANGED]		= { .type = NLA_FLAG },
1482 	[TEAM_ATTR_OPTION_TYPE]			= { .type = NLA_U8 },
1483 	[TEAM_ATTR_OPTION_DATA]			= { .type = NLA_BINARY },
1484 };
1485 
1486 static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
1487 {
1488 	struct sk_buff *msg;
1489 	void *hdr;
1490 	int err;
1491 
1492 	msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
1493 	if (!msg)
1494 		return -ENOMEM;
1495 
1496 	hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
1497 			  &team_nl_family, 0, TEAM_CMD_NOOP);
1498 	if (!hdr) {
1499 		err = -EMSGSIZE; /* genlmsg_put() returns NULL on failure */
1500 		goto err_msg_put;
1501 	}
1502 
1503 	genlmsg_end(msg, hdr);
1504 
1505 	return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid);
1506 
1507 err_msg_put:
1508 	nlmsg_free(msg);
1509 
1510 	return err;
1511 }
1512 
1513 /*
1514  * Netlink cmd functions should be bracketed by the following two functions.
1515  * Since a reference on dev is taken here, dev cannot disappear in between.
1516  */
1517 static struct team *team_nl_team_get(struct genl_info *info)
1518 {
1519 	struct net *net = genl_info_net(info);
1520 	int ifindex;
1521 	struct net_device *dev;
1522 	struct team *team;
1523 
1524 	if (!info->attrs[TEAM_ATTR_TEAM_IFINDEX])
1525 		return NULL;
1526 
1527 	ifindex = nla_get_u32(info->attrs[TEAM_ATTR_TEAM_IFINDEX]);
1528 	dev = dev_get_by_index(net, ifindex);
1529 	if (!dev || dev->netdev_ops != &team_netdev_ops) {
1530 		if (dev)
1531 			dev_put(dev);
1532 		return NULL;
1533 	}
1534 
1535 	team = netdev_priv(dev);
1536 	mutex_lock(&team->lock);
1537 	return team;
1538 }
1539 
1540 static void team_nl_team_put(struct team *team)
1541 {
1542 	mutex_unlock(&team->lock);
1543 	dev_put(team->dev);
1544 }
1545 
1546 static int team_nl_send_generic(struct genl_info *info, struct team *team,
1547 				int (*fill_func)(struct sk_buff *skb,
1548 						 struct genl_info *info,
1549 						 int flags, struct team *team))
1550 {
1551 	struct sk_buff *skb;
1552 	int err;
1553 
1554 	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
1555 	if (!skb)
1556 		return -ENOMEM;
1557 
1558 	err = fill_func(skb, info, NLM_F_ACK, team);
1559 	if (err < 0)
1560 		goto err_fill;
1561 
1562 	err = genlmsg_unicast(genl_info_net(info), skb, info->snd_pid);
1563 	return err;
1564 
1565 err_fill:
1566 	nlmsg_free(skb);
1567 	return err;
1568 }
1569 
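/*
 * Each option instance is emitted as a nested TEAM_ATTR_ITEM_OPTION
 * carrying its name, type and data plus CHANGED/REMOVED flags, and, where
 * applicable, the port ifindex and array index identifying the instance.
 */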
1570 static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
1571 				       struct team_option_inst *opt_inst)
1572 {
1573 	struct nlattr *option_item;
1574 	struct team_option *option = opt_inst->option;
1575 	struct team_option_inst_info *opt_inst_info;
1576 	struct team_gsetter_ctx ctx;
1577 	int err;
1578 
1579 	option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
1580 	if (!option_item)
1581 		goto nla_put_failure;
1582 	if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name))
1583 		goto nla_put_failure;
1584 	if (opt_inst->changed) {
1585 		if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED))
1586 			goto nla_put_failure;
1587 		opt_inst->changed = false;
1588 	}
1589 	if (opt_inst->removed && nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
1590 		goto nla_put_failure;
1591 
1592 	opt_inst_info = &opt_inst->info;
1593 	if (opt_inst_info->port &&
1594 	    nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
1595 			opt_inst_info->port->dev->ifindex))
1596 		goto nla_put_failure;
1597 	if (opt_inst->option->array_size &&
1598 	    nla_put_u32(skb, TEAM_ATTR_OPTION_ARRAY_INDEX,
1599 			opt_inst_info->array_index))
1600 		goto nla_put_failure;
1601 	ctx.info = opt_inst_info;
1602 
1603 	switch (option->type) {
1604 	case TEAM_OPTION_TYPE_U32:
1605 		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
1606 			goto nla_put_failure;
1607 		err = team_option_get(team, opt_inst, &ctx);
1608 		if (err)
1609 			goto errout;
1610 		if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.u32_val))
1611 			goto nla_put_failure;
1612 		break;
1613 	case TEAM_OPTION_TYPE_STRING:
1614 		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
1615 			goto nla_put_failure;
1616 		err = team_option_get(team, opt_inst, &ctx);
1617 		if (err)
1618 			goto errout;
1619 		if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
1620 				   ctx.data.str_val))
1621 			goto nla_put_failure;
1622 		break;
1623 	case TEAM_OPTION_TYPE_BINARY:
1624 		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
1625 			goto nla_put_failure;
1626 		err = team_option_get(team, opt_inst, &ctx);
1627 		if (err)
1628 			goto errout;
1629 		if (nla_put(skb, TEAM_ATTR_OPTION_DATA, ctx.data.bin_val.len,
1630 			    ctx.data.bin_val.ptr))
1631 			goto nla_put_failure;
1632 		break;
1633 	case TEAM_OPTION_TYPE_BOOL:
1634 		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
1635 			goto nla_put_failure;
1636 		err = team_option_get(team, opt_inst, &ctx);
1637 		if (err)
1638 			goto errout;
1639 		if (ctx.data.bool_val &&
1640 		    nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
1641 			goto nla_put_failure;
1642 		break;
1643 	default:
1644 		BUG();
1645 	}
1646 	nla_nest_end(skb, option_item);
1647 	return 0;
1648 
1649 nla_put_failure:
1650 	err = -EMSGSIZE;
1651 errout:
1652 	return err;
1653 }
1654 
1655 static int team_nl_fill_options_get(struct sk_buff *skb,
1656 				    u32 pid, u32 seq, int flags,
1657 				    struct team *team,
1658 				    struct list_head *sel_opt_inst_list)
1659 {
1660 	struct nlattr *option_list;
1661 	void *hdr;
1662 	struct team_option_inst *opt_inst;
1663 	int err;
1664 
1665 	hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
1666 			  TEAM_CMD_OPTIONS_GET);
1667 	if (!hdr) /* genlmsg_put() returns NULL on failure */
1668 		return -EMSGSIZE;
1669 
1670 	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
1671 		goto nla_put_failure;
1672 	option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION);
1673 	if (!option_list)
1674 		goto nla_put_failure;
1675 
1676 	list_for_each_entry(opt_inst, sel_opt_inst_list, tmp_list) {
1677 		err = team_nl_fill_one_option_get(skb, team, opt_inst);
1678 		if (err)
1679 			goto errout;
1680 	}
1681 
1682 	nla_nest_end(skb, option_list);
1683 	return genlmsg_end(skb, hdr);
1684 
1685 nla_put_failure:
1686 	err = -EMSGSIZE;
1687 errout:
1688 	genlmsg_cancel(skb, hdr);
1689 	return err;
1690 }
1691 
1692 static int team_nl_fill_options_get_all(struct sk_buff *skb,
1693 					struct genl_info *info, int flags,
1694 					struct team *team)
1695 {
1696 	struct team_option_inst *opt_inst;
1697 	LIST_HEAD(sel_opt_inst_list);
1698 
1699 	list_for_each_entry(opt_inst, &team->option_inst_list, list)
1700 		list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
1701 	return team_nl_fill_options_get(skb, info->snd_pid,
1702 					info->snd_seq, NLM_F_ACK,
1703 					team, &sel_opt_inst_list);
1704 }
1705 
1706 static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
1707 {
1708 	struct team *team;
1709 	int err;
1710 
1711 	team = team_nl_team_get(info);
1712 	if (!team)
1713 		return -EINVAL;
1714 
1715 	err = team_nl_send_generic(info, team, team_nl_fill_options_get_all);
1716 
1717 	team_nl_team_put(team);
1718 
1719 	return err;
1720 }
1721 
1722 static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
1723 {
1724 	struct team *team;
1725 	int err = 0;
1726 	int i;
1727 	struct nlattr *nl_option;
1728 
1729 	team = team_nl_team_get(info);
1730 	if (!team)
1731 		return -EINVAL;
1732 
1734 	if (!info->attrs[TEAM_ATTR_LIST_OPTION]) {
1735 		err = -EINVAL;
1736 		goto team_put;
1737 	}
1738 
1739 	nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
1740 		struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
1741 		struct nlattr *attr;
1742 		struct nlattr *attr_data;
1743 		enum team_option_type opt_type;
1744 		int opt_port_ifindex = 0; /* != 0 for per-port options */
1745 		u32 opt_array_index = 0;
1746 		bool opt_is_array = false;
1747 		struct team_option_inst *opt_inst;
1748 		char *opt_name;
1749 		bool opt_found = false;
1750 
1751 		if (nla_type(nl_option) != TEAM_ATTR_ITEM_OPTION) {
1752 			err = -EINVAL;
1753 			goto team_put;
1754 		}
1755 		err = nla_parse_nested(opt_attrs, TEAM_ATTR_OPTION_MAX,
1756 				       nl_option, team_nl_option_policy);
1757 		if (err)
1758 			goto team_put;
1759 		if (!opt_attrs[TEAM_ATTR_OPTION_NAME] ||
1760 		    !opt_attrs[TEAM_ATTR_OPTION_TYPE]) {
1761 			err = -EINVAL;
1762 			goto team_put;
1763 		}
1764 		switch (nla_get_u8(opt_attrs[TEAM_ATTR_OPTION_TYPE])) {
1765 		case NLA_U32:
1766 			opt_type = TEAM_OPTION_TYPE_U32;
1767 			break;
1768 		case NLA_STRING:
1769 			opt_type = TEAM_OPTION_TYPE_STRING;
1770 			break;
1771 		case NLA_BINARY:
1772 			opt_type = TEAM_OPTION_TYPE_BINARY;
1773 			break;
1774 		case NLA_FLAG:
1775 			opt_type = TEAM_OPTION_TYPE_BOOL;
1776 			break;
1777 		default:
1778 			err = -EINVAL;
			goto team_put;
1779 		}
1780 
1781 		attr_data = opt_attrs[TEAM_ATTR_OPTION_DATA];
1782 		if (opt_type != TEAM_OPTION_TYPE_BOOL && !attr_data) {
1783 			err = -EINVAL;
1784 			goto team_put;
1785 		}
1786 
1787 		opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]);
1788 		attr = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX];
1789 		if (attr)
1790 			opt_port_ifindex = nla_get_u32(attr);
1791 
1792 		attr = opt_attrs[TEAM_ATTR_OPTION_ARRAY_INDEX];
1793 		if (attr) {
1794 			opt_is_array = true;
1795 			opt_array_index = nla_get_u32(attr);
1796 		}
1797 
1798 		list_for_each_entry(opt_inst, &team->option_inst_list, list) {
1799 			struct team_option *option = opt_inst->option;
1800 			struct team_gsetter_ctx ctx;
1801 			struct team_option_inst_info *opt_inst_info;
1802 			int tmp_ifindex;
1803 
1804 			opt_inst_info = &opt_inst->info;
1805 			tmp_ifindex = opt_inst_info->port ?
1806 				      opt_inst_info->port->dev->ifindex : 0;
1807 			if (option->type != opt_type ||
1808 			    strcmp(option->name, opt_name) ||
1809 			    tmp_ifindex != opt_port_ifindex ||
1810 			    (option->array_size && !opt_is_array) ||
1811 			    opt_inst_info->array_index != opt_array_index)
1812 				continue;
1813 			opt_found = true;
1814 			ctx.info = opt_inst_info;
1815 			switch (opt_type) {
1816 			case TEAM_OPTION_TYPE_U32:
1817 				ctx.data.u32_val = nla_get_u32(attr_data);
1818 				break;
1819 			case TEAM_OPTION_TYPE_STRING:
1820 				if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) {
1821 					err = -EINVAL;
1822 					goto team_put;
1823 				}
1824 				ctx.data.str_val = nla_data(attr_data);
1825 				break;
1826 			case TEAM_OPTION_TYPE_BINARY:
1827 				ctx.data.bin_val.len = nla_len(attr_data);
1828 				ctx.data.bin_val.ptr = nla_data(attr_data);
1829 				break;
1830 			case TEAM_OPTION_TYPE_BOOL:
1831 				ctx.data.bool_val = attr_data ? true : false;
1832 				break;
1833 			default:
1834 				BUG();
1835 			}
1836 			err = team_option_set(team, opt_inst, &ctx);
1837 			if (err)
1838 				goto team_put;
1839 		}
1840 		if (!opt_found) {
1841 			err = -ENOENT;
1842 			goto team_put;
1843 		}
1844 	}
1845 
1846 team_put:
1847 	team_nl_team_put(team);
1848 
1849 	return err;
1850 }
1851 
1852 static int team_nl_fill_port_list_get(struct sk_buff *skb,
1853 				      u32 pid, u32 seq, int flags,
1854 				      struct team *team,
1855 				      bool fillall)
1856 {
1857 	struct nlattr *port_list;
1858 	void *hdr;
1859 	struct team_port *port;
1860 
1861 	hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
1862 			  TEAM_CMD_PORT_LIST_GET);
1863 	if (!hdr) /* genlmsg_put() returns NULL on failure */
1864 		return -EMSGSIZE;
1865 
1866 	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
1867 		goto nla_put_failure;
1868 	port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT);
1869 	if (!port_list)
1870 		goto nla_put_failure;
1871 
1872 	list_for_each_entry(port, &team->port_list, list) {
1873 		struct nlattr *port_item;
1874 
1875 		/* Include only changed ports unless fill-all mode is on */
1876 		if (!fillall && !port->changed)
1877 			continue;
1878 		port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT);
1879 		if (!port_item)
1880 			goto nla_put_failure;
1881 		if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex))
1882 			goto nla_put_failure;
1883 		if (port->changed) {
1884 			if (nla_put_flag(skb, TEAM_ATTR_PORT_CHANGED))
1885 				goto nla_put_failure;
1886 			port->changed = false;
1887 		}
1888 		if ((port->removed &&
1889 		     nla_put_flag(skb, TEAM_ATTR_PORT_REMOVED)) ||
1890 		    (port->state.linkup &&
1891 		     nla_put_flag(skb, TEAM_ATTR_PORT_LINKUP)) ||
1892 		    nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) ||
1893 		    nla_put_u8(skb, TEAM_ATTR_PORT_DUPLEX, port->state.duplex))
1894 			goto nla_put_failure;
1895 		nla_nest_end(skb, port_item);
1896 	}
1897 
1898 	nla_nest_end(skb, port_list);
1899 	return genlmsg_end(skb, hdr);
1900 
1901 nla_put_failure:
1902 	genlmsg_cancel(skb, hdr);
1903 	return -EMSGSIZE;
1904 }
1905 
1906 static int team_nl_fill_port_list_get_all(struct sk_buff *skb,
1907 					  struct genl_info *info, int flags,
1908 					  struct team *team)
1909 {
1910 	return team_nl_fill_port_list_get(skb, info->snd_pid,
1911 					  info->snd_seq, NLM_F_ACK,
1912 					  team, true);
1913 }
1914 
1915 static int team_nl_cmd_port_list_get(struct sk_buff *skb,
1916 				     struct genl_info *info)
1917 {
1918 	struct team *team;
1919 	int err;
1920 
1921 	team = team_nl_team_get(info);
1922 	if (!team)
1923 		return -EINVAL;
1924 
1925 	err = team_nl_send_generic(info, team, team_nl_fill_port_list_get_all);
1926 
1927 	team_nl_team_put(team);
1928 
1929 	return err;
1930 }
1931 
1932 static struct genl_ops team_nl_ops[] = {
1933 	{
1934 		.cmd = TEAM_CMD_NOOP,
1935 		.doit = team_nl_cmd_noop,
1936 		.policy = team_nl_policy,
1937 	},
1938 	{
1939 		.cmd = TEAM_CMD_OPTIONS_SET,
1940 		.doit = team_nl_cmd_options_set,
1941 		.policy = team_nl_policy,
1942 		.flags = GENL_ADMIN_PERM,
1943 	},
1944 	{
1945 		.cmd = TEAM_CMD_OPTIONS_GET,
1946 		.doit = team_nl_cmd_options_get,
1947 		.policy = team_nl_policy,
1948 		.flags = GENL_ADMIN_PERM,
1949 	},
1950 	{
1951 		.cmd = TEAM_CMD_PORT_LIST_GET,
1952 		.doit = team_nl_cmd_port_list_get,
1953 		.policy = team_nl_policy,
1954 		.flags = GENL_ADMIN_PERM,
1955 	},
1956 };
1957 
1958 static struct genl_multicast_group team_change_event_mcgrp = {
1959 	.name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME,
1960 };
1961 
1962 static int team_nl_send_event_options_get(struct team *team,
1963 					  struct list_head *sel_opt_inst_list)
1964 {
1965 	struct sk_buff *skb;
1966 	int err;
1967 	struct net *net = dev_net(team->dev);
1968 
1969 	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
1970 	if (!skb)
1971 		return -ENOMEM;
1972 
1973 	err = team_nl_fill_options_get(skb, 0, 0, 0, team, sel_opt_inst_list);
1974 	if (err < 0)
1975 		goto err_fill;
1976 
1977 	err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id,
1978 				      GFP_KERNEL);
1979 	return err;
1980 
1981 err_fill:
1982 	nlmsg_free(skb);
1983 	return err;
1984 }
1985 
1986 static int team_nl_send_event_port_list_get(struct team *team)
1987 {
1988 	struct sk_buff *skb;
1989 	int err;
1990 	struct net *net = dev_net(team->dev);
1991 
1992 	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
1993 	if (!skb)
1994 		return -ENOMEM;
1995 
1996 	err = team_nl_fill_port_list_get(skb, 0, 0, 0, team, false);
1997 	if (err < 0)
1998 		goto err_fill;
1999 
2000 	err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id,
2001 				      GFP_KERNEL);
2002 	return err;
2003 
2004 err_fill:
2005 	nlmsg_free(skb);
2006 	return err;
2007 }
2008 
2009 static int team_nl_init(void)
2010 {
2011 	int err;
2012 
2013 	err = genl_register_family_with_ops(&team_nl_family, team_nl_ops,
2014 					    ARRAY_SIZE(team_nl_ops));
2015 	if (err)
2016 		return err;
2017 
2018 	err = genl_register_mc_group(&team_nl_family, &team_change_event_mcgrp);
2019 	if (err)
2020 		goto err_change_event_grp_reg;
2021 
2022 	return 0;
2023 
2024 err_change_event_grp_reg:
2025 	genl_unregister_family(&team_nl_family);
2026 
2027 	return err;
2028 }
2029 
2030 static void team_nl_fini(void)
2031 {
2032 	genl_unregister_family(&team_nl_family);
2033 }
2034 
2035 
2036 /******************
2037  * Change checkers
2038  ******************/
2039 
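/*
 * Collect all option instances marked as changed onto a temporary list and
 * send them out in a single OPTIONS_GET event message.
 */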
2040 static void __team_options_change_check(struct team *team)
2041 {
2042 	int err;
2043 	struct team_option_inst *opt_inst;
2044 	LIST_HEAD(sel_opt_inst_list);
2045 
2046 	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
2047 		if (opt_inst->changed)
2048 			list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
2049 	}
2050 	err = team_nl_send_event_options_get(team, &sel_opt_inst_list);
2051 	if (err)
2052 		netdev_warn(team->dev, "Failed to send options change via netlink\n");
2053 }
2054 
2055 static void __team_option_inst_change(struct team *team,
2056 				      struct team_option_inst *sel_opt_inst)
2057 {
2058 	int err;
2059 	LIST_HEAD(sel_opt_inst_list);
2060 
2061 	sel_opt_inst->changed = true;
2062 	list_add(&sel_opt_inst->tmp_list, &sel_opt_inst_list);
2063 	err = team_nl_send_event_options_get(team, &sel_opt_inst_list);
2064 	if (err)
2065 		netdev_warn(team->dev, "Failed to send option change via netlink\n");
2066 }
2067 
2068 /* rtnl lock is held */
2069 static void __team_port_change_check(struct team_port *port, bool linkup)
2070 {
2071 	int err;
2072 
2073 	if (!port->removed && port->state.linkup == linkup)
2074 		return;
2075 
2076 	port->changed = true;
2077 	port->state.linkup = linkup;
2078 	team_refresh_port_linkup(port);
2079 	if (linkup) {
2080 		struct ethtool_cmd ecmd;
2081 
2082 		err = __ethtool_get_settings(port->dev, &ecmd);
2083 		if (!err) {
2084 			port->state.speed = ethtool_cmd_speed(&ecmd);
2085 			port->state.duplex = ecmd.duplex;
2086 			goto send_event;
2087 		}
2088 	}
2089 	port->state.speed = 0;
2090 	port->state.duplex = 0;
2091 
2092 send_event:
2093 	err = team_nl_send_event_port_list_get(port->team);
2094 	if (err)
2095 		netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink\n",
2096 			    port->dev->name);
2098 }
2099 
2100 static void team_port_change_check(struct team_port *port, bool linkup)
2101 {
2102 	struct team *team = port->team;
2103 
2104 	mutex_lock(&team->lock);
2105 	__team_port_change_check(port, linkup);
2106 	mutex_unlock(&team->lock);
2107 }
2108 
2109 
2110 /************************************
2111  * Net device notifier event handler
2112  ************************************/
2113 
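/*
 * React to state changes of underlying port devices; devices that are not
 * team ports are ignored. Carrier and feature changes are propagated, an
 * unregistering port is removed from its team, and mtu or type changes of
 * a port are vetoed.
 */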
2114 static int team_device_event(struct notifier_block *unused,
2115 			     unsigned long event, void *ptr)
2116 {
2117 	struct net_device *dev = (struct net_device *) ptr;
2118 	struct team_port *port;
2119 
2120 	port = team_port_get_rtnl(dev);
2121 	if (!port)
2122 		return NOTIFY_DONE;
2123 
2124 	switch (event) {
2125 	case NETDEV_UP:
2126 		if (netif_carrier_ok(dev))
2127 			team_port_change_check(port, true);
		break;
2128 	case NETDEV_DOWN:
2129 		team_port_change_check(port, false);
		break;
2130 	case NETDEV_CHANGE:
2131 		if (netif_running(port->dev))
2132 			team_port_change_check(port,
2133 					       !!netif_carrier_ok(port->dev));
2134 		break;
2135 	case NETDEV_UNREGISTER:
2136 		team_del_slave(port->team->dev, dev);
2137 		break;
2138 	case NETDEV_FEAT_CHANGE:
2139 		team_compute_features(port->team);
2140 		break;
2141 	case NETDEV_CHANGEMTU:
2142 		/* Forbid changing the mtu of an underlying port device */
2143 		return NOTIFY_BAD;
2144 	case NETDEV_PRE_TYPE_CHANGE:
2145 		/* Forbid changing the type of an underlying port device */
2146 		return NOTIFY_BAD;
2147 	}
2148 	return NOTIFY_DONE;
2149 }
2150 
2151 static struct notifier_block team_notifier_block __read_mostly = {
2152 	.notifier_call = team_device_event,
2153 };
2154 
2155 
2156 /***********************
2157  * Module init and exit
2158  ***********************/
2159 
2160 static int __init team_module_init(void)
2161 {
2162 	int err;
2163 
2164 	register_netdevice_notifier(&team_notifier_block);
2165 
2166 	err = rtnl_link_register(&team_link_ops);
2167 	if (err)
2168 		goto err_rtnl_reg;
2169 
2170 	err = team_nl_init();
2171 	if (err)
2172 		goto err_nl_init;
2173 
2174 	return 0;
2175 
2176 err_nl_init:
2177 	rtnl_link_unregister(&team_link_ops);
2178 
2179 err_rtnl_reg:
2180 	unregister_netdevice_notifier(&team_notifier_block);
2181 
2182 	return err;
2183 }
2184 
2185 static void __exit team_module_exit(void)
2186 {
2187 	team_nl_fini();
2188 	rtnl_link_unregister(&team_link_ops);
2189 	unregister_netdevice_notifier(&team_notifier_block);
2190 }
2191 
2192 module_init(team_module_init);
2193 module_exit(team_module_exit);
2194 
2195 MODULE_LICENSE("GPL v2");
2196 MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
2197 MODULE_DESCRIPTION("Ethernet team device driver");
2198 MODULE_ALIAS_RTNL_LINK(DRV_NAME);
2199