/*
 * drivers/net/team/team.c - Network team device driver
 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/errno.h>
#include <linux/ctype.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/socket.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <net/genetlink.h>
#include <net/netlink.h>
#include <linux/if_team.h>

#define DRV_NAME "team"


/**********
 * Helpers
 **********/

#define team_port_exists(dev) (dev->priv_flags & IFF_TEAM_PORT)

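/*
 * team_port_get_rcu() may be used from the rx path under
 * rcu_read_lock(); team_port_get_rtnl() from control paths holding
 * rtnl_lock. Both yield NULL when the device is not an active team
 * port, since rx_handler_data is only meaningful while IFF_TEAM_PORT
 * is set.
 */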
static struct team_port *team_port_get_rcu(const struct net_device *dev)
{
	struct team_port *port = rcu_dereference(dev->rx_handler_data);

	return team_port_exists(dev) ? port : NULL;
}

static struct team_port *team_port_get_rtnl(const struct net_device *dev)
{
	struct team_port *port = rtnl_dereference(dev->rx_handler_data);

	return team_port_exists(dev) ? port : NULL;
}

/*
 * Since the ability to change the MAC address of an open port device is
 * tested in team_port_add, this function can be called without checking
 * the return value.
 */
static int __set_port_mac(struct net_device *port_dev,
			  const unsigned char *dev_addr)
{
	struct sockaddr addr;

	memcpy(addr.sa_data, dev_addr, ETH_ALEN);
	addr.sa_family = ARPHRD_ETHER;
	return dev_set_mac_address(port_dev, &addr);
}

int team_port_set_orig_mac(struct team_port *port)
{
	return __set_port_mac(port->dev, port->orig.dev_addr);
}

int team_port_set_team_mac(struct team_port *port)
{
	return __set_port_mac(port->dev, port->team->dev->dev_addr);
}
EXPORT_SYMBOL(team_port_set_team_mac);


/*******************
 * Options handling
 *******************/

void team_options_register(struct team *team, struct team_option *option,
			   size_t option_count)
{
	int i;

	for (i = 0; i < option_count; i++, option++)
		list_add_tail(&option->list, &team->option_list);
}
EXPORT_SYMBOL(team_options_register);
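
/*
 * Example (sketch only; mymode_option_get/set are hypothetical): a mode
 * or driver would typically describe its options in an array and
 * register them all in one call:
 *
 *	static struct team_option mymode_options[] = {
 *		{
 *			.name	= "my_option",
 *			.type	= TEAM_OPTION_TYPE_U32,
 *			.getter	= mymode_option_get,
 *			.setter	= mymode_option_set,
 *		},
 *	};
 *
 *	team_options_register(team, mymode_options,
 *			      ARRAY_SIZE(mymode_options));
 */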

static void __team_options_change_check(struct team *team,
					struct team_option *changed_option);

static void __team_options_unregister(struct team *team,
				      struct team_option *option,
				      size_t option_count)
{
	int i;

	for (i = 0; i < option_count; i++, option++)
		list_del(&option->list);
}

void team_options_unregister(struct team *team, struct team_option *option,
			     size_t option_count)
{
	__team_options_unregister(team, option, option_count);
	__team_options_change_check(team, NULL);
}
EXPORT_SYMBOL(team_options_unregister);

static int team_option_get(struct team *team, struct team_option *option,
			   void *arg)
{
	return option->getter(team, arg);
}

static int team_option_set(struct team *team, struct team_option *option,
			   void *arg)
{
	int err;

	err = option->setter(team, arg);
	if (err)
		return err;

	__team_options_change_check(team, option);
	return err;
}

/****************
 * Mode handling
 ****************/

static LIST_HEAD(mode_list);
static DEFINE_SPINLOCK(mode_list_lock);

static struct team_mode *__find_mode(const char *kind)
{
	struct team_mode *mode;

	list_for_each_entry(mode, &mode_list, list) {
		if (strcmp(mode->kind, kind) == 0)
			return mode;
	}
	return NULL;
}

static bool is_good_mode_name(const char *name)
{
	while (*name != '\0') {
		if (!isalpha(*name) && !isdigit(*name) && *name != '_')
			return false;
		name++;
	}
	return true;
}

int team_mode_register(struct team_mode *mode)
{
	int err = 0;

	if (!is_good_mode_name(mode->kind) ||
	    mode->priv_size > TEAM_MODE_PRIV_SIZE)
		return -EINVAL;
	spin_lock(&mode_list_lock);
	if (__find_mode(mode->kind)) {
		err = -EEXIST;
		goto unlock;
	}
	list_add_tail(&mode->list, &mode_list);
unlock:
	spin_unlock(&mode_list_lock);
	return err;
}
EXPORT_SYMBOL(team_mode_register);

int team_mode_unregister(struct team_mode *mode)
{
	spin_lock(&mode_list_lock);
	list_del_init(&mode->list);
	spin_unlock(&mode_list_lock);
	return 0;
}
EXPORT_SYMBOL(team_mode_unregister);
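
/*
 * Example (sketch; "mymode" and mymode_ops are hypothetical): a mode
 * lives in its own module and registers a struct team_mode on load:
 *
 *	static struct team_mode mymode = {
 *		.kind	= "mymode",
 *		.owner	= THIS_MODULE,
 *		.ops	= &mymode_ops,
 *	};
 *
 *	static int __init mymode_init(void)
 *	{
 *		return team_mode_register(&mymode);
 *	}
 *
 * The request_module("team-mode-%s", ...) call in team_mode_get() below
 * expects such a module to provide MODULE_ALIAS("team-mode-mymode").
 */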

static struct team_mode *team_mode_get(const char *kind)
{
	struct team_mode *mode;

	spin_lock(&mode_list_lock);
	mode = __find_mode(kind);
	if (!mode) {
		spin_unlock(&mode_list_lock);
		request_module("team-mode-%s", kind);
		spin_lock(&mode_list_lock);
		mode = __find_mode(kind);
	}
	if (mode)
		if (!try_module_get(mode->owner))
			mode = NULL;

	spin_unlock(&mode_list_lock);
	return mode;
}

static void team_mode_put(const struct team_mode *mode)
{
	module_put(mode->owner);
}

static bool team_dummy_transmit(struct team *team, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
	return false;
}

static rx_handler_result_t team_dummy_receive(struct team *team,
					      struct team_port *port,
					      struct sk_buff *skb)
{
	return RX_HANDLER_ANOTHER;
}

static void team_adjust_ops(struct team *team)
{
	/*
	 * To avoid checks in rx/tx skb paths, ensure here that non-null and
	 * correct ops are always set.
	 */

	if (list_empty(&team->port_list) ||
	    !team->mode || !team->mode->ops->transmit)
		team->ops.transmit = team_dummy_transmit;
	else
		team->ops.transmit = team->mode->ops->transmit;

	if (list_empty(&team->port_list) ||
	    !team->mode || !team->mode->ops->receive)
		team->ops.receive = team_dummy_receive;
	else
		team->ops.receive = team->mode->ops->receive;
}

/*
 * We can benefit from the fact that it's ensured no port is present
 * at the time of mode change. Therefore no packets are in flight, so
 * there's no need to set mode operations in any special way.
 */
static int __team_change_mode(struct team *team,
			      const struct team_mode *new_mode)
{
	/* Check if mode was previously set and do cleanup if so */
	if (team->mode) {
		void (*exit_op)(struct team *team) = team->ops.exit;

		/* Clear ops area so no callback is called any longer */
		memset(&team->ops, 0, sizeof(struct team_mode_ops));
		team_adjust_ops(team);

		if (exit_op)
			exit_op(team);
		team_mode_put(team->mode);
		team->mode = NULL;
		/* zero private data area */
		memset(&team->mode_priv, 0,
		       sizeof(struct team) - offsetof(struct team, mode_priv));
	}

	if (!new_mode)
		return 0;

	if (new_mode->ops->init) {
		int err;

		err = new_mode->ops->init(team);
		if (err)
			return err;
	}

	team->mode = new_mode;
	memcpy(&team->ops, new_mode->ops, sizeof(struct team_mode_ops));
	team_adjust_ops(team);

	return 0;
}

static int team_change_mode(struct team *team, const char *kind)
{
	struct team_mode *new_mode;
	struct net_device *dev = team->dev;
	int err;

	if (!list_empty(&team->port_list)) {
		netdev_err(dev, "No ports can be present during mode change\n");
		return -EBUSY;
	}

	if (team->mode && strcmp(team->mode->kind, kind) == 0) {
		netdev_err(dev, "Unable to change to the same mode the team is in\n");
		return -EINVAL;
	}

	new_mode = team_mode_get(kind);
	if (!new_mode) {
		netdev_err(dev, "Mode \"%s\" not found\n", kind);
		return -EINVAL;
	}

	err = __team_change_mode(team, new_mode);
	if (err) {
		netdev_err(dev, "Failed to change to mode \"%s\"\n", kind);
		team_mode_put(new_mode);
		return err;
	}

	netdev_info(dev, "Mode changed to \"%s\"\n", kind);
	return 0;
}
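
/*
 * The checks above pin down the expected control flow: set the "mode"
 * option first, while the port list is still empty, and only then add
 * ports. team_change_mode() refuses to run once ports are present.
 */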


/************************
 * Rx path frame handler
 ************************/

/* note: already called with rcu_read_lock */
static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct team_port *port;
	struct team *team;
	rx_handler_result_t res;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return RX_HANDLER_CONSUMED;

	*pskb = skb;

	port = team_port_get_rcu(skb->dev);
	team = port->team;

	res = team->ops.receive(team, port, skb);
	if (res == RX_HANDLER_ANOTHER) {
		struct team_pcpu_stats *pcpu_stats;

		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->rx_packets++;
		pcpu_stats->rx_bytes += skb->len;
		if (skb->pkt_type == PACKET_MULTICAST)
			pcpu_stats->rx_multicast++;
		u64_stats_update_end(&pcpu_stats->syncp);

		skb->dev = team->dev;
	} else {
		this_cpu_inc(team->pcpu_stats->rx_dropped);
	}

	return res;
}
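
/*
 * Returning RX_HANDLER_ANOTHER with skb->dev retargeted to team->dev
 * makes the core run another receive pass as if the frame had arrived
 * on the team device itself; any other result from the mode's receive
 * op is accounted as a drop on the team.
 */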


/****************
 * Port handling
 ****************/

static bool team_port_find(const struct team *team,
			   const struct team_port *port)
{
	struct team_port *cur;

	list_for_each_entry(cur, &team->port_list, list)
		if (cur == port)
			return true;
	return false;
}

/*
 * Add/delete a port to/from the team port list. Writes are guarded by
 * rtnl_lock. Takes care of correct port->index setup (might be racy).
 */
static void team_port_list_add_port(struct team *team,
				    struct team_port *port)
{
	port->index = team->port_count++;
	hlist_add_head_rcu(&port->hlist,
			   team_port_index_hash(team, port->index));
	list_add_tail_rcu(&port->list, &team->port_list);
}

static void __reconstruct_port_hlist(struct team *team, int rm_index)
{
	int i;
	struct team_port *port;

	for (i = rm_index + 1; i < team->port_count; i++) {
		port = team_get_port_by_index(team, i);
		hlist_del_rcu(&port->hlist);
		port->index--;
		hlist_add_head_rcu(&port->hlist,
				   team_port_index_hash(team, port->index));
	}
}

static void team_port_list_del_port(struct team *team,
				    struct team_port *port)
{
	int rm_index = port->index;

	hlist_del_rcu(&port->hlist);
	list_del_rcu(&port->list);
	__reconstruct_port_hlist(team, rm_index);
	team->port_count--;
}

#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
			    NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
			    NETIF_F_HIGHDMA | NETIF_F_LRO)

static void __team_compute_features(struct team *team)
{
	struct team_port *port;
	u32 vlan_features = TEAM_VLAN_FEATURES;
	unsigned short max_hard_header_len = ETH_HLEN;

	list_for_each_entry(port, &team->port_list, list) {
		vlan_features = netdev_increment_features(vlan_features,
					port->dev->vlan_features,
					TEAM_VLAN_FEATURES);

		if (port->dev->hard_header_len > max_hard_header_len)
			max_hard_header_len = port->dev->hard_header_len;
	}

	team->dev->vlan_features = vlan_features;
	team->dev->hard_header_len = max_hard_header_len;

	netdev_change_features(team->dev);
}

static void team_compute_features(struct team *team)
{
	mutex_lock(&team->lock);
	__team_compute_features(team);
	mutex_unlock(&team->lock);
}

static int team_port_enter(struct team *team, struct team_port *port)
{
	int err = 0;

	dev_hold(team->dev);
	port->dev->priv_flags |= IFF_TEAM_PORT;
	if (team->ops.port_enter) {
		err = team->ops.port_enter(team, port);
		if (err) {
			netdev_err(team->dev, "Device %s failed to enter team mode\n",
				   port->dev->name);
			goto err_port_enter;
		}
	}

	return 0;

err_port_enter:
	port->dev->priv_flags &= ~IFF_TEAM_PORT;
	dev_put(team->dev);

	return err;
}

static void team_port_leave(struct team *team, struct team_port *port)
{
	if (team->ops.port_leave)
		team->ops.port_leave(team, port);
	port->dev->priv_flags &= ~IFF_TEAM_PORT;
	dev_put(team->dev);
}

static void __team_port_change_check(struct team_port *port, bool linkup);

static int team_port_add(struct team *team, struct net_device *port_dev)
{
	struct net_device *dev = team->dev;
	struct team_port *port;
	char *portname = port_dev->name;
	int err;

	if (port_dev->flags & IFF_LOOPBACK ||
	    port_dev->type != ARPHRD_ETHER) {
		netdev_err(dev, "Device %s is of an unsupported type\n",
			   portname);
		return -EINVAL;
	}

	if (team_port_exists(port_dev)) {
		netdev_err(dev, "Device %s is already a port of a team device\n",
			   portname);
		return -EBUSY;
	}

	if (port_dev->flags & IFF_UP) {
		netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
			   portname);
		return -EBUSY;
	}

	port = kzalloc(sizeof(struct team_port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->dev = port_dev;
	port->team = team;

	port->orig.mtu = port_dev->mtu;
	err = dev_set_mtu(port_dev, dev->mtu);
	if (err) {
		netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
		goto err_set_mtu;
	}

	memcpy(port->orig.dev_addr, port_dev->dev_addr, ETH_ALEN);

	err = team_port_enter(team, port);
	if (err) {
		netdev_err(dev, "Device %s failed to enter team mode\n",
			   portname);
		goto err_port_enter;
	}

	err = dev_open(port_dev);
	if (err) {
		netdev_dbg(dev, "Device %s opening failed\n",
			   portname);
		goto err_dev_open;
	}

	err = netdev_set_master(port_dev, dev);
	if (err) {
		netdev_err(dev, "Device %s failed to set master\n", portname);
		goto err_set_master;
	}

	err = netdev_rx_handler_register(port_dev, team_handle_frame,
					 port);
	if (err) {
		netdev_err(dev, "Device %s failed to register rx_handler\n",
			   portname);
		goto err_handler_register;
	}

	team_port_list_add_port(team, port);
	team_adjust_ops(team);
	__team_compute_features(team);
	__team_port_change_check(port, !!netif_carrier_ok(port_dev));

	netdev_info(dev, "Port device %s added\n", portname);

	return 0;

err_handler_register:
	netdev_set_master(port_dev, NULL);

err_set_master:
	dev_close(port_dev);

err_dev_open:
	team_port_leave(team, port);
	team_port_set_orig_mac(port);

err_port_enter:
	dev_set_mtu(port_dev, port->orig.mtu);

err_set_mtu:
	kfree(port);

	return err;
}

static int team_port_del(struct team *team, struct net_device *port_dev)
{
	struct net_device *dev = team->dev;
	struct team_port *port;
	char *portname = port_dev->name;

	port = team_port_get_rtnl(port_dev);
	if (!port || !team_port_find(team, port)) {
		netdev_err(dev, "Device %s does not act as a port of this team\n",
			   portname);
		return -ENOENT;
	}

	__team_port_change_check(port, false);
	team_port_list_del_port(team, port);
	team_adjust_ops(team);
	netdev_rx_handler_unregister(port_dev);
	netdev_set_master(port_dev, NULL);
	dev_close(port_dev);
	team_port_leave(team, port);
	team_port_set_orig_mac(port);
	dev_set_mtu(port_dev, port->orig.mtu);
	synchronize_rcu();
	kfree(port);
	netdev_info(dev, "Port device %s removed\n", portname);
	__team_compute_features(team);

	return 0;
}
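
/*
 * Note the teardown above mirrors team_port_add() in reverse;
 * synchronize_rcu() guarantees no rx path still sees the port before
 * it is freed.
 */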


/*****************
 * Net device ops
 *****************/

static const char team_no_mode_kind[] = "*NOMODE*";

static int team_mode_option_get(struct team *team, void *arg)
{
	const char **str = arg;

	*str = team->mode ? team->mode->kind : team_no_mode_kind;
	return 0;
}

static int team_mode_option_set(struct team *team, void *arg)
{
	const char **str = arg;

	return team_change_mode(team, *str);
}

static struct team_option team_options[] = {
	{
		.name = "mode",
		.type = TEAM_OPTION_TYPE_STRING,
		.getter = team_mode_option_get,
		.setter = team_mode_option_set,
	},
};

static int team_init(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	int i;

	team->dev = dev;
	mutex_init(&team->lock);

	team->pcpu_stats = alloc_percpu(struct team_pcpu_stats);
	if (!team->pcpu_stats)
		return -ENOMEM;

	for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
		INIT_HLIST_HEAD(&team->port_hlist[i]);
	INIT_LIST_HEAD(&team->port_list);

	team_adjust_ops(team);

	INIT_LIST_HEAD(&team->option_list);
	team_options_register(team, team_options, ARRAY_SIZE(team_options));
	netif_carrier_off(dev);

	return 0;
}

static void team_uninit(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	struct team_port *tmp;

	mutex_lock(&team->lock);
	list_for_each_entry_safe(port, tmp, &team->port_list, list)
		team_port_del(team, port->dev);

	__team_change_mode(team, NULL); /* cleanup */
	__team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
	mutex_unlock(&team->lock);
}

static void team_destructor(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);

	free_percpu(team->pcpu_stats);
	free_netdev(dev);
}

static int team_open(struct net_device *dev)
{
	netif_carrier_on(dev);
	return 0;
}

static int team_close(struct net_device *dev)
{
	netif_carrier_off(dev);
	return 0;
}

/*
 * note: already called with rcu_read_lock
 */
static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	bool tx_success = false;
	unsigned int len = skb->len;

	tx_success = team->ops.transmit(team, skb);
	if (tx_success) {
		struct team_pcpu_stats *pcpu_stats;

		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(team->pcpu_stats->tx_dropped);
	}

	return NETDEV_TX_OK;
}
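
/*
 * team_xmit() always reports NETDEV_TX_OK: the mode's transmit op is
 * expected to consume the skb either way (team_dummy_transmit() above
 * frees it), with failures showing up only in the tx_dropped counter.
 */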

static void team_change_rx_flags(struct net_device *dev, int change)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int inc;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		if (change & IFF_PROMISC) {
			inc = dev->flags & IFF_PROMISC ? 1 : -1;
			dev_set_promiscuity(port->dev, inc);
		}
		if (change & IFF_ALLMULTI) {
			inc = dev->flags & IFF_ALLMULTI ? 1 : -1;
			dev_set_allmulti(port->dev, inc);
		}
	}
	rcu_read_unlock();
}

static void team_set_rx_mode(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		dev_uc_sync(port->dev, dev);
		dev_mc_sync(port->dev, dev);
	}
	rcu_read_unlock();
}

static int team_set_mac_address(struct net_device *dev, void *p)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	struct sockaddr *addr = p;

	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list)
		if (team->ops.port_change_mac)
			team->ops.port_change_mac(team, port);
	rcu_read_unlock();
	return 0;
}

static int team_change_mtu(struct net_device *dev, int new_mtu)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err;

	/*
	 * Although this is a reader, it's guarded by the team lock. It's not
	 * possible to traverse the list in reverse under rcu_read_lock.
	 */
	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list) {
		err = dev_set_mtu(port->dev, new_mtu);
		if (err) {
			netdev_err(dev, "Device %s failed to change mtu\n",
				   port->dev->name);
			goto unwind;
		}
	}
	mutex_unlock(&team->lock);

	dev->mtu = new_mtu;

	return 0;

unwind:
	list_for_each_entry_continue_reverse(port, &team->port_list, list)
		dev_set_mtu(port->dev, dev->mtu);
	mutex_unlock(&team->lock);

	return err;
}

static struct rtnl_link_stats64 *
team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct team *team = netdev_priv(dev);
	struct team_pcpu_stats *p;
	u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
	u32 rx_dropped = 0, tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(team->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_bh(&p->syncp);
			rx_packets	= p->rx_packets;
			rx_bytes	= p->rx_bytes;
			rx_multicast	= p->rx_multicast;
			tx_packets	= p->tx_packets;
			tx_bytes	= p->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&p->syncp, start));

		stats->rx_packets	+= rx_packets;
		stats->rx_bytes		+= rx_bytes;
		stats->multicast	+= rx_multicast;
		stats->tx_packets	+= tx_packets;
		stats->tx_bytes		+= tx_bytes;
		/*
		 * rx_dropped & tx_dropped are u32, updated
		 * without syncp protection.
		 */
		rx_dropped	+= p->rx_dropped;
		tx_dropped	+= p->tx_dropped;
	}
	stats->rx_dropped	= rx_dropped;
	stats->tx_dropped	= tx_dropped;
	return stats;
}

static void team_vlan_rx_add_vid(struct net_device *dev, uint16_t vid)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		const struct net_device_ops *ops = port->dev->netdev_ops;

		if (ops->ndo_vlan_rx_add_vid)
			ops->ndo_vlan_rx_add_vid(port->dev, vid);
	}
	rcu_read_unlock();
}

static void team_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		const struct net_device_ops *ops = port->dev->netdev_ops;

		if (ops->ndo_vlan_rx_kill_vid)
			ops->ndo_vlan_rx_kill_vid(port->dev, vid);
	}
	rcu_read_unlock();
}

static int team_add_slave(struct net_device *dev, struct net_device *port_dev)
{
	struct team *team = netdev_priv(dev);
	int err;

	mutex_lock(&team->lock);
	err = team_port_add(team, port_dev);
	mutex_unlock(&team->lock);
	return err;
}

static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	struct team *team = netdev_priv(dev);
	int err;

	mutex_lock(&team->lock);
	err = team_port_del(team, port_dev);
	mutex_unlock(&team->lock);
	return err;
}

static const struct net_device_ops team_netdev_ops = {
	.ndo_init		= team_init,
	.ndo_uninit		= team_uninit,
	.ndo_open		= team_open,
	.ndo_stop		= team_close,
	.ndo_start_xmit		= team_xmit,
	.ndo_change_rx_flags	= team_change_rx_flags,
	.ndo_set_rx_mode	= team_set_rx_mode,
	.ndo_set_mac_address	= team_set_mac_address,
	.ndo_change_mtu		= team_change_mtu,
	.ndo_get_stats64	= team_get_stats64,
	.ndo_vlan_rx_add_vid	= team_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= team_vlan_rx_kill_vid,
	.ndo_add_slave		= team_add_slave,
	.ndo_del_slave		= team_del_slave,
};


/***********************
 * rt netlink interface
 ***********************/

static void team_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->netdev_ops = &team_netdev_ops;
	dev->destructor	= team_destructor;
	dev->tx_queue_len = 0;
	dev->flags |= IFF_MULTICAST;
	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);

	/*
	 * Indicate we support unicast address filtering. That way the core
	 * won't force us into promiscuous mode when a unicast address is
	 * added; leave this up to the underlying drivers.
	 */
	dev->priv_flags |= IFF_UNICAST_FLT;

	dev->features |= NETIF_F_LLTX;
	dev->features |= NETIF_F_GRO;
	dev->hw_features = NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	dev->features |= dev->hw_features;
}

static int team_newlink(struct net *src_net, struct net_device *dev,
			struct nlattr *tb[], struct nlattr *data[])
{
	int err;

	if (tb[IFLA_ADDRESS] == NULL)
		random_ether_addr(dev->dev_addr);

	err = register_netdevice(dev);
	if (err)
		return err;

	return 0;
}

static int team_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	return 0;
}

static struct rtnl_link_ops team_link_ops __read_mostly = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct team),
	.setup		= team_setup,
	.newlink	= team_newlink,
	.validate	= team_validate,
};
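
/*
 * With these ops registered, a team device is driven from userspace,
 * e.g. (illustrative iproute2 session):
 *
 *	ip link add name team0 type team
 *	ip link set eth0 down
 *	ip link set eth0 master team0
 *
 * The master/unmaster operations end up in ndo_add_slave/ndo_del_slave
 * above; team_port_add() insists the port is down first.
 */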


/***********************************
 * Generic netlink custom interface
 ***********************************/

static struct genl_family team_nl_family = {
	.id		= GENL_ID_GENERATE,
	.name		= TEAM_GENL_NAME,
	.version	= TEAM_GENL_VERSION,
	.maxattr	= TEAM_ATTR_MAX,
	.netnsok	= true,
};

static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = {
	[TEAM_ATTR_UNSPEC]			= { .type = NLA_UNSPEC, },
	[TEAM_ATTR_TEAM_IFINDEX]		= { .type = NLA_U32 },
	[TEAM_ATTR_LIST_OPTION]			= { .type = NLA_NESTED },
	[TEAM_ATTR_LIST_PORT]			= { .type = NLA_NESTED },
};

static const struct nla_policy
team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
	[TEAM_ATTR_OPTION_UNSPEC]		= { .type = NLA_UNSPEC, },
	[TEAM_ATTR_OPTION_NAME] = {
		.type = NLA_STRING,
		.len = TEAM_STRING_MAX_LEN,
	},
	[TEAM_ATTR_OPTION_CHANGED]		= { .type = NLA_FLAG },
	[TEAM_ATTR_OPTION_TYPE]			= { .type = NLA_U8 },
	[TEAM_ATTR_OPTION_DATA] = {
		.type = NLA_BINARY,
		.len = TEAM_STRING_MAX_LEN,
	},
};
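
/*
 * Given the policies above, a TEAM_CMD_OPTIONS_SET request is expected
 * to be laid out as (sketch):
 *
 *	TEAM_ATTR_TEAM_IFINDEX (u32)
 *	TEAM_ATTR_LIST_OPTION (nested)
 *		TEAM_ATTR_ITEM_OPTION (nested)
 *			TEAM_ATTR_OPTION_NAME (string, e.g. "mode")
 *			TEAM_ATTR_OPTION_TYPE (u8: NLA_U32 or NLA_STRING)
 *			TEAM_ATTR_OPTION_DATA (u32 or string)
 *
 * See team_nl_cmd_options_set() below for the matching parser.
 */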

static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *msg;
	void *hdr;
	int err;

	msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
			  &team_nl_family, 0, TEAM_CMD_NOOP);
	/* genlmsg_put() returns NULL on failure, not an ERR_PTR */
	if (!hdr) {
		err = -EMSGSIZE;
		goto err_msg_put;
	}

	genlmsg_end(msg, hdr);

	return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid);

err_msg_put:
	nlmsg_free(msg);

	return err;
}

/*
 * Netlink cmd functions should be bracketed by the following two
 * functions. Since the dev gets held here, it is guaranteed not to
 * disappear in between.
 */
static struct team *team_nl_team_get(struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	int ifindex;
	struct net_device *dev;
	struct team *team;

	if (!info->attrs[TEAM_ATTR_TEAM_IFINDEX])
		return NULL;

	ifindex = nla_get_u32(info->attrs[TEAM_ATTR_TEAM_IFINDEX]);
	dev = dev_get_by_index(net, ifindex);
	if (!dev || dev->netdev_ops != &team_netdev_ops) {
		if (dev)
			dev_put(dev);
		return NULL;
	}

	team = netdev_priv(dev);
	mutex_lock(&team->lock);
	return team;
}

static void team_nl_team_put(struct team *team)
{
	mutex_unlock(&team->lock);
	dev_put(team->dev);
}
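
/*
 * Typical bracket in a cmd handler (see team_nl_cmd_options_get()
 * below):
 *
 *	team = team_nl_team_get(info);
 *	if (!team)
 *		return -EINVAL;
 *	...
 *	team_nl_team_put(team);
 */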

static int team_nl_send_generic(struct genl_info *info, struct team *team,
				int (*fill_func)(struct sk_buff *skb,
						 struct genl_info *info,
						 int flags, struct team *team))
{
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	err = fill_func(skb, info, NLM_F_ACK, team);
	if (err < 0)
		goto err_fill;

	err = genlmsg_unicast(genl_info_net(info), skb, info->snd_pid);
	return err;

err_fill:
	nlmsg_free(skb);
	return err;
}

static int team_nl_fill_options_get_changed(struct sk_buff *skb,
					    u32 pid, u32 seq, int flags,
					    struct team *team,
					    struct team_option *changed_option)
{
	struct nlattr *option_list;
	void *hdr;
	struct team_option *option;

	hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
			  TEAM_CMD_OPTIONS_GET);
	/* genlmsg_put() returns NULL on failure, not an ERR_PTR */
	if (!hdr)
		return -EMSGSIZE;

	NLA_PUT_U32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex);
	option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION);
	if (!option_list)
		return -EMSGSIZE;

	list_for_each_entry(option, &team->option_list, list) {
		struct nlattr *option_item;
		long arg;

		option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
		if (!option_item)
			goto nla_put_failure;
		NLA_PUT_STRING(skb, TEAM_ATTR_OPTION_NAME, option->name);
		if (option == changed_option)
			NLA_PUT_FLAG(skb, TEAM_ATTR_OPTION_CHANGED);
		switch (option->type) {
		case TEAM_OPTION_TYPE_U32:
			NLA_PUT_U8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32);
			team_option_get(team, option, &arg);
			NLA_PUT_U32(skb, TEAM_ATTR_OPTION_DATA, arg);
			break;
		case TEAM_OPTION_TYPE_STRING:
			NLA_PUT_U8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING);
			team_option_get(team, option, &arg);
			NLA_PUT_STRING(skb, TEAM_ATTR_OPTION_DATA,
				       (char *) arg);
			break;
		default:
			BUG();
		}
		nla_nest_end(skb, option_item);
	}

	nla_nest_end(skb, option_list);
	return genlmsg_end(skb, hdr);

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int team_nl_fill_options_get(struct sk_buff *skb,
				    struct genl_info *info, int flags,
				    struct team *team)
{
	return team_nl_fill_options_get_changed(skb, info->snd_pid,
						info->snd_seq, NLM_F_ACK,
						team, NULL);
}

static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
{
	struct team *team;
	int err;

	team = team_nl_team_get(info);
	if (!team)
		return -EINVAL;

	err = team_nl_send_generic(info, team, team_nl_fill_options_get);

	team_nl_team_put(team);

	return err;
}
static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
{
	struct team *team;
	int err = 0;
	int i;
	struct nlattr *nl_option;

	team = team_nl_team_get(info);
	if (!team)
		return -EINVAL;

	err = -EINVAL;
	if (!info->attrs[TEAM_ATTR_LIST_OPTION])
		goto team_put;

	nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
		struct nlattr *mode_attrs[TEAM_ATTR_OPTION_MAX + 1];
		enum team_option_type opt_type;
		struct team_option *option;
		char *opt_name;
		bool opt_found = false;

		if (nla_type(nl_option) != TEAM_ATTR_ITEM_OPTION) {
			err = -EINVAL;
			goto team_put;
		}
		err = nla_parse_nested(mode_attrs, TEAM_ATTR_OPTION_MAX,
				       nl_option, team_nl_option_policy);
		if (err)
			goto team_put;
		if (!mode_attrs[TEAM_ATTR_OPTION_NAME] ||
		    !mode_attrs[TEAM_ATTR_OPTION_TYPE] ||
		    !mode_attrs[TEAM_ATTR_OPTION_DATA]) {
			err = -EINVAL;
			goto team_put;
		}
		switch (nla_get_u8(mode_attrs[TEAM_ATTR_OPTION_TYPE])) {
		case NLA_U32:
			opt_type = TEAM_OPTION_TYPE_U32;
			break;
		case NLA_STRING:
			opt_type = TEAM_OPTION_TYPE_STRING;
			break;
		default:
			/* don't fall out with err == 0 on an unknown type */
			err = -EINVAL;
			goto team_put;
		}

		opt_name = nla_data(mode_attrs[TEAM_ATTR_OPTION_NAME]);
		list_for_each_entry(option, &team->option_list, list) {
			long arg;
			struct nlattr *opt_data_attr;

			if (option->type != opt_type ||
			    strcmp(option->name, opt_name))
				continue;
			opt_found = true;
			opt_data_attr = mode_attrs[TEAM_ATTR_OPTION_DATA];
			switch (opt_type) {
			case TEAM_OPTION_TYPE_U32:
				arg = nla_get_u32(opt_data_attr);
				break;
			case TEAM_OPTION_TYPE_STRING:
				arg = (long) nla_data(opt_data_attr);
				break;
			default:
				BUG();
			}
			err = team_option_set(team, option, &arg);
			if (err)
				goto team_put;
		}
		if (!opt_found) {
			err = -ENOENT;
			goto team_put;
		}
	}

team_put:
	team_nl_team_put(team);

	return err;
}

static int team_nl_fill_port_list_get_changed(struct sk_buff *skb,
					      u32 pid, u32 seq, int flags,
					      struct team *team,
					      struct team_port *changed_port)
{
	struct nlattr *port_list;
	void *hdr;
	struct team_port *port;

	hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
			  TEAM_CMD_PORT_LIST_GET);
	/* genlmsg_put() returns NULL on failure, not an ERR_PTR */
	if (!hdr)
		return -EMSGSIZE;

	NLA_PUT_U32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex);
	port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT);
	if (!port_list)
		return -EMSGSIZE;

	list_for_each_entry(port, &team->port_list, list) {
		struct nlattr *port_item;

		port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT);
		if (!port_item)
			goto nla_put_failure;
		NLA_PUT_U32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex);
		if (port == changed_port)
			NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_CHANGED);
		if (port->linkup)
			NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_LINKUP);
		NLA_PUT_U32(skb, TEAM_ATTR_PORT_SPEED, port->speed);
		NLA_PUT_U8(skb, TEAM_ATTR_PORT_DUPLEX, port->duplex);
		nla_nest_end(skb, port_item);
	}

	nla_nest_end(skb, port_list);
	return genlmsg_end(skb, hdr);

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int team_nl_fill_port_list_get(struct sk_buff *skb,
				      struct genl_info *info, int flags,
				      struct team *team)
{
	return team_nl_fill_port_list_get_changed(skb, info->snd_pid,
						  info->snd_seq, NLM_F_ACK,
						  team, NULL);
}

static int team_nl_cmd_port_list_get(struct sk_buff *skb,
				     struct genl_info *info)
{
	struct team *team;
	int err;

	team = team_nl_team_get(info);
	if (!team)
		return -EINVAL;

	err = team_nl_send_generic(info, team, team_nl_fill_port_list_get);

	team_nl_team_put(team);

	return err;
}

static struct genl_ops team_nl_ops[] = {
	{
		.cmd = TEAM_CMD_NOOP,
		.doit = team_nl_cmd_noop,
		.policy = team_nl_policy,
	},
	{
		.cmd = TEAM_CMD_OPTIONS_SET,
		.doit = team_nl_cmd_options_set,
		.policy = team_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = TEAM_CMD_OPTIONS_GET,
		.doit = team_nl_cmd_options_get,
		.policy = team_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = TEAM_CMD_PORT_LIST_GET,
		.doit = team_nl_cmd_port_list_get,
		.policy = team_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};

static struct genl_multicast_group team_change_event_mcgrp = {
	.name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME,
};

static int team_nl_send_event_options_get(struct team *team,
					  struct team_option *changed_option)
{
	struct sk_buff *skb;
	int err;
	struct net *net = dev_net(team->dev);

	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	err = team_nl_fill_options_get_changed(skb, 0, 0, 0, team,
					       changed_option);
	if (err < 0)
		goto err_fill;

	err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id,
				      GFP_KERNEL);
	return err;

err_fill:
	nlmsg_free(skb);
	return err;
}

static int team_nl_send_event_port_list_get(struct team_port *port)
{
	struct sk_buff *skb;
	int err;
	struct net *net = dev_net(port->team->dev);

	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	err = team_nl_fill_port_list_get_changed(skb, 0, 0, 0,
						 port->team, port);
	if (err < 0)
		goto err_fill;

	err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id,
				      GFP_KERNEL);
	return err;

err_fill:
	nlmsg_free(skb);
	return err;
}

static int team_nl_init(void)
{
	int err;

	err = genl_register_family_with_ops(&team_nl_family, team_nl_ops,
					    ARRAY_SIZE(team_nl_ops));
	if (err)
		return err;

	err = genl_register_mc_group(&team_nl_family, &team_change_event_mcgrp);
	if (err)
		goto err_change_event_grp_reg;

	return 0;

err_change_event_grp_reg:
	genl_unregister_family(&team_nl_family);

	return err;
}

static void team_nl_fini(void)
{
	genl_unregister_family(&team_nl_family);
}


/******************
 * Change checkers
 ******************/

static void __team_options_change_check(struct team *team,
					struct team_option *changed_option)
{
	int err;

	err = team_nl_send_event_options_get(team, changed_option);
	if (err)
		netdev_warn(team->dev, "Failed to send options change via netlink\n");
}

/* rtnl lock is held */
static void __team_port_change_check(struct team_port *port, bool linkup)
{
	int err;

	if (port->linkup == linkup)
		return;

	port->linkup = linkup;
	if (linkup) {
		struct ethtool_cmd ecmd;

		err = __ethtool_get_settings(port->dev, &ecmd);
		if (!err) {
			port->speed = ethtool_cmd_speed(&ecmd);
			port->duplex = ecmd.duplex;
			goto send_event;
		}
	}
	port->speed = 0;
	port->duplex = 0;

send_event:
	err = team_nl_send_event_port_list_get(port);
	if (err)
		netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink\n",
			    port->dev->name);
}

static void team_port_change_check(struct team_port *port, bool linkup)
{
	struct team *team = port->team;

	mutex_lock(&team->lock);
	__team_port_change_check(port, linkup);
	mutex_unlock(&team->lock);
}

/************************************
 * Net device notifier event handler
 ************************************/

static int team_device_event(struct notifier_block *unused,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = (struct net_device *) ptr;
	struct team_port *port;

	port = team_port_get_rtnl(dev);
	if (!port)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		if (netif_carrier_ok(dev))
			team_port_change_check(port, true);
		break;
	case NETDEV_DOWN:
		team_port_change_check(port, false);
		break;
	case NETDEV_CHANGE:
		if (netif_running(port->dev))
			team_port_change_check(port,
					       !!netif_carrier_ok(port->dev));
		break;
	case NETDEV_UNREGISTER:
		team_del_slave(port->team->dev, dev);
		break;
	case NETDEV_FEAT_CHANGE:
		team_compute_features(port->team);
		break;
	case NETDEV_CHANGEMTU:
		/* Forbid changing the mtu of an underlying device */
		return NOTIFY_BAD;
	case NETDEV_PRE_TYPE_CHANGE:
		/* Forbid changing the type of an underlying device */
		return NOTIFY_BAD;
	}
	return NOTIFY_DONE;
}

static struct notifier_block team_notifier_block __read_mostly = {
	.notifier_call = team_device_event,
};


/***********************
 * Module init and exit
 ***********************/

static int __init team_module_init(void)
{
	int err;

	register_netdevice_notifier(&team_notifier_block);

	err = rtnl_link_register(&team_link_ops);
	if (err)
		goto err_rtnl_reg;

	err = team_nl_init();
	if (err)
		goto err_nl_init;

	return 0;

err_nl_init:
	rtnl_link_unregister(&team_link_ops);

err_rtnl_reg:
	unregister_netdevice_notifier(&team_notifier_block);

	return err;
}

static void __exit team_module_exit(void)
{
	team_nl_fini();
	rtnl_link_unregister(&team_link_ops);
	unregister_netdevice_notifier(&team_notifier_block);
}

module_init(team_module_init);
module_exit(team_module_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
MODULE_DESCRIPTION("Ethernet team device driver");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);