/*
 * Copyright (c) 2015, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "core_priv.h"

#include <linux/in.h>
#include <linux/in6.h>

/* For in6_dev_get/in6_dev_put */
#include <net/addrconf.h>
#include <net/bonding.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>

static struct workqueue_struct *gid_cache_wq;

enum gid_op_type {
        GID_DEL = 0,
        GID_ADD
};

struct update_gid_event_work {
        struct work_struct work;
        union ib_gid       gid;
        struct ib_gid_attr gid_attr;
        enum gid_op_type gid_op;
};

#define ROCE_NETDEV_CALLBACK_SZ         3
struct netdev_event_work_cmd {
        roce_netdev_callback    cb;
        roce_netdev_filter      filter;
        struct net_device       *ndev;
        struct net_device       *filter_ndev;
};

struct netdev_event_work {
        struct work_struct              work;
        struct netdev_event_work_cmd    cmds[ROCE_NETDEV_CALLBACK_SZ];
};

static const struct {
        bool (*is_supported)(const struct ib_device *device, u8 port_num);
        enum ib_gid_type gid_type;
} PORT_CAP_TO_GID_TYPE[] = {
        {rdma_protocol_roce_eth_encap, IB_GID_TYPE_ROCE},
        {rdma_protocol_roce_udp_encap, IB_GID_TYPE_ROCE_UDP_ENCAP},
};

#define CAP_TO_GID_TABLE_SIZE   ARRAY_SIZE(PORT_CAP_TO_GID_TYPE)

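/*
 * Return a bitmask of the GID types supported by (ib_dev, port): for
 * non-RoCE ports only IB_GID_TYPE_IB; for RoCE ports, one bit per
 * supported encapsulation (RoCE v1 and/or RoCE v2).
 */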
unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port)
{
        int i;
        unsigned int ret_flags = 0;

        if (!rdma_protocol_roce(ib_dev, port))
                return 1UL << IB_GID_TYPE_IB;

        for (i = 0; i < CAP_TO_GID_TABLE_SIZE; i++)
                if (PORT_CAP_TO_GID_TYPE[i].is_supported(ib_dev, port))
                        ret_flags |= 1UL << PORT_CAP_TO_GID_TYPE[i].gid_type;

        return ret_flags;
}
EXPORT_SYMBOL(roce_gid_type_mask_support);

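/*
 * Add or delete the given GID in the cache of (ib_dev, port), once for
 * each GID type the port supports.
 */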
static void update_gid(enum gid_op_type gid_op, struct ib_device *ib_dev,
                       u8 port, union ib_gid *gid,
                       struct ib_gid_attr *gid_attr)
{
        int i;
        unsigned long gid_type_mask = roce_gid_type_mask_support(ib_dev, port);

        for (i = 0; i < IB_GID_TYPE_SIZE; i++) {
                if ((1UL << i) & gid_type_mask) {
                        gid_attr->gid_type = i;
                        switch (gid_op) {
                        case GID_ADD:
                                ib_cache_gid_add(ib_dev, port,
                                                 gid, gid_attr);
                                break;
                        case GID_DEL:
                                ib_cache_gid_del(ib_dev, port,
                                                 gid, gid_attr);
                                break;
                        }
                }
        }
}

enum bonding_slave_state {
        BONDING_SLAVE_STATE_ACTIVE      = 1UL << 0,
        BONDING_SLAVE_STATE_INACTIVE    = 1UL << 1,
        /* No primary slave or the device isn't a slave in bonding */
        BONDING_SLAVE_STATE_NA          = 1UL << 2,
};

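/*
 * Return whether dev is the active or an inactive slave of the bond
 * master upper, or BONDING_SLAVE_STATE_NA when upper is not a bond
 * master or has no active slave. Must be called under rcu_read_lock().
 */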
static enum bonding_slave_state is_eth_active_slave_of_bonding_rcu(struct net_device *dev,
                                                                   struct net_device *upper)
{
        if (upper && netif_is_bond_master(upper)) {
                struct net_device *pdev =
                        bond_option_active_slave_get_rcu(netdev_priv(upper));

                if (pdev)
                        return dev == pdev ? BONDING_SLAVE_STATE_ACTIVE :
                                BONDING_SLAVE_STATE_INACTIVE;
        }

        return BONDING_SLAVE_STATE_NA;
}

#define REQUIRED_BOND_STATES            (BONDING_SLAVE_STATE_ACTIVE |   \
                                         BONDING_SLAVE_STATE_NA)
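/*
 * RoCE netdev filter: match when the event netdev (cookie) is served by
 * rdma_ndev - either cookie (or its real VLAN device) is rdma_ndev
 * itself, or cookie is an upper device of rdma_ndev and rdma_ndev is
 * not an inactive bonding slave.
 */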
static int is_eth_port_of_netdev(struct ib_device *ib_dev, u8 port,
                                 struct net_device *rdma_ndev, void *cookie)
{
        struct net_device *real_dev;
        int res;

        if (!rdma_ndev)
                return 0;

        rcu_read_lock();
        real_dev = rdma_vlan_dev_real_dev(cookie);
        if (!real_dev)
                real_dev = cookie;

        res = ((rdma_is_upper_dev_rcu(rdma_ndev, cookie) &&
               (is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) &
                REQUIRED_BOND_STATES)) ||
               real_dev == rdma_ndev);

        rcu_read_unlock();
        return res;
}

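/* Filter that matches only when rdma_ndev is an inactive slave of a bond. */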
static int is_eth_port_inactive_slave(struct ib_device *ib_dev, u8 port,
                                      struct net_device *rdma_ndev, void *cookie)
{
        struct net_device *master_dev;
        int res;

        if (!rdma_ndev)
                return 0;

        rcu_read_lock();
        master_dev = netdev_master_upper_dev_get_rcu(rdma_ndev);
        res = is_eth_active_slave_of_bonding_rcu(rdma_ndev, master_dev) ==
                BONDING_SLAVE_STATE_INACTIVE;
        rcu_read_unlock();

        return res;
}

static int pass_all_filter(struct ib_device *ib_dev, u8 port,
                           struct net_device *rdma_ndev, void *cookie)
{
        return 1;
}

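/* Filter that matches when cookie is rdma_ndev itself or one of its upper devices. */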
static int upper_device_filter(struct ib_device *ib_dev, u8 port,
                               struct net_device *rdma_ndev, void *cookie)
{
        int res;

        if (!rdma_ndev)
                return 0;

        if (rdma_ndev == cookie)
                return 1;

        rcu_read_lock();
        res = rdma_is_upper_dev_rcu(rdma_ndev, cookie);
        rcu_read_unlock();

        return res;
}

static void update_gid_ip(enum gid_op_type gid_op,
                          struct ib_device *ib_dev,
                          u8 port, struct net_device *ndev,
                          struct sockaddr *addr)
{
        union ib_gid gid;
        struct ib_gid_attr gid_attr;

        rdma_ip2gid(addr, &gid);
        memset(&gid_attr, 0, sizeof(gid_attr));
        gid_attr.ndev = ndev;

        update_gid(gid_op, ib_dev, port, &gid, &gid_attr);
}

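/*
 * Set the default GIDs of (ib_dev, port) when event_ndev is rdma_ndev
 * itself or one of its upper devices, unless rdma_ndev is currently an
 * inactive bonding slave.
 */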
static void enum_netdev_default_gids(struct ib_device *ib_dev,
                                     u8 port, struct net_device *event_ndev,
                                     struct net_device *rdma_ndev)
{
        unsigned long gid_type_mask;

        rcu_read_lock();
        if (!rdma_ndev ||
            ((rdma_ndev != event_ndev &&
              !rdma_is_upper_dev_rcu(rdma_ndev, event_ndev)) ||
             is_eth_active_slave_of_bonding_rcu(rdma_ndev,
                                                netdev_master_upper_dev_get_rcu(rdma_ndev)) ==
             BONDING_SLAVE_STATE_INACTIVE)) {
                rcu_read_unlock();
                return;
        }
        rcu_read_unlock();

        gid_type_mask = roce_gid_type_mask_support(ib_dev, port);

        ib_cache_gid_set_default_gid(ib_dev, port, rdma_ndev, gid_type_mask,
                                     IB_CACHE_GID_DEFAULT_MODE_SET);
}

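/*
 * Delete the default GIDs of (ib_dev, port) when rdma_ndev has become an
 * inactive slave of the bond device event_ndev (or of its real VLAN
 * device).
 */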
static void bond_delete_netdev_default_gids(struct ib_device *ib_dev,
                                            u8 port,
                                            struct net_device *event_ndev,
                                            struct net_device *rdma_ndev)
{
        struct net_device *real_dev = rdma_vlan_dev_real_dev(event_ndev);

        if (!rdma_ndev)
                return;

        if (!real_dev)
                real_dev = event_ndev;

        rcu_read_lock();

        if (rdma_is_upper_dev_rcu(rdma_ndev, event_ndev) &&
            is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) ==
            BONDING_SLAVE_STATE_INACTIVE) {
                unsigned long gid_type_mask;

                rcu_read_unlock();

                gid_type_mask = roce_gid_type_mask_support(ib_dev, port);

                ib_cache_gid_set_default_gid(ib_dev, port, rdma_ndev,
                                             gid_type_mask,
                                             IB_CACHE_GID_DEFAULT_MODE_DELETE);
        } else {
                rcu_read_unlock();
        }
}

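/*
 * Walk the IPv4 addresses assigned to ndev and add a GID for each one.
 * The addresses are copied to a local list under RCU so that the GID
 * table updates happen outside of the RCU read-side section.
 */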
static void enum_netdev_ipv4_ips(struct ib_device *ib_dev,
                                 u8 port, struct net_device *ndev)
{
        struct in_device *in_dev;
        struct sin_list {
                struct list_head        list;
                struct sockaddr_in      ip;
        };
        struct sin_list *sin_iter;
        struct sin_list *sin_temp;

        LIST_HEAD(sin_list);
        if (ndev->reg_state >= NETREG_UNREGISTERING)
                return;

        rcu_read_lock();
        in_dev = __in_dev_get_rcu(ndev);
        if (!in_dev) {
                rcu_read_unlock();
                return;
        }

        for_ifa(in_dev) {
                struct sin_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);

                if (!entry)
                        continue;

                entry->ip.sin_family = AF_INET;
                entry->ip.sin_addr.s_addr = ifa->ifa_address;
                list_add_tail(&entry->list, &sin_list);
        }
        endfor_ifa(in_dev);
        rcu_read_unlock();

        list_for_each_entry_safe(sin_iter, sin_temp, &sin_list, list) {
                update_gid_ip(GID_ADD, ib_dev, port, ndev,
                              (struct sockaddr *)&sin_iter->ip);
                list_del(&sin_iter->list);
                kfree(sin_iter);
        }
}

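/*
 * Walk the IPv6 addresses assigned to ndev and add a GID for each one.
 * As in the IPv4 case, the addresses are collected under the inet6_dev
 * lock and the GID table is updated afterwards.
 */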
static void enum_netdev_ipv6_ips(struct ib_device *ib_dev,
                                 u8 port, struct net_device *ndev)
{
        struct inet6_ifaddr *ifp;
        struct inet6_dev *in6_dev;
        struct sin6_list {
                struct list_head        list;
                struct sockaddr_in6     sin6;
        };
        struct sin6_list *sin6_iter;
        struct sin6_list *sin6_temp;
        struct ib_gid_attr gid_attr = {.ndev = ndev};
        LIST_HEAD(sin6_list);

        if (ndev->reg_state >= NETREG_UNREGISTERING)
                return;

        in6_dev = in6_dev_get(ndev);
        if (!in6_dev)
                return;

        read_lock_bh(&in6_dev->lock);
        list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
                struct sin6_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);

                if (!entry)
                        continue;

                entry->sin6.sin6_family = AF_INET6;
                entry->sin6.sin6_addr = ifp->addr;
                list_add_tail(&entry->list, &sin6_list);
        }
        read_unlock_bh(&in6_dev->lock);

        in6_dev_put(in6_dev);

        list_for_each_entry_safe(sin6_iter, sin6_temp, &sin6_list, list) {
                union ib_gid    gid;

                rdma_ip2gid((struct sockaddr *)&sin6_iter->sin6, &gid);
                update_gid(GID_ADD, ib_dev, port, &gid, &gid_attr);
                list_del(&sin6_iter->list);
                kfree(sin6_iter);
        }
}

static void _add_netdev_ips(struct ib_device *ib_dev, u8 port,
                            struct net_device *ndev)
{
        enum_netdev_ipv4_ips(ib_dev, port, ndev);
        if (IS_ENABLED(CONFIG_IPV6))
                enum_netdev_ipv6_ips(ib_dev, port, ndev);
}

static void add_netdev_ips(struct ib_device *ib_dev, u8 port,
                           struct net_device *rdma_ndev, void *cookie)
{
        enum_netdev_default_gids(ib_dev, port, cookie, rdma_ndev);
        _add_netdev_ips(ib_dev, port, cookie);
}

static void del_netdev_ips(struct ib_device *ib_dev, u8 port,
                           struct net_device *rdma_ndev, void *cookie)
{
        ib_cache_gid_del_all_netdev_gids(ib_dev, port, cookie);
}

static void enum_all_gids_of_dev_cb(struct ib_device *ib_dev,
                                    u8 port,
                                    struct net_device *rdma_ndev,
                                    void *cookie)
{
        struct net *net;
        struct net_device *ndev;

        /* Lock the rtnl to make sure the netdevs do not move under
         * our feet
         */
        rtnl_lock();
        for_each_net(net)
                for_each_netdev(net, ndev)
                        if (is_eth_port_of_netdev(ib_dev, port, rdma_ndev, ndev))
                                add_netdev_ips(ib_dev, port, rdma_ndev, ndev);
        rtnl_unlock();
}

/* This function will rescan all of the network devices in the system
 * and add their gids, as needed, to the relevant RoCE devices. */
int roce_rescan_device(struct ib_device *ib_dev)
{
        ib_enum_roce_netdev(ib_dev, pass_all_filter, NULL,
                            enum_all_gids_of_dev_cb, NULL);

        return 0;
}

static void callback_for_addr_gid_device_scan(struct ib_device *device,
                                              u8 port,
                                              struct net_device *rdma_ndev,
                                              void *cookie)
{
        struct update_gid_event_work *parsed = cookie;

        update_gid(parsed->gid_op, device,
                   port, &parsed->gid,
                   &parsed->gid_attr);
}

struct upper_list {
        struct list_head list;
        struct net_device *upper;
};

static int netdev_upper_walk(struct net_device *upper, void *data)
{
        struct upper_list *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
        struct list_head *upper_list = data;

        if (!entry)
                return 0;

        list_add_tail(&entry->list, upper_list);
        dev_hold(upper);
        entry->upper = upper;

        return 0;
}

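/*
 * Apply handle_netdev() to the event netdev (cookie) and to every upper
 * device stacked on top of it. The upper devices are collected and held
 * under RCU so they can be processed after the RCU section ends.
 */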
static void handle_netdev_upper(struct ib_device *ib_dev, u8 port,
                                void *cookie,
                                void (*handle_netdev)(struct ib_device *ib_dev,
                                                      u8 port,
                                                      struct net_device *ndev))
{
        struct net_device *ndev = cookie;
        struct upper_list *upper_iter;
        struct upper_list *upper_temp;
        LIST_HEAD(upper_list);

        rcu_read_lock();
        netdev_walk_all_upper_dev_rcu(ndev, netdev_upper_walk, &upper_list);
        rcu_read_unlock();

        handle_netdev(ib_dev, port, ndev);
        list_for_each_entry_safe(upper_iter, upper_temp, &upper_list,
                                 list) {
                handle_netdev(ib_dev, port, upper_iter->upper);
                dev_put(upper_iter->upper);
                list_del(&upper_iter->list);
                kfree(upper_iter);
        }
}

static void _roce_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
                                      struct net_device *event_ndev)
{
        ib_cache_gid_del_all_netdev_gids(ib_dev, port, event_ndev);
}

static void del_netdev_upper_ips(struct ib_device *ib_dev, u8 port,
                                 struct net_device *rdma_ndev, void *cookie)
{
        handle_netdev_upper(ib_dev, port, cookie, _roce_del_all_netdev_gids);
}

static void add_netdev_upper_ips(struct ib_device *ib_dev, u8 port,
                                 struct net_device *rdma_ndev, void *cookie)
{
        handle_netdev_upper(ib_dev, port, cookie, _add_netdev_ips);
}

static void del_netdev_default_ips_join(struct ib_device *ib_dev, u8 port,
                                        struct net_device *rdma_ndev,
                                        void *cookie)
{
        struct net_device *master_ndev;

        rcu_read_lock();
        master_ndev = netdev_master_upper_dev_get_rcu(rdma_ndev);
        if (master_ndev)
                dev_hold(master_ndev);
        rcu_read_unlock();

        if (master_ndev) {
                bond_delete_netdev_default_gids(ib_dev, port, master_ndev,
                                                rdma_ndev);
                dev_put(master_ndev);
        }
}

static void del_netdev_default_ips(struct ib_device *ib_dev, u8 port,
                                   struct net_device *rdma_ndev, void *cookie)
{
        bond_delete_netdev_default_gids(ib_dev, port, cookie, rdma_ndev);
}

/* The following functions operate on all IB devices. netdevice_event and
 * addr_event execute ib_enum_all_roce_netdevs from a work item.
 * ib_enum_all_roce_netdevs iterates through all IB devices.
 */

static void netdevice_event_work_handler(struct work_struct *_work)
{
        struct netdev_event_work *work =
                container_of(_work, struct netdev_event_work, work);
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(work->cmds) && work->cmds[i].cb; i++) {
                ib_enum_all_roce_netdevs(work->cmds[i].filter,
                                         work->cmds[i].filter_ndev,
                                         work->cmds[i].cb,
                                         work->cmds[i].ndev);
                dev_put(work->cmds[i].ndev);
                dev_put(work->cmds[i].filter_ndev);
        }

        kfree(work);
}

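/*
 * Copy the prepared commands into a work item, take a reference on the
 * netdevs they use, and queue the work on gid_cache_wq.
 */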
static int netdevice_queue_work(struct netdev_event_work_cmd *cmds,
                                struct net_device *ndev)
{
        unsigned int i;
        struct netdev_event_work *ndev_work =
                kmalloc(sizeof(*ndev_work), GFP_KERNEL);

        if (!ndev_work)
                return NOTIFY_DONE;

        memcpy(ndev_work->cmds, cmds, sizeof(ndev_work->cmds));
        for (i = 0; i < ARRAY_SIZE(ndev_work->cmds) && ndev_work->cmds[i].cb; i++) {
                if (!ndev_work->cmds[i].ndev)
                        ndev_work->cmds[i].ndev = ndev;
                if (!ndev_work->cmds[i].filter_ndev)
                        ndev_work->cmds[i].filter_ndev = ndev;
                dev_hold(ndev_work->cmds[i].ndev);
                dev_hold(ndev_work->cmds[i].filter_ndev);
        }
        INIT_WORK(&ndev_work->work, netdevice_event_work_handler);

        queue_work(gid_cache_wq, &ndev_work->work);

        return NOTIFY_DONE;
}

static const struct netdev_event_work_cmd add_cmd = {
        .cb = add_netdev_ips, .filter = is_eth_port_of_netdev};
static const struct netdev_event_work_cmd add_cmd_upper_ips = {
        .cb = add_netdev_upper_ips, .filter = is_eth_port_of_netdev};

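/*
 * Build the command list for a CHANGEUPPER event: on unlink, delete the
 * GIDs that belong to the upper device and re-add the netdev's own GIDs;
 * on link, delete the default GIDs of ports that became inactive bond
 * slaves and add GIDs for the upper device instead.
 */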
static void netdevice_event_changeupper(struct netdev_notifier_changeupper_info *changeupper_info,
                                        struct netdev_event_work_cmd *cmds)
{
        static const struct netdev_event_work_cmd upper_ips_del_cmd = {
                .cb = del_netdev_upper_ips, .filter = upper_device_filter};
        static const struct netdev_event_work_cmd bonding_default_del_cmd = {
                .cb = del_netdev_default_ips, .filter = is_eth_port_inactive_slave};

        if (changeupper_info->linking == false) {
                cmds[0] = upper_ips_del_cmd;
                cmds[0].ndev = changeupper_info->upper_dev;
                cmds[1] = add_cmd;
        } else {
                cmds[0] = bonding_default_del_cmd;
                cmds[0].ndev = changeupper_info->upper_dev;
                cmds[1] = add_cmd_upper_ips;
                cmds[1].ndev = changeupper_info->upper_dev;
                cmds[1].filter_ndev = changeupper_info->upper_dev;
        }
}

static int netdevice_event(struct notifier_block *this, unsigned long event,
                           void *ptr)
{
        static const struct netdev_event_work_cmd del_cmd = {
                .cb = del_netdev_ips, .filter = pass_all_filter};
        static const struct netdev_event_work_cmd bonding_default_del_cmd_join = {
                .cb = del_netdev_default_ips_join, .filter = is_eth_port_inactive_slave};
        static const struct netdev_event_work_cmd default_del_cmd = {
                .cb = del_netdev_default_ips, .filter = pass_all_filter};
        static const struct netdev_event_work_cmd bonding_event_ips_del_cmd = {
                .cb = del_netdev_upper_ips, .filter = upper_device_filter};
        struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
        struct netdev_event_work_cmd cmds[ROCE_NETDEV_CALLBACK_SZ] = { {NULL} };

        if (ndev->type != ARPHRD_ETHER)
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_REGISTER:
        case NETDEV_UP:
                cmds[0] = bonding_default_del_cmd_join;
                cmds[1] = add_cmd;
                break;

        case NETDEV_UNREGISTER:
                if (ndev->reg_state < NETREG_UNREGISTERED)
                        cmds[0] = del_cmd;
                else
                        return NOTIFY_DONE;
                break;

        case NETDEV_CHANGEADDR:
                cmds[0] = default_del_cmd;
                cmds[1] = add_cmd;
                break;

        case NETDEV_CHANGEUPPER:
                netdevice_event_changeupper(
                        container_of(ptr, struct netdev_notifier_changeupper_info, info),
                        cmds);
                break;

        case NETDEV_BONDING_FAILOVER:
                cmds[0] = bonding_event_ips_del_cmd;
                cmds[1] = bonding_default_del_cmd_join;
                cmds[2] = add_cmd_upper_ips;
                break;

        default:
                return NOTIFY_DONE;
        }

        return netdevice_queue_work(cmds, ndev);
}

static void update_gid_event_work_handler(struct work_struct *_work)
{
        struct update_gid_event_work *work =
                container_of(_work, struct update_gid_event_work, work);

        ib_enum_all_roce_netdevs(is_eth_port_of_netdev, work->gid_attr.ndev,
                                 callback_for_addr_gid_device_scan, work);

        dev_put(work->gid_attr.ndev);
        kfree(work);
}

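/*
 * Common handler for inet and inet6 address notifications: translate the
 * address into a GID and queue a work item that adds or removes it on all
 * matching RoCE ports.
 */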
static int addr_event(struct notifier_block *this, unsigned long event,
                      struct sockaddr *sa, struct net_device *ndev)
{
        struct update_gid_event_work *work;
        enum gid_op_type gid_op;

        if (ndev->type != ARPHRD_ETHER)
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_UP:
                gid_op = GID_ADD;
                break;

        case NETDEV_DOWN:
                gid_op = GID_DEL;
                break;

        default:
                return NOTIFY_DONE;
        }

        work = kmalloc(sizeof(*work), GFP_ATOMIC);
        if (!work)
                return NOTIFY_DONE;

        INIT_WORK(&work->work, update_gid_event_work_handler);

        rdma_ip2gid(sa, &work->gid);
        work->gid_op = gid_op;

        memset(&work->gid_attr, 0, sizeof(work->gid_attr));
        dev_hold(ndev);
        work->gid_attr.ndev   = ndev;

        queue_work(gid_cache_wq, &work->work);

        return NOTIFY_DONE;
}

static int inetaddr_event(struct notifier_block *this, unsigned long event,
                          void *ptr)
{
        struct sockaddr_in      in;
        struct net_device       *ndev;
        struct in_ifaddr        *ifa = ptr;

        in.sin_family = AF_INET;
        in.sin_addr.s_addr = ifa->ifa_address;
        ndev = ifa->ifa_dev->dev;

        return addr_event(this, event, (struct sockaddr *)&in, ndev);
}

static int inet6addr_event(struct notifier_block *this, unsigned long event,
                           void *ptr)
{
        struct sockaddr_in6     in6;
        struct net_device       *ndev;
        struct inet6_ifaddr     *ifa6 = ptr;

        in6.sin6_family = AF_INET6;
        in6.sin6_addr = ifa6->addr;
        ndev = ifa6->idev->dev;

        return addr_event(this, event, (struct sockaddr *)&in6, ndev);
}

static struct notifier_block nb_netdevice = {
        .notifier_call = netdevice_event
};

static struct notifier_block nb_inetaddr = {
        .notifier_call = inetaddr_event
};

static struct notifier_block nb_inet6addr = {
        .notifier_call = inet6addr_event
};

int __init roce_gid_mgmt_init(void)
{
        gid_cache_wq = alloc_ordered_workqueue("gid-cache-wq", 0);
        if (!gid_cache_wq)
                return -ENOMEM;

        register_inetaddr_notifier(&nb_inetaddr);
        if (IS_ENABLED(CONFIG_IPV6))
                register_inet6addr_notifier(&nb_inet6addr);
        /* We rely on the netdevice notifier to enumerate all
         * existing devices in the system. Register with this notifier
         * last to make sure we will not miss any IP add/del
         * callbacks.
         */
        register_netdevice_notifier(&nb_netdevice);

        return 0;
}

void __exit roce_gid_mgmt_cleanup(void)
{
        if (IS_ENABLED(CONFIG_IPV6))
                unregister_inet6addr_notifier(&nb_inet6addr);
        unregister_inetaddr_notifier(&nb_inetaddr);
        unregister_netdevice_notifier(&nb_netdevice);
        /* Ensure all gid deletion tasks complete before we go down,
         * to avoid any reference to freed memory. By the time
         * ib-core is removed, all physical devices have been removed,
         * so no issue with remaining hardware contexts.
         */
        destroy_workqueue(gid_cache_wq);
}