1 /*
2  * Copyright (c) 2004 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4  * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 
35 #include "ipoib.h"
36 
37 #include <linux/module.h>
38 
39 #include <linux/init.h>
40 #include <linux/slab.h>
41 #include <linux/kernel.h>
42 #include <linux/vmalloc.h>
43 
44 #include <linux/if_arp.h>	/* For ARPHRD_xxx */
45 
46 #include <linux/ip.h>
47 #include <linux/in.h>
48 
49 #include <linux/jhash.h>
50 #include <net/arp.h>
51 #include <net/addrconf.h>
52 #include <linux/inetdevice.h>
53 #include <rdma/ib_cache.h>
54 
55 MODULE_AUTHOR("Roland Dreier");
56 MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
57 MODULE_LICENSE("Dual BSD/GPL");
58 
59 int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE;
60 int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE;
61 
62 module_param_named(send_queue_size, ipoib_sendq_size, int, 0444);
63 MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
64 module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
65 MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");
66 
67 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
68 int ipoib_debug_level;
69 
70 module_param_named(debug_level, ipoib_debug_level, int, 0644);
71 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
72 #endif
73 
74 struct ipoib_path_iter {
75 	struct net_device *dev;
76 	struct ipoib_path  path;
77 };
78 
79 static const u8 ipv4_bcast_addr[] = {
80 	0x00, 0xff, 0xff, 0xff,
81 	0xff, 0x12, 0x40, 0x1b,	0x00, 0x00, 0x00, 0x00,
82 	0x00, 0x00, 0x00, 0x00,	0xff, 0xff, 0xff, 0xff
83 };
84 
85 struct workqueue_struct *ipoib_workqueue;
86 
87 struct ib_sa_client ipoib_sa_client;
88 
89 static void ipoib_add_one(struct ib_device *device);
90 static void ipoib_remove_one(struct ib_device *device, void *client_data);
91 static void ipoib_neigh_reclaim(struct rcu_head *rp);
92 static struct net_device *ipoib_get_net_dev_by_params(
93 		struct ib_device *dev, u8 port, u16 pkey,
94 		const union ib_gid *gid, const struct sockaddr *addr,
95 		void *client_data);
96 static int ipoib_set_mac(struct net_device *dev, void *addr);
97 static int ipoib_ioctl(struct net_device *dev, struct ifreq *ifr,
98 		       int cmd);
99 
100 static struct ib_client ipoib_client = {
101 	.name   = "ipoib",
102 	.add    = ipoib_add_one,
103 	.remove = ipoib_remove_one,
104 	.get_net_dev_by_params = ipoib_get_net_dev_by_params,
105 };
106 
107 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
108 static int ipoib_netdev_event(struct notifier_block *this,
109 			      unsigned long event, void *ptr)
110 {
111 	struct netdev_notifier_info *ni = ptr;
112 	struct net_device *dev = ni->dev;
113 
114 	if (dev->netdev_ops->ndo_open != ipoib_open)
115 		return NOTIFY_DONE;
116 
117 	switch (event) {
118 	case NETDEV_REGISTER:
119 		ipoib_create_debug_files(dev);
120 		break;
121 	case NETDEV_CHANGENAME:
122 		ipoib_delete_debug_files(dev);
123 		ipoib_create_debug_files(dev);
124 		break;
125 	case NETDEV_UNREGISTER:
126 		ipoib_delete_debug_files(dev);
127 		break;
128 	}
129 
130 	return NOTIFY_DONE;
131 }
132 #endif
133 
134 int ipoib_open(struct net_device *dev)
135 {
136 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
137 
138 	ipoib_dbg(priv, "bringing up interface\n");
139 
140 	netif_carrier_off(dev);
141 
142 	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
143 
144 	priv->sm_fullmember_sendonly_support = false;
145 
146 	if (ipoib_ib_dev_open(dev)) {
147 		if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
148 			return 0;
149 		goto err_disable;
150 	}
151 
152 	ipoib_ib_dev_up(dev);
153 
154 	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
155 		struct ipoib_dev_priv *cpriv;
156 
157 		/* Bring up any child interfaces too */
158 		down_read(&priv->vlan_rwsem);
159 		list_for_each_entry(cpriv, &priv->child_intfs, list) {
160 			int flags;
161 
162 			flags = cpriv->dev->flags;
163 			if (flags & IFF_UP)
164 				continue;
165 
166 			dev_change_flags(cpriv->dev, flags | IFF_UP, NULL);
167 		}
168 		up_read(&priv->vlan_rwsem);
169 	}
170 
171 	netif_start_queue(dev);
172 
173 	return 0;
174 
175 err_disable:
176 	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
177 
178 	return -EINVAL;
179 }
180 
181 static int ipoib_stop(struct net_device *dev)
182 {
183 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
184 
185 	ipoib_dbg(priv, "stopping interface\n");
186 
187 	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
188 
189 	netif_stop_queue(dev);
190 
191 	ipoib_ib_dev_down(dev);
192 	ipoib_ib_dev_stop(dev);
193 
194 	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
195 		struct ipoib_dev_priv *cpriv;
196 
197 		/* Bring down any child interfaces too */
198 		down_read(&priv->vlan_rwsem);
199 		list_for_each_entry(cpriv, &priv->child_intfs, list) {
200 			int flags;
201 
202 			flags = cpriv->dev->flags;
203 			if (!(flags & IFF_UP))
204 				continue;
205 
206 			dev_change_flags(cpriv->dev, flags & ~IFF_UP, NULL);
207 		}
208 		up_read(&priv->vlan_rwsem);
209 	}
210 
211 	return 0;
212 }
213 
214 static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features)
215 {
216 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
217 
218 	if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
219 		features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
220 
221 	return features;
222 }
223 
224 static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
225 {
226 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
227 	int ret = 0;
228 
229 	/* dev->mtu > 2K ==> connected mode */
230 	if (ipoib_cm_admin_enabled(dev)) {
231 		if (new_mtu > ipoib_cm_max_mtu(dev))
232 			return -EINVAL;
233 
234 		if (new_mtu > priv->mcast_mtu)
235 			ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n",
236 				   priv->mcast_mtu);
237 
238 		dev->mtu = new_mtu;
239 		return 0;
240 	}
241 
242 	if (new_mtu < (ETH_MIN_MTU + IPOIB_ENCAP_LEN) ||
243 	    new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
244 		return -EINVAL;
245 
246 	priv->admin_mtu = new_mtu;
247 
248 	if (priv->mcast_mtu < priv->admin_mtu)
249 		ipoib_dbg(priv, "MTU must be smaller than the underlying "
250 				"link layer MTU - 4 (%u)\n", priv->mcast_mtu);
251 
252 	new_mtu = min(priv->mcast_mtu, priv->admin_mtu);
253 
254 	if (priv->rn_ops->ndo_change_mtu) {
255 		bool carrier_status = netif_carrier_ok(dev);
256 
257 		netif_carrier_off(dev);
258 
259 		/* notify lower level on the real mtu */
260 		ret = priv->rn_ops->ndo_change_mtu(dev, new_mtu);
261 
262 		if (carrier_status)
263 			netif_carrier_on(dev);
264 	} else {
265 		dev->mtu = new_mtu;
266 	}
267 
268 	return ret;
269 }
270 
271 static void ipoib_get_stats(struct net_device *dev,
272 			    struct rtnl_link_stats64 *stats)
273 {
274 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
275 
276 	if (priv->rn_ops->ndo_get_stats64)
277 		priv->rn_ops->ndo_get_stats64(dev, stats);
278 	else
279 		netdev_stats_to_stats64(stats, &dev->stats);
280 }
281 
282 /* Called with an RCU read lock taken */
283 static bool ipoib_is_dev_match_addr_rcu(const struct sockaddr *addr,
284 					struct net_device *dev)
285 {
286 	struct net *net = dev_net(dev);
287 	struct in_device *in_dev;
288 	struct sockaddr_in *addr_in = (struct sockaddr_in *)addr;
289 	struct sockaddr_in6 *addr_in6 = (struct sockaddr_in6 *)addr;
290 	__be32 ret_addr;
291 
292 	switch (addr->sa_family) {
293 	case AF_INET:
294 		in_dev = in_dev_get(dev);
295 		if (!in_dev)
296 			return false;
297 
298 		ret_addr = inet_confirm_addr(net, in_dev, 0,
299 					     addr_in->sin_addr.s_addr,
300 					     RT_SCOPE_HOST);
301 		in_dev_put(in_dev);
302 		if (ret_addr)
303 			return true;
304 
305 		break;
306 	case AF_INET6:
307 		if (IS_ENABLED(CONFIG_IPV6) &&
308 		    ipv6_chk_addr(net, &addr_in6->sin6_addr, dev, 1))
309 			return true;
310 
311 		break;
312 	}
313 	return false;
314 }
315 
/**
 * ipoib_get_master_net_dev - Find the master net_device on top of @dev
 * @dev: base IPoIB net_device
 *
 * Returns the master net_device with a reference held, or the same net_device
 * if no master exists.
 */
323 static struct net_device *ipoib_get_master_net_dev(struct net_device *dev)
324 {
325 	struct net_device *master;
326 
327 	rcu_read_lock();
328 	master = netdev_master_upper_dev_get_rcu(dev);
329 	if (master)
330 		dev_hold(master);
331 	rcu_read_unlock();
332 
333 	if (master)
334 		return master;
335 
336 	dev_hold(dev);
337 	return dev;
338 }
339 
340 struct ipoib_walk_data {
341 	const struct sockaddr *addr;
342 	struct net_device *result;
343 };
344 
345 static int ipoib_upper_walk(struct net_device *upper, void *_data)
346 {
347 	struct ipoib_walk_data *data = _data;
348 	int ret = 0;
349 
350 	if (ipoib_is_dev_match_addr_rcu(data->addr, upper)) {
351 		dev_hold(upper);
352 		data->result = upper;
353 		ret = 1;
354 	}
355 
356 	return ret;
357 }
358 
/**
 * ipoib_get_net_dev_match_addr - Find a net_device matching the given address
 * @addr: IP address to look for
 * @dev: base IPoIB net_device
 *
 * The matching net_device must be an upper device of @dev. If found, returns
 * the net_device with a reference held. Otherwise returns NULL.
 */
368 static struct net_device *ipoib_get_net_dev_match_addr(
369 		const struct sockaddr *addr, struct net_device *dev)
370 {
371 	struct ipoib_walk_data data = {
372 		.addr = addr,
373 	};
374 
375 	rcu_read_lock();
376 	if (ipoib_is_dev_match_addr_rcu(addr, dev)) {
377 		dev_hold(dev);
378 		data.result = dev;
379 		goto out;
380 	}
381 
382 	netdev_walk_all_upper_dev_rcu(dev, ipoib_upper_walk, &data);
383 out:
384 	rcu_read_unlock();
385 	return data.result;
386 }
387 
/* Returns the number of IPoIB netdevs on top of a given ipoib device matching
 * a pkey_index and address, if one exists.
 *
 * @found_net_dev: contains a matching net_device if the return value >= 1,
 * with a reference held. */
393 static int ipoib_match_gid_pkey_addr(struct ipoib_dev_priv *priv,
394 				     const union ib_gid *gid,
395 				     u16 pkey_index,
396 				     const struct sockaddr *addr,
397 				     int nesting,
398 				     struct net_device **found_net_dev)
399 {
400 	struct ipoib_dev_priv *child_priv;
401 	struct net_device *net_dev = NULL;
402 	int matches = 0;
403 
404 	if (priv->pkey_index == pkey_index &&
405 	    (!gid || !memcmp(gid, &priv->local_gid, sizeof(*gid)))) {
406 		if (!addr) {
407 			net_dev = ipoib_get_master_net_dev(priv->dev);
408 		} else {
409 			/* Verify the net_device matches the IP address, as
410 			 * IPoIB child devices currently share a GID. */
411 			net_dev = ipoib_get_net_dev_match_addr(addr, priv->dev);
412 		}
413 		if (net_dev) {
414 			if (!*found_net_dev)
415 				*found_net_dev = net_dev;
416 			else
417 				dev_put(net_dev);
418 			++matches;
419 		}
420 	}
421 
422 	/* Check child interfaces */
423 	down_read_nested(&priv->vlan_rwsem, nesting);
424 	list_for_each_entry(child_priv, &priv->child_intfs, list) {
425 		matches += ipoib_match_gid_pkey_addr(child_priv, gid,
426 						    pkey_index, addr,
427 						    nesting + 1,
428 						    found_net_dev);
429 		if (matches > 1)
430 			break;
431 	}
432 	up_read(&priv->vlan_rwsem);
433 
434 	return matches;
435 }
436 
/* Returns the number of matching net_devs found (between 0 and 2). Also
 * returns the matching net_device in the @net_dev parameter, with a
 * reference held, if the number of matches >= 1. */
440 static int __ipoib_get_net_dev_by_params(struct list_head *dev_list, u8 port,
441 					 u16 pkey_index,
442 					 const union ib_gid *gid,
443 					 const struct sockaddr *addr,
444 					 struct net_device **net_dev)
445 {
446 	struct ipoib_dev_priv *priv;
447 	int matches = 0;
448 
449 	*net_dev = NULL;
450 
451 	list_for_each_entry(priv, dev_list, list) {
452 		if (priv->port != port)
453 			continue;
454 
455 		matches += ipoib_match_gid_pkey_addr(priv, gid, pkey_index,
456 						     addr, 0, net_dev);
457 		if (matches > 1)
458 			break;
459 	}
460 
461 	return matches;
462 }
463 
464 static struct net_device *ipoib_get_net_dev_by_params(
465 		struct ib_device *dev, u8 port, u16 pkey,
466 		const union ib_gid *gid, const struct sockaddr *addr,
467 		void *client_data)
468 {
469 	struct net_device *net_dev;
470 	struct list_head *dev_list = client_data;
471 	u16 pkey_index;
472 	int matches;
473 	int ret;
474 
475 	if (!rdma_protocol_ib(dev, port))
476 		return NULL;
477 
478 	ret = ib_find_cached_pkey(dev, port, pkey, &pkey_index);
479 	if (ret)
480 		return NULL;
481 
482 	if (!dev_list)
483 		return NULL;
484 
485 	/* See if we can find a unique device matching the L2 parameters */
486 	matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
487 						gid, NULL, &net_dev);
488 
489 	switch (matches) {
490 	case 0:
491 		return NULL;
492 	case 1:
493 		return net_dev;
494 	}
495 
496 	dev_put(net_dev);
497 
498 	/* Couldn't find a unique device with L2 parameters only. Use L3
499 	 * address to uniquely match the net device */
500 	matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
501 						gid, addr, &net_dev);
502 	switch (matches) {
503 	case 0:
504 		return NULL;
505 	default:
506 		dev_warn_ratelimited(&dev->dev,
507 				     "duplicate IP address detected\n");
508 		/* Fall through */
509 	case 1:
510 		return net_dev;
511 	}
512 }
513 
514 int ipoib_set_mode(struct net_device *dev, const char *buf)
515 {
516 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
517 
518 	if ((test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags) &&
519 	     !strcmp(buf, "connected\n")) ||
520 	     (!test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags) &&
521 	     !strcmp(buf, "datagram\n"))) {
522 		return 0;
523 	}
524 
525 	/* flush paths if we switch modes so that connections are restarted */
526 	if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
527 		set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
528 		ipoib_warn(priv, "enabling connected mode "
529 			   "will cause multicast packet drops\n");
530 		netdev_update_features(dev);
531 		dev_set_mtu(dev, ipoib_cm_max_mtu(dev));
532 		rtnl_unlock();
533 		priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;
534 
535 		ipoib_flush_paths(dev);
536 		return (!rtnl_trylock()) ? -EBUSY : 0;
537 	}
538 
539 	if (!strcmp(buf, "datagram\n")) {
540 		clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
541 		netdev_update_features(dev);
542 		dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
543 		rtnl_unlock();
544 		ipoib_flush_paths(dev);
545 		return (!rtnl_trylock()) ? -EBUSY : 0;
546 	}
547 
548 	return -EINVAL;
549 }
550 
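/*
 * Look up a path by destination GID in the per-device red-black tree
 * (priv->path_tree); returns NULL if no entry exists.
 */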
551 struct ipoib_path *__path_find(struct net_device *dev, void *gid)
552 {
553 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
554 	struct rb_node *n = priv->path_tree.rb_node;
555 	struct ipoib_path *path;
556 	int ret;
557 
558 	while (n) {
559 		path = rb_entry(n, struct ipoib_path, rb_node);
560 
561 		ret = memcmp(gid, path->pathrec.dgid.raw,
562 			     sizeof (union ib_gid));
563 
564 		if (ret < 0)
565 			n = n->rb_left;
566 		else if (ret > 0)
567 			n = n->rb_right;
568 		else
569 			return path;
570 	}
571 
572 	return NULL;
573 }
574 
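/*
 * Insert a path into priv->path_tree and priv->path_list; returns -EEXIST
 * if a path with the same destination GID is already present.
 */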
575 static int __path_add(struct net_device *dev, struct ipoib_path *path)
576 {
577 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
578 	struct rb_node **n = &priv->path_tree.rb_node;
579 	struct rb_node *pn = NULL;
580 	struct ipoib_path *tpath;
581 	int ret;
582 
583 	while (*n) {
584 		pn = *n;
585 		tpath = rb_entry(pn, struct ipoib_path, rb_node);
586 
587 		ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
588 			     sizeof (union ib_gid));
589 		if (ret < 0)
590 			n = &pn->rb_left;
591 		else if (ret > 0)
592 			n = &pn->rb_right;
593 		else
594 			return -EEXIST;
595 	}
596 
597 	rb_link_node(&path->rb_node, pn, n);
598 	rb_insert_color(&path->rb_node, &priv->path_tree);
599 
600 	list_add_tail(&path->list, &priv->path_list);
601 
602 	return 0;
603 }
604 
605 static void path_free(struct net_device *dev, struct ipoib_path *path)
606 {
607 	struct sk_buff *skb;
608 
609 	while ((skb = __skb_dequeue(&path->queue)))
610 		dev_kfree_skb_irq(skb);
611 
612 	ipoib_dbg(ipoib_priv(dev), "%s\n", __func__);
613 
	/* remove all neighs connected to this path */
615 	ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);
616 
617 	if (path->ah)
618 		ipoib_put_ah(path->ah);
619 
620 	kfree(path);
621 }
622 
623 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
624 
625 struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev)
626 {
627 	struct ipoib_path_iter *iter;
628 
629 	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
630 	if (!iter)
631 		return NULL;
632 
633 	iter->dev = dev;
634 	memset(iter->path.pathrec.dgid.raw, 0, 16);
635 
636 	if (ipoib_path_iter_next(iter)) {
637 		kfree(iter);
638 		return NULL;
639 	}
640 
641 	return iter;
642 }
643 
644 int ipoib_path_iter_next(struct ipoib_path_iter *iter)
645 {
646 	struct ipoib_dev_priv *priv = ipoib_priv(iter->dev);
647 	struct rb_node *n;
648 	struct ipoib_path *path;
649 	int ret = 1;
650 
651 	spin_lock_irq(&priv->lock);
652 
653 	n = rb_first(&priv->path_tree);
654 
655 	while (n) {
656 		path = rb_entry(n, struct ipoib_path, rb_node);
657 
658 		if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
659 			   sizeof (union ib_gid)) < 0) {
660 			iter->path = *path;
661 			ret = 0;
662 			break;
663 		}
664 
665 		n = rb_next(n);
666 	}
667 
668 	spin_unlock_irq(&priv->lock);
669 
670 	return ret;
671 }
672 
673 void ipoib_path_iter_read(struct ipoib_path_iter *iter,
674 			  struct ipoib_path *path)
675 {
676 	*path = iter->path;
677 }
678 
679 #endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */
680 
681 void ipoib_mark_paths_invalid(struct net_device *dev)
682 {
683 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
684 	struct ipoib_path *path, *tp;
685 
686 	spin_lock_irq(&priv->lock);
687 
688 	list_for_each_entry_safe(path, tp, &priv->path_list, list) {
689 		ipoib_dbg(priv, "mark path LID 0x%08x GID %pI6 invalid\n",
690 			  be32_to_cpu(sa_path_get_dlid(&path->pathrec)),
691 			  path->pathrec.dgid.raw);
692 		if (path->ah)
693 			path->ah->valid = 0;
694 	}
695 
696 	spin_unlock_irq(&priv->lock);
697 }
698 
699 static void push_pseudo_header(struct sk_buff *skb, const char *daddr)
700 {
701 	struct ipoib_pseudo_header *phdr;
702 
703 	phdr = skb_push(skb, sizeof(*phdr));
704 	memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
705 }
706 
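/*
 * Tear down every path on the device: cancel any outstanding SA queries,
 * wait for their completions (dropping the locks while waiting) and free
 * the paths along with their queued skbs.
 */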
707 void ipoib_flush_paths(struct net_device *dev)
708 {
709 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
710 	struct ipoib_path *path, *tp;
711 	LIST_HEAD(remove_list);
712 	unsigned long flags;
713 
714 	netif_tx_lock_bh(dev);
715 	spin_lock_irqsave(&priv->lock, flags);
716 
717 	list_splice_init(&priv->path_list, &remove_list);
718 
719 	list_for_each_entry(path, &remove_list, list)
720 		rb_erase(&path->rb_node, &priv->path_tree);
721 
722 	list_for_each_entry_safe(path, tp, &remove_list, list) {
723 		if (path->query)
724 			ib_sa_cancel_query(path->query_id, path->query);
725 		spin_unlock_irqrestore(&priv->lock, flags);
726 		netif_tx_unlock_bh(dev);
727 		wait_for_completion(&path->done);
728 		path_free(dev, path);
729 		netif_tx_lock_bh(dev);
730 		spin_lock_irqsave(&priv->lock, flags);
731 	}
732 
733 	spin_unlock_irqrestore(&priv->lock, flags);
734 	netif_tx_unlock_bh(dev);
735 }
736 
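/*
 * SA path record query completion: on success, create a fresh address
 * handle, point any neighbours waiting on this path at it and re-queue
 * their pending skbs; on failure, the neighbours using this path are
 * removed.
 */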
737 static void path_rec_completion(int status,
738 				struct sa_path_rec *pathrec,
739 				void *path_ptr)
740 {
741 	struct ipoib_path *path = path_ptr;
742 	struct net_device *dev = path->dev;
743 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
744 	struct ipoib_ah *ah = NULL;
745 	struct ipoib_ah *old_ah = NULL;
746 	struct ipoib_neigh *neigh, *tn;
747 	struct sk_buff_head skqueue;
748 	struct sk_buff *skb;
749 	unsigned long flags;
750 
751 	if (!status)
752 		ipoib_dbg(priv, "PathRec LID 0x%04x for GID %pI6\n",
753 			  be32_to_cpu(sa_path_get_dlid(pathrec)),
754 			  pathrec->dgid.raw);
755 	else
756 		ipoib_dbg(priv, "PathRec status %d for GID %pI6\n",
757 			  status, path->pathrec.dgid.raw);
758 
759 	skb_queue_head_init(&skqueue);
760 
761 	if (!status) {
762 		struct rdma_ah_attr av;
763 
764 		if (!ib_init_ah_attr_from_path(priv->ca, priv->port,
765 					       pathrec, &av, NULL)) {
766 			ah = ipoib_create_ah(dev, priv->pd, &av);
767 			rdma_destroy_ah_attr(&av);
768 		}
769 	}
770 
771 	spin_lock_irqsave(&priv->lock, flags);
772 
773 	if (!IS_ERR_OR_NULL(ah)) {
774 		/*
775 		 * pathrec.dgid is used as the database key from the LLADDR,
776 		 * it must remain unchanged even if the SA returns a different
777 		 * GID to use in the AH.
778 		 */
779 		if (memcmp(pathrec->dgid.raw, path->pathrec.dgid.raw,
780 			   sizeof(union ib_gid))) {
781 			ipoib_dbg(
782 				priv,
783 				"%s got PathRec for gid %pI6 while asked for %pI6\n",
784 				dev->name, pathrec->dgid.raw,
785 				path->pathrec.dgid.raw);
786 			memcpy(pathrec->dgid.raw, path->pathrec.dgid.raw,
787 			       sizeof(union ib_gid));
788 		}
789 
790 		path->pathrec = *pathrec;
791 
792 		old_ah   = path->ah;
793 		path->ah = ah;
794 
795 		ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
796 			  ah, be32_to_cpu(sa_path_get_dlid(pathrec)),
797 			  pathrec->sl);
798 
799 		while ((skb = __skb_dequeue(&path->queue)))
800 			__skb_queue_tail(&skqueue, skb);
801 
802 		list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
803 			if (neigh->ah) {
804 				WARN_ON(neigh->ah != old_ah);
805 				/*
806 				 * Dropping the ah reference inside
807 				 * priv->lock is safe here, because we
808 				 * will hold one more reference from
809 				 * the original value of path->ah (ie
810 				 * old_ah).
811 				 */
812 				ipoib_put_ah(neigh->ah);
813 			}
814 			kref_get(&path->ah->ref);
815 			neigh->ah = path->ah;
816 
817 			if (ipoib_cm_enabled(dev, neigh->daddr)) {
818 				if (!ipoib_cm_get(neigh))
819 					ipoib_cm_set(neigh, ipoib_cm_create_tx(dev,
820 									       path,
821 									       neigh));
822 				if (!ipoib_cm_get(neigh)) {
823 					ipoib_neigh_free(neigh);
824 					continue;
825 				}
826 			}
827 
828 			while ((skb = __skb_dequeue(&neigh->queue)))
829 				__skb_queue_tail(&skqueue, skb);
830 		}
831 		path->ah->valid = 1;
832 	}
833 
834 	path->query = NULL;
835 	complete(&path->done);
836 
837 	spin_unlock_irqrestore(&priv->lock, flags);
838 
839 	if (IS_ERR_OR_NULL(ah))
840 		ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);
841 
842 	if (old_ah)
843 		ipoib_put_ah(old_ah);
844 
845 	while ((skb = __skb_dequeue(&skqueue))) {
846 		int ret;
847 		skb->dev = dev;
848 		ret = dev_queue_xmit(skb);
849 		if (ret)
850 			ipoib_warn(priv, "%s: dev_queue_xmit failed to re-queue packet, ret:%d\n",
851 				   __func__, ret);
852 	}
853 }
854 
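/*
 * Fill in the path record fields used as the SA query key: record type,
 * DGID, SGID, P_Key, traffic class and a single path.
 */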
855 static void init_path_rec(struct ipoib_dev_priv *priv, struct ipoib_path *path,
856 			  void *gid)
857 {
858 	path->dev = priv->dev;
859 
860 	if (rdma_cap_opa_ah(priv->ca, priv->port))
861 		path->pathrec.rec_type = SA_PATH_REC_TYPE_OPA;
862 	else
863 		path->pathrec.rec_type = SA_PATH_REC_TYPE_IB;
864 
865 	memcpy(path->pathrec.dgid.raw, gid, sizeof(union ib_gid));
866 	path->pathrec.sgid	    = priv->local_gid;
867 	path->pathrec.pkey	    = cpu_to_be16(priv->pkey);
868 	path->pathrec.numb_path     = 1;
869 	path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;
870 }
871 
872 static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
873 {
874 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
875 	struct ipoib_path *path;
876 
877 	if (!priv->broadcast)
878 		return NULL;
879 
880 	path = kzalloc(sizeof(*path), GFP_ATOMIC);
881 	if (!path)
882 		return NULL;
883 
884 	skb_queue_head_init(&path->queue);
885 
886 	INIT_LIST_HEAD(&path->neigh_list);
887 
888 	init_path_rec(priv, path, gid);
889 
890 	return path;
891 }
892 
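/*
 * Issue an asynchronous SA path record query for this path;
 * path_rec_completion() runs when the query finishes.
 */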
893 static int path_rec_start(struct net_device *dev,
894 			  struct ipoib_path *path)
895 {
896 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
897 
898 	ipoib_dbg(priv, "Start path record lookup for %pI6\n",
899 		  path->pathrec.dgid.raw);
900 
901 	init_completion(&path->done);
902 
903 	path->query_id =
904 		ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port,
905 				   &path->pathrec,
906 				   IB_SA_PATH_REC_DGID		|
907 				   IB_SA_PATH_REC_SGID		|
908 				   IB_SA_PATH_REC_NUMB_PATH	|
909 				   IB_SA_PATH_REC_TRAFFIC_CLASS |
910 				   IB_SA_PATH_REC_PKEY,
911 				   1000, GFP_ATOMIC,
912 				   path_rec_completion,
913 				   path, &path->query);
914 	if (path->query_id < 0) {
915 		ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id);
916 		path->query = NULL;
917 		complete(&path->done);
918 		return path->query_id;
919 	}
920 
921 	return 0;
922 }
923 
924 static void neigh_refresh_path(struct ipoib_neigh *neigh, u8 *daddr,
925 			       struct net_device *dev)
926 {
927 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
928 	struct ipoib_path *path;
929 	unsigned long flags;
930 
931 	spin_lock_irqsave(&priv->lock, flags);
932 
933 	path = __path_find(dev, daddr + 4);
934 	if (!path)
935 		goto out;
936 	if (!path->query)
937 		path_rec_start(dev, path);
938 out:
939 	spin_unlock_irqrestore(&priv->lock, flags);
940 }
941 
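/*
 * Look up or create a neighbour for daddr, attach it to the matching path
 * (starting a path record query if needed) and transmit or queue the skb.
 * Returns NULL once the skb has been consumed, or the existing neigh (with
 * a reference held) if another thread is already adding it, in which case
 * the caller still owns the skb.
 */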
942 static struct ipoib_neigh *neigh_add_path(struct sk_buff *skb, u8 *daddr,
943 					  struct net_device *dev)
944 {
945 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
946 	struct rdma_netdev *rn = netdev_priv(dev);
947 	struct ipoib_path *path;
948 	struct ipoib_neigh *neigh;
949 	unsigned long flags;
950 
951 	spin_lock_irqsave(&priv->lock, flags);
952 	neigh = ipoib_neigh_alloc(daddr, dev);
953 	if (!neigh) {
954 		spin_unlock_irqrestore(&priv->lock, flags);
955 		++dev->stats.tx_dropped;
956 		dev_kfree_skb_any(skb);
957 		return NULL;
958 	}
959 
	/* To avoid a race condition, make sure that the
	 * neigh will be added only once.
	 */
963 	if (unlikely(!list_empty(&neigh->list))) {
964 		spin_unlock_irqrestore(&priv->lock, flags);
965 		return neigh;
966 	}
967 
968 	path = __path_find(dev, daddr + 4);
969 	if (!path) {
970 		path = path_rec_create(dev, daddr + 4);
971 		if (!path)
972 			goto err_path;
973 
974 		__path_add(dev, path);
975 	}
976 
977 	list_add_tail(&neigh->list, &path->neigh_list);
978 
979 	if (path->ah && path->ah->valid) {
980 		kref_get(&path->ah->ref);
981 		neigh->ah = path->ah;
982 
983 		if (ipoib_cm_enabled(dev, neigh->daddr)) {
984 			if (!ipoib_cm_get(neigh))
985 				ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, path, neigh));
986 			if (!ipoib_cm_get(neigh)) {
987 				ipoib_neigh_free(neigh);
988 				goto err_drop;
989 			}
990 			if (skb_queue_len(&neigh->queue) <
991 			    IPOIB_MAX_PATH_REC_QUEUE) {
992 				push_pseudo_header(skb, neigh->daddr);
993 				__skb_queue_tail(&neigh->queue, skb);
994 			} else {
995 				ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
996 					   skb_queue_len(&neigh->queue));
997 				goto err_drop;
998 			}
999 		} else {
1000 			spin_unlock_irqrestore(&priv->lock, flags);
1001 			path->ah->last_send = rn->send(dev, skb, path->ah->ah,
1002 						       IPOIB_QPN(daddr));
1003 			ipoib_neigh_put(neigh);
1004 			return NULL;
1005 		}
1006 	} else {
1007 		neigh->ah  = NULL;
1008 
1009 		if (!path->query && path_rec_start(dev, path))
1010 			goto err_path;
1011 		if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
1012 			push_pseudo_header(skb, neigh->daddr);
1013 			__skb_queue_tail(&neigh->queue, skb);
1014 		} else {
1015 			goto err_drop;
1016 		}
1017 	}
1018 
1019 	spin_unlock_irqrestore(&priv->lock, flags);
1020 	ipoib_neigh_put(neigh);
1021 	return NULL;
1022 
1023 err_path:
1024 	ipoib_neigh_free(neigh);
1025 err_drop:
1026 	++dev->stats.tx_dropped;
1027 	dev_kfree_skb_any(skb);
1028 
1029 	spin_unlock_irqrestore(&priv->lock, flags);
1030 	ipoib_neigh_put(neigh);
1031 
1032 	return NULL;
1033 }
1034 
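/*
 * Transmit a unicast ARP/RARP via the path database, starting or refreshing
 * a path record query as needed and queuing the skb while the query is
 * outstanding.
 */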
1035 static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
1036 			     struct ipoib_pseudo_header *phdr)
1037 {
1038 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1039 	struct rdma_netdev *rn = netdev_priv(dev);
1040 	struct ipoib_path *path;
1041 	unsigned long flags;
1042 
1043 	spin_lock_irqsave(&priv->lock, flags);
1044 
	/* no broadcast means that all paths are (or will become) invalid */
1046 	if (!priv->broadcast)
1047 		goto drop_and_unlock;
1048 
1049 	path = __path_find(dev, phdr->hwaddr + 4);
1050 	if (!path || !path->ah || !path->ah->valid) {
1051 		if (!path) {
1052 			path = path_rec_create(dev, phdr->hwaddr + 4);
1053 			if (!path)
1054 				goto drop_and_unlock;
1055 			__path_add(dev, path);
1056 		} else {
1057 			/*
1058 			 * make sure there are no changes in the existing
1059 			 * path record
1060 			 */
1061 			init_path_rec(priv, path, phdr->hwaddr + 4);
1062 		}
1063 		if (!path->query && path_rec_start(dev, path)) {
1064 			goto drop_and_unlock;
1065 		}
1066 
1067 		if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
1068 			push_pseudo_header(skb, phdr->hwaddr);
1069 			__skb_queue_tail(&path->queue, skb);
1070 			goto unlock;
1071 		} else {
1072 			goto drop_and_unlock;
1073 		}
1074 	}
1075 
1076 	spin_unlock_irqrestore(&priv->lock, flags);
1077 	ipoib_dbg(priv, "Send unicast ARP to %08x\n",
1078 		  be32_to_cpu(sa_path_get_dlid(&path->pathrec)));
1079 	path->ah->last_send = rn->send(dev, skb, path->ah->ah,
1080 				       IPOIB_QPN(phdr->hwaddr));
1081 	return;
1082 
1083 drop_and_unlock:
1084 	++dev->stats.tx_dropped;
1085 	dev_kfree_skb_any(skb);
1086 unlock:
1087 	spin_unlock_irqrestore(&priv->lock, flags);
1088 }
1089 
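/*
 * Transmit entry point: the pseudo header pushed by ipoib_hard_header()
 * carries the destination hardware address, which selects between the
 * multicast, connected-mode and datagram send paths.
 */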
1090 static netdev_tx_t ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
1091 {
1092 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1093 	struct rdma_netdev *rn = netdev_priv(dev);
1094 	struct ipoib_neigh *neigh;
1095 	struct ipoib_pseudo_header *phdr;
1096 	struct ipoib_header *header;
1097 	unsigned long flags;
1098 
1099 	phdr = (struct ipoib_pseudo_header *) skb->data;
1100 	skb_pull(skb, sizeof(*phdr));
1101 	header = (struct ipoib_header *) skb->data;
1102 
1103 	if (unlikely(phdr->hwaddr[4] == 0xff)) {
1104 		/* multicast, arrange "if" according to probability */
1105 		if ((header->proto != htons(ETH_P_IP)) &&
1106 		    (header->proto != htons(ETH_P_IPV6)) &&
1107 		    (header->proto != htons(ETH_P_ARP)) &&
1108 		    (header->proto != htons(ETH_P_RARP)) &&
1109 		    (header->proto != htons(ETH_P_TIPC))) {
1110 			/* ethertype not supported by IPoIB */
1111 			++dev->stats.tx_dropped;
1112 			dev_kfree_skb_any(skb);
1113 			return NETDEV_TX_OK;
1114 		}
		/* Add in the P_Key for multicast */
1116 		phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
1117 		phdr->hwaddr[9] = priv->pkey & 0xff;
1118 
1119 		neigh = ipoib_neigh_get(dev, phdr->hwaddr);
1120 		if (likely(neigh))
1121 			goto send_using_neigh;
1122 		ipoib_mcast_send(dev, phdr->hwaddr, skb);
1123 		return NETDEV_TX_OK;
1124 	}
1125 
1126 	/* unicast, arrange "switch" according to probability */
1127 	switch (header->proto) {
1128 	case htons(ETH_P_IP):
1129 	case htons(ETH_P_IPV6):
1130 	case htons(ETH_P_TIPC):
1131 		neigh = ipoib_neigh_get(dev, phdr->hwaddr);
1132 		if (unlikely(!neigh)) {
1133 			neigh = neigh_add_path(skb, phdr->hwaddr, dev);
1134 			if (likely(!neigh))
1135 				return NETDEV_TX_OK;
1136 		}
1137 		break;
1138 	case htons(ETH_P_ARP):
1139 	case htons(ETH_P_RARP):
		/* unicast ARP and RARP should always perform a path lookup */
1141 		unicast_arp_send(skb, dev, phdr);
1142 		return NETDEV_TX_OK;
1143 	default:
1144 		/* ethertype not supported by IPoIB */
1145 		++dev->stats.tx_dropped;
1146 		dev_kfree_skb_any(skb);
1147 		return NETDEV_TX_OK;
1148 	}
1149 
1150 send_using_neigh:
1151 	/* note we now hold a ref to neigh */
1152 	if (ipoib_cm_get(neigh)) {
1153 		if (ipoib_cm_up(neigh)) {
1154 			ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
1155 			goto unref;
1156 		}
1157 	} else if (neigh->ah && neigh->ah->valid) {
1158 		neigh->ah->last_send = rn->send(dev, skb, neigh->ah->ah,
1159 						IPOIB_QPN(phdr->hwaddr));
1160 		goto unref;
1161 	} else if (neigh->ah) {
1162 		neigh_refresh_path(neigh, phdr->hwaddr, dev);
1163 	}
1164 
1165 	if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
1166 		push_pseudo_header(skb, phdr->hwaddr);
1167 		spin_lock_irqsave(&priv->lock, flags);
1168 		__skb_queue_tail(&neigh->queue, skb);
1169 		spin_unlock_irqrestore(&priv->lock, flags);
1170 	} else {
1171 		++dev->stats.tx_dropped;
1172 		dev_kfree_skb_any(skb);
1173 	}
1174 
1175 unref:
1176 	ipoib_neigh_put(neigh);
1177 
1178 	return NETDEV_TX_OK;
1179 }
1180 
1181 static void ipoib_timeout(struct net_device *dev, unsigned int txqueue)
1182 {
1183 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1184 
1185 	ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
1186 		   jiffies_to_msecs(jiffies - dev_trans_start(dev)));
1187 	ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
1188 		   netif_queue_stopped(dev),
1189 		   priv->tx_head, priv->tx_tail);
1190 	/* XXX reset QP, etc. */
1191 }
1192 
1193 static int ipoib_hard_header(struct sk_buff *skb,
1194 			     struct net_device *dev,
1195 			     unsigned short type,
1196 			     const void *daddr,
1197 			     const void *saddr,
1198 			     unsigned int len)
1199 {
1200 	struct ipoib_header *header;
1201 
1202 	header = skb_push(skb, sizeof(*header));
1203 
1204 	header->proto = htons(type);
1205 	header->reserved = 0;
1206 
	/*
	 * We don't rely on the dst_entry structure; always stuff the
	 * destination address into the skb hard header so we can figure out
	 * where to send the packet later.
	 */
1212 	push_pseudo_header(skb, daddr);
1213 
1214 	return IPOIB_HARD_LEN;
1215 }
1216 
1217 static void ipoib_set_mcast_list(struct net_device *dev)
1218 {
1219 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1220 
1221 	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
1222 		ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set");
1223 		return;
1224 	}
1225 
1226 	queue_work(priv->wq, &priv->restart_task);
1227 }
1228 
1229 static int ipoib_get_iflink(const struct net_device *dev)
1230 {
1231 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1232 
1233 	/* parent interface */
1234 	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
1235 		return dev->ifindex;
1236 
1237 	/* child/vlan interface */
1238 	return priv->parent->ifindex;
1239 }
1240 
1241 static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
1242 {
	/*
	 * Use only the address parts that contribute to spreading.
	 * The subnet prefix is not used, as one cannot connect to the
	 * same remote port (GUID) using the same remote QPN via two
	 * different subnets.
	 */
1249 	 /* qpn octets[1:4) & port GUID octets[12:20) */
1250 	u32 *d32 = (u32 *) daddr;
1251 	u32 hv;
1252 
1253 	hv = jhash_3words(d32[3], d32[4], IPOIB_QPN_MASK & d32[0], 0);
1254 	return hv & htbl->mask;
1255 }
1256 
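/*
 * RCU lookup of a neighbour by hardware address; on success a reference is
 * taken on behalf of the caller (release it with ipoib_neigh_put()).
 */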
1257 struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr)
1258 {
1259 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1260 	struct ipoib_neigh_table *ntbl = &priv->ntbl;
1261 	struct ipoib_neigh_hash *htbl;
1262 	struct ipoib_neigh *neigh = NULL;
1263 	u32 hash_val;
1264 
1265 	rcu_read_lock_bh();
1266 
1267 	htbl = rcu_dereference_bh(ntbl->htbl);
1268 
1269 	if (!htbl)
1270 		goto out_unlock;
1271 
1272 	hash_val = ipoib_addr_hash(htbl, daddr);
1273 	for (neigh = rcu_dereference_bh(htbl->buckets[hash_val]);
1274 	     neigh != NULL;
1275 	     neigh = rcu_dereference_bh(neigh->hnext)) {
1276 		if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
1277 			/* found, take one ref on behalf of the caller */
1278 			if (!atomic_inc_not_zero(&neigh->refcnt)) {
1279 				/* deleted */
1280 				neigh = NULL;
1281 				goto out_unlock;
1282 			}
1283 
1284 			if (likely(skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE))
1285 				neigh->alive = jiffies;
1286 			goto out_unlock;
1287 		}
1288 	}
1289 
1290 out_unlock:
1291 	rcu_read_unlock_bh();
1292 	return neigh;
1293 }
1294 
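/*
 * Garbage-collect neighbours that have been idle for two ARP GC intervals,
 * collecting their send-only multicast groups on remove_list for deletion.
 */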
1295 static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
1296 {
1297 	struct ipoib_neigh_table *ntbl = &priv->ntbl;
1298 	struct ipoib_neigh_hash *htbl;
1299 	unsigned long neigh_obsolete;
1300 	unsigned long dt;
1301 	unsigned long flags;
1302 	int i;
1303 	LIST_HEAD(remove_list);
1304 
1305 	spin_lock_irqsave(&priv->lock, flags);
1306 
1307 	htbl = rcu_dereference_protected(ntbl->htbl,
1308 					 lockdep_is_held(&priv->lock));
1309 
1310 	if (!htbl)
1311 		goto out_unlock;
1312 
1313 	/* neigh is obsolete if it was idle for two GC periods */
1314 	dt = 2 * arp_tbl.gc_interval;
1315 	neigh_obsolete = jiffies - dt;
1316 
1317 	for (i = 0; i < htbl->size; i++) {
1318 		struct ipoib_neigh *neigh;
1319 		struct ipoib_neigh __rcu **np = &htbl->buckets[i];
1320 
1321 		while ((neigh = rcu_dereference_protected(*np,
1322 							  lockdep_is_held(&priv->lock))) != NULL) {
1323 			/* was the neigh idle for two GC periods */
1324 			if (time_after(neigh_obsolete, neigh->alive)) {
1325 
1326 				ipoib_check_and_add_mcast_sendonly(priv, neigh->daddr + 4, &remove_list);
1327 
1328 				rcu_assign_pointer(*np,
1329 						   rcu_dereference_protected(neigh->hnext,
1330 									     lockdep_is_held(&priv->lock)));
1331 				/* remove from path/mc list */
1332 				list_del_init(&neigh->list);
1333 				call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
1334 			} else {
1335 				np = &neigh->hnext;
1336 			}
1337 
1338 		}
1339 	}
1340 
1341 out_unlock:
1342 	spin_unlock_irqrestore(&priv->lock, flags);
1343 	ipoib_mcast_remove_list(&remove_list);
1344 }
1345 
1346 static void ipoib_reap_neigh(struct work_struct *work)
1347 {
1348 	struct ipoib_dev_priv *priv =
1349 		container_of(work, struct ipoib_dev_priv, neigh_reap_task.work);
1350 
1351 	__ipoib_reap_neigh(priv);
1352 
1353 	queue_delayed_work(priv->wq, &priv->neigh_reap_task,
1354 			   arp_tbl.gc_interval);
1355 }
1356 
1357 
1358 static struct ipoib_neigh *ipoib_neigh_ctor(u8 *daddr,
1359 				      struct net_device *dev)
1360 {
1361 	struct ipoib_neigh *neigh;
1362 
1363 	neigh = kzalloc(sizeof(*neigh), GFP_ATOMIC);
1364 	if (!neigh)
1365 		return NULL;
1366 
1367 	neigh->dev = dev;
1368 	memcpy(&neigh->daddr, daddr, sizeof(neigh->daddr));
1369 	skb_queue_head_init(&neigh->queue);
1370 	INIT_LIST_HEAD(&neigh->list);
1371 	ipoib_cm_set(neigh, NULL);
1372 	/* one ref on behalf of the caller */
1373 	atomic_set(&neigh->refcnt, 1);
1374 
1375 	return neigh;
1376 }
1377 
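/*
 * Look up or create a neighbour entry for daddr; returns it with a
 * reference held on behalf of the caller, or NULL on allocation failure.
 * Called with priv->lock held.
 */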
1378 struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr,
1379 				      struct net_device *dev)
1380 {
1381 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1382 	struct ipoib_neigh_table *ntbl = &priv->ntbl;
1383 	struct ipoib_neigh_hash *htbl;
1384 	struct ipoib_neigh *neigh;
1385 	u32 hash_val;
1386 
1387 	htbl = rcu_dereference_protected(ntbl->htbl,
1388 					 lockdep_is_held(&priv->lock));
1389 	if (!htbl) {
1390 		neigh = NULL;
1391 		goto out_unlock;
1392 	}
1393 
	/* We need to add a new neigh, but another thread may have beaten us
	 * to it; recalculate the hash (a resize may have taken place) and
	 * search again.
	 */
1397 	hash_val = ipoib_addr_hash(htbl, daddr);
1398 	for (neigh = rcu_dereference_protected(htbl->buckets[hash_val],
1399 					       lockdep_is_held(&priv->lock));
1400 	     neigh != NULL;
1401 	     neigh = rcu_dereference_protected(neigh->hnext,
1402 					       lockdep_is_held(&priv->lock))) {
1403 		if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
1404 			/* found, take one ref on behalf of the caller */
1405 			if (!atomic_inc_not_zero(&neigh->refcnt)) {
1406 				/* deleted */
1407 				neigh = NULL;
1408 				break;
1409 			}
1410 			neigh->alive = jiffies;
1411 			goto out_unlock;
1412 		}
1413 	}
1414 
1415 	neigh = ipoib_neigh_ctor(daddr, dev);
1416 	if (!neigh)
1417 		goto out_unlock;
1418 
1419 	/* one ref on behalf of the hash table */
1420 	atomic_inc(&neigh->refcnt);
1421 	neigh->alive = jiffies;
1422 	/* put in hash */
1423 	rcu_assign_pointer(neigh->hnext,
1424 			   rcu_dereference_protected(htbl->buckets[hash_val],
1425 						     lockdep_is_held(&priv->lock)));
1426 	rcu_assign_pointer(htbl->buckets[hash_val], neigh);
1427 	atomic_inc(&ntbl->entries);
1428 
1429 out_unlock:
1430 
1431 	return neigh;
1432 }
1433 
1434 void ipoib_neigh_dtor(struct ipoib_neigh *neigh)
1435 {
	/* neigh reference count was dropped to zero */
1437 	struct net_device *dev = neigh->dev;
1438 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1439 	struct sk_buff *skb;
1440 	if (neigh->ah)
1441 		ipoib_put_ah(neigh->ah);
1442 	while ((skb = __skb_dequeue(&neigh->queue))) {
1443 		++dev->stats.tx_dropped;
1444 		dev_kfree_skb_any(skb);
1445 	}
1446 	if (ipoib_cm_get(neigh))
1447 		ipoib_cm_destroy_tx(ipoib_cm_get(neigh));
1448 	ipoib_dbg(ipoib_priv(dev),
1449 		  "neigh free for %06x %pI6\n",
1450 		  IPOIB_QPN(neigh->daddr),
1451 		  neigh->daddr + 4);
1452 	kfree(neigh);
1453 	if (atomic_dec_and_test(&priv->ntbl.entries)) {
1454 		if (test_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags))
1455 			complete(&priv->ntbl.flushed);
1456 	}
1457 }
1458 
1459 static void ipoib_neigh_reclaim(struct rcu_head *rp)
1460 {
1461 	/* Called as a result of removal from hash table */
1462 	struct ipoib_neigh *neigh = container_of(rp, struct ipoib_neigh, rcu);
1463 	/* note TX context may hold another ref */
1464 	ipoib_neigh_put(neigh);
1465 }
1466 
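/*
 * Unlink a neighbour from its hash bucket and parent list; the final free
 * happens from ipoib_neigh_reclaim() after an RCU grace period. Called
 * with priv->lock held.
 */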
1467 void ipoib_neigh_free(struct ipoib_neigh *neigh)
1468 {
1469 	struct net_device *dev = neigh->dev;
1470 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1471 	struct ipoib_neigh_table *ntbl = &priv->ntbl;
1472 	struct ipoib_neigh_hash *htbl;
1473 	struct ipoib_neigh __rcu **np;
1474 	struct ipoib_neigh *n;
1475 	u32 hash_val;
1476 
1477 	htbl = rcu_dereference_protected(ntbl->htbl,
1478 					lockdep_is_held(&priv->lock));
1479 	if (!htbl)
1480 		return;
1481 
1482 	hash_val = ipoib_addr_hash(htbl, neigh->daddr);
1483 	np = &htbl->buckets[hash_val];
1484 	for (n = rcu_dereference_protected(*np,
1485 					    lockdep_is_held(&priv->lock));
1486 	     n != NULL;
1487 	     n = rcu_dereference_protected(*np,
1488 					lockdep_is_held(&priv->lock))) {
1489 		if (n == neigh) {
1490 			/* found */
1491 			rcu_assign_pointer(*np,
1492 					   rcu_dereference_protected(neigh->hnext,
1493 								     lockdep_is_held(&priv->lock)));
1494 			/* remove from parent list */
1495 			list_del_init(&neigh->list);
1496 			call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
1497 			return;
1498 		} else {
1499 			np = &n->hnext;
1500 		}
1501 	}
1502 }
1503 
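/*
 * Allocate the neighbour hash table, sized from arp_tbl.gc_thresh3 rounded
 * up to a power of two, and start the periodic neighbour reaper.
 */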
1504 static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
1505 {
1506 	struct ipoib_neigh_table *ntbl = &priv->ntbl;
1507 	struct ipoib_neigh_hash *htbl;
1508 	struct ipoib_neigh __rcu **buckets;
1509 	u32 size;
1510 
1511 	clear_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
1512 	ntbl->htbl = NULL;
1513 	htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
1514 	if (!htbl)
1515 		return -ENOMEM;
1516 	size = roundup_pow_of_two(arp_tbl.gc_thresh3);
1517 	buckets = kvcalloc(size, sizeof(*buckets), GFP_KERNEL);
1518 	if (!buckets) {
1519 		kfree(htbl);
1520 		return -ENOMEM;
1521 	}
1522 	htbl->size = size;
1523 	htbl->mask = (size - 1);
1524 	htbl->buckets = buckets;
1525 	RCU_INIT_POINTER(ntbl->htbl, htbl);
1526 	htbl->ntbl = ntbl;
1527 	atomic_set(&ntbl->entries, 0);
1528 
1529 	/* start garbage collection */
1530 	queue_delayed_work(priv->wq, &priv->neigh_reap_task,
1531 			   arp_tbl.gc_interval);
1532 
1533 	return 0;
1534 }
1535 
1536 static void neigh_hash_free_rcu(struct rcu_head *head)
1537 {
1538 	struct ipoib_neigh_hash *htbl = container_of(head,
1539 						    struct ipoib_neigh_hash,
1540 						    rcu);
1541 	struct ipoib_neigh __rcu **buckets = htbl->buckets;
1542 	struct ipoib_neigh_table *ntbl = htbl->ntbl;
1543 
1544 	kvfree(buckets);
1545 	kfree(htbl);
1546 	complete(&ntbl->deleted);
1547 }
1548 
1549 void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid)
1550 {
1551 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1552 	struct ipoib_neigh_table *ntbl = &priv->ntbl;
1553 	struct ipoib_neigh_hash *htbl;
1554 	unsigned long flags;
1555 	int i;
1556 
1557 	/* remove all neigh connected to a given path or mcast */
1558 	spin_lock_irqsave(&priv->lock, flags);
1559 
1560 	htbl = rcu_dereference_protected(ntbl->htbl,
1561 					 lockdep_is_held(&priv->lock));
1562 
1563 	if (!htbl)
1564 		goto out_unlock;
1565 
1566 	for (i = 0; i < htbl->size; i++) {
1567 		struct ipoib_neigh *neigh;
1568 		struct ipoib_neigh __rcu **np = &htbl->buckets[i];
1569 
1570 		while ((neigh = rcu_dereference_protected(*np,
1571 							  lockdep_is_held(&priv->lock))) != NULL) {
			/* delete neighs belonging to this parent */
1573 			if (!memcmp(gid, neigh->daddr + 4, sizeof (union ib_gid))) {
1574 				rcu_assign_pointer(*np,
1575 						   rcu_dereference_protected(neigh->hnext,
1576 									     lockdep_is_held(&priv->lock)));
1577 				/* remove from parent list */
1578 				list_del_init(&neigh->list);
1579 				call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
1580 			} else {
1581 				np = &neigh->hnext;
1582 			}
1583 
1584 		}
1585 	}
1586 out_unlock:
1587 	spin_unlock_irqrestore(&priv->lock, flags);
1588 }
1589 
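/*
 * Flush the whole neighbour table: unlink every entry, free the hash table
 * via RCU and wait until all outstanding neighbour references are dropped.
 */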
1590 static void ipoib_flush_neighs(struct ipoib_dev_priv *priv)
1591 {
1592 	struct ipoib_neigh_table *ntbl = &priv->ntbl;
1593 	struct ipoib_neigh_hash *htbl;
1594 	unsigned long flags;
1595 	int i, wait_flushed = 0;
1596 
1597 	init_completion(&priv->ntbl.flushed);
1598 	set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
1599 
1600 	spin_lock_irqsave(&priv->lock, flags);
1601 
1602 	htbl = rcu_dereference_protected(ntbl->htbl,
1603 					lockdep_is_held(&priv->lock));
1604 	if (!htbl)
1605 		goto out_unlock;
1606 
1607 	wait_flushed = atomic_read(&priv->ntbl.entries);
1608 	if (!wait_flushed)
1609 		goto free_htbl;
1610 
1611 	for (i = 0; i < htbl->size; i++) {
1612 		struct ipoib_neigh *neigh;
1613 		struct ipoib_neigh __rcu **np = &htbl->buckets[i];
1614 
1615 		while ((neigh = rcu_dereference_protected(*np,
1616 				       lockdep_is_held(&priv->lock))) != NULL) {
1617 			rcu_assign_pointer(*np,
1618 					   rcu_dereference_protected(neigh->hnext,
1619 								     lockdep_is_held(&priv->lock)));
1620 			/* remove from path/mc list */
1621 			list_del_init(&neigh->list);
1622 			call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
1623 		}
1624 	}
1625 
1626 free_htbl:
1627 	rcu_assign_pointer(ntbl->htbl, NULL);
1628 	call_rcu(&htbl->rcu, neigh_hash_free_rcu);
1629 
1630 out_unlock:
1631 	spin_unlock_irqrestore(&priv->lock, flags);
1632 	if (wait_flushed)
1633 		wait_for_completion(&priv->ntbl.flushed);
1634 }
1635 
1636 static void ipoib_neigh_hash_uninit(struct net_device *dev)
1637 {
1638 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1639 
1640 	ipoib_dbg(priv, "%s\n", __func__);
1641 	init_completion(&priv->ntbl.deleted);
1642 
1643 	cancel_delayed_work_sync(&priv->neigh_reap_task);
1644 
1645 	ipoib_flush_neighs(priv);
1646 
1647 	wait_for_completion(&priv->ntbl.deleted);
1648 }
1649 
1650 static void ipoib_napi_add(struct net_device *dev)
1651 {
1652 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1653 
1654 	netif_napi_add(dev, &priv->recv_napi, ipoib_rx_poll, IPOIB_NUM_WC);
1655 	netif_napi_add(dev, &priv->send_napi, ipoib_tx_poll, MAX_SEND_CQE);
1656 }
1657 
1658 static void ipoib_napi_del(struct net_device *dev)
1659 {
1660 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1661 
1662 	netif_napi_del(&priv->recv_napi);
1663 	netif_napi_del(&priv->send_napi);
1664 }
1665 
1666 static void ipoib_dev_uninit_default(struct net_device *dev)
1667 {
1668 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1669 
1670 	ipoib_transport_dev_cleanup(dev);
1671 
1672 	ipoib_napi_del(dev);
1673 
1674 	ipoib_cm_dev_cleanup(dev);
1675 
1676 	kfree(priv->rx_ring);
1677 	vfree(priv->tx_ring);
1678 
1679 	priv->rx_ring = NULL;
1680 	priv->tx_ring = NULL;
1681 }
1682 
1683 static int ipoib_dev_init_default(struct net_device *dev)
1684 {
1685 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1686 
1687 	ipoib_napi_add(dev);
1688 
1689 	/* Allocate RX/TX "rings" to hold queued skbs */
1690 	priv->rx_ring =	kcalloc(ipoib_recvq_size,
1691 				       sizeof(*priv->rx_ring),
1692 				       GFP_KERNEL);
1693 	if (!priv->rx_ring)
1694 		goto out;
1695 
1696 	priv->tx_ring = vzalloc(array_size(ipoib_sendq_size,
1697 					   sizeof(*priv->tx_ring)));
1698 	if (!priv->tx_ring) {
1699 		pr_warn("%s: failed to allocate TX ring (%d entries)\n",
1700 			priv->ca->name, ipoib_sendq_size);
1701 		goto out_rx_ring_cleanup;
1702 	}
1703 
1704 	/* priv->tx_head, tx_tail & tx_outstanding are already 0 */
1705 
1706 	if (ipoib_transport_dev_init(dev, priv->ca)) {
1707 		pr_warn("%s: ipoib_transport_dev_init failed\n",
1708 			priv->ca->name);
1709 		goto out_tx_ring_cleanup;
1710 	}
1711 
	/* after the QP is created, set the device address */
1713 	priv->dev->dev_addr[1] = (priv->qp->qp_num >> 16) & 0xff;
1714 	priv->dev->dev_addr[2] = (priv->qp->qp_num >>  8) & 0xff;
1715 	priv->dev->dev_addr[3] = (priv->qp->qp_num) & 0xff;
1716 
1717 	return 0;
1718 
1719 out_tx_ring_cleanup:
1720 	vfree(priv->tx_ring);
1721 
1722 out_rx_ring_cleanup:
1723 	kfree(priv->rx_ring);
1724 
1725 out:
1726 	ipoib_napi_del(dev);
1727 	return -ENOMEM;
1728 }
1729 
1730 static int ipoib_ioctl(struct net_device *dev, struct ifreq *ifr,
1731 		       int cmd)
1732 {
1733 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1734 
1735 	if (!priv->rn_ops->ndo_do_ioctl)
1736 		return -EOPNOTSUPP;
1737 
1738 	return priv->rn_ops->ndo_do_ioctl(dev, ifr, cmd);
1739 }
1740 
1741 static int ipoib_dev_init(struct net_device *dev)
1742 {
1743 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1744 	int ret = -ENOMEM;
1745 
1746 	priv->qp = NULL;
1747 
	/*
	 * the various IPoIB tasks assume they will never race against
	 * themselves, so always use a single-threaded workqueue
	 */
1752 	priv->wq = alloc_ordered_workqueue("ipoib_wq", WQ_MEM_RECLAIM);
1753 	if (!priv->wq) {
1754 		pr_warn("%s: failed to allocate device WQ\n", dev->name);
1755 		goto out;
1756 	}
1757 
	/* create the PD, which is used for both the control path and the data path */
1759 	priv->pd = ib_alloc_pd(priv->ca, 0);
1760 	if (IS_ERR(priv->pd)) {
1761 		pr_warn("%s: failed to allocate PD\n", priv->ca->name);
1762 		goto clean_wq;
1763 	}
1764 
1765 	ret = priv->rn_ops->ndo_init(dev);
1766 	if (ret) {
1767 		pr_warn("%s failed to init HW resource\n", dev->name);
1768 		goto out_free_pd;
1769 	}
1770 
1771 	ret = ipoib_neigh_hash_init(priv);
1772 	if (ret) {
1773 		pr_warn("%s failed to init neigh hash\n", dev->name);
1774 		goto out_dev_uninit;
1775 	}
1776 
1777 	if (dev->flags & IFF_UP) {
1778 		if (ipoib_ib_dev_open(dev)) {
1779 			pr_warn("%s failed to open device\n", dev->name);
1780 			ret = -ENODEV;
1781 			goto out_hash_uninit;
1782 		}
1783 	}
1784 
1785 	return 0;
1786 
1787 out_hash_uninit:
1788 	ipoib_neigh_hash_uninit(dev);
1789 
1790 out_dev_uninit:
1791 	ipoib_ib_dev_cleanup(dev);
1792 
1793 out_free_pd:
1794 	if (priv->pd) {
1795 		ib_dealloc_pd(priv->pd);
1796 		priv->pd = NULL;
1797 	}
1798 
1799 clean_wq:
1800 	if (priv->wq) {
1801 		destroy_workqueue(priv->wq);
1802 		priv->wq = NULL;
1803 	}
1804 
1805 out:
1806 	return ret;
1807 }
1808 
1809 /*
1810  * This must be called before doing an unregister_netdev on a parent device to
1811  * shutdown the IB event handler.
1812  */
1813 static void ipoib_parent_unregister_pre(struct net_device *ndev)
1814 {
1815 	struct ipoib_dev_priv *priv = ipoib_priv(ndev);
1816 
	/*
	 * ipoib_set_mac() checks netif_running() before pushing work;
	 * clearing running ensures it will not add more work.
	 */
1821 	rtnl_lock();
1822 	dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP, NULL);
1823 	rtnl_unlock();
1824 
1825 	/* ipoib_event() cannot be running once this returns */
1826 	ib_unregister_event_handler(&priv->event_handler);
1827 
1828 	/*
1829 	 * Work on the queue grabs the rtnl lock, so this cannot be done while
1830 	 * also holding it.
1831 	 */
1832 	flush_workqueue(ipoib_workqueue);
1833 }
1834 
1835 static void ipoib_set_dev_features(struct ipoib_dev_priv *priv)
1836 {
1837 	priv->hca_caps = priv->ca->attrs.device_cap_flags;
1838 
1839 	if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
1840 		priv->dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
1841 
1842 		if (priv->hca_caps & IB_DEVICE_UD_TSO)
1843 			priv->dev->hw_features |= NETIF_F_TSO;
1844 
1845 		priv->dev->features |= priv->dev->hw_features;
1846 	}
1847 }
1848 
1849 static int ipoib_parent_init(struct net_device *ndev)
1850 {
1851 	struct ipoib_dev_priv *priv = ipoib_priv(ndev);
1852 	struct ib_port_attr attr;
1853 	int result;
1854 
1855 	result = ib_query_port(priv->ca, priv->port, &attr);
1856 	if (result) {
1857 		pr_warn("%s: ib_query_port %d failed\n", priv->ca->name,
1858 			priv->port);
1859 		return result;
1860 	}
1861 	priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
1862 
1863 	result = ib_query_pkey(priv->ca, priv->port, 0, &priv->pkey);
1864 	if (result) {
1865 		pr_warn("%s: ib_query_pkey port %d failed (ret = %d)\n",
1866 			priv->ca->name, priv->port, result);
1867 		return result;
1868 	}
1869 
1870 	result = rdma_query_gid(priv->ca, priv->port, 0, &priv->local_gid);
1871 	if (result) {
1872 		pr_warn("%s: rdma_query_gid port %d failed (ret = %d)\n",
1873 			priv->ca->name, priv->port, result);
1874 		return result;
1875 	}
1876 	memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw,
1877 	       sizeof(union ib_gid));
1878 
1879 	SET_NETDEV_DEV(priv->dev, priv->ca->dev.parent);
1880 	priv->dev->dev_port = priv->port - 1;
1881 	/* Let's set this one too for backwards compatibility. */
1882 	priv->dev->dev_id = priv->port - 1;
1883 
1884 	return 0;
1885 }
1886 
1887 static void ipoib_child_init(struct net_device *ndev)
1888 {
1889 	struct ipoib_dev_priv *priv = ipoib_priv(ndev);
1890 	struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
1891 
1892 	priv->max_ib_mtu = ppriv->max_ib_mtu;
1893 	set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
1894 	memcpy(priv->dev->dev_addr, ppriv->dev->dev_addr, INFINIBAND_ALEN);
1895 	memcpy(&priv->local_gid, &ppriv->local_gid, sizeof(priv->local_gid));
1896 }
1897 
1898 static int ipoib_ndo_init(struct net_device *ndev)
1899 {
1900 	struct ipoib_dev_priv *priv = ipoib_priv(ndev);
1901 	int rc;
1902 
1903 	if (priv->parent) {
1904 		ipoib_child_init(ndev);
1905 	} else {
1906 		rc = ipoib_parent_init(ndev);
1907 		if (rc)
1908 			return rc;
1909 	}
1910 
1911 	/* MTU will be reset when mcast join happens */
1912 	ndev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
1913 	priv->mcast_mtu = priv->admin_mtu = ndev->mtu;
1914 	ndev->max_mtu = IPOIB_CM_MTU;
1915 
1916 	ndev->neigh_priv_len = sizeof(struct ipoib_neigh);
1917 
1918 	/*
1919 	 * Set the full membership bit, so that we join the right
1920 	 * broadcast group, etc.
1921 	 */
1922 	priv->pkey |= 0x8000;
1923 
1924 	ndev->broadcast[8] = priv->pkey >> 8;
1925 	ndev->broadcast[9] = priv->pkey & 0xff;
1926 	set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
1927 
1928 	ipoib_set_dev_features(priv);
1929 
1930 	rc = ipoib_dev_init(ndev);
1931 	if (rc) {
1932 		pr_warn("%s: failed to initialize device: %s port %d (ret = %d)\n",
1933 			priv->ca->name, priv->dev->name, priv->port, rc);
1934 		return rc;
1935 	}
1936 
1937 	if (priv->parent) {
1938 		struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
1939 
1940 		dev_hold(priv->parent);
1941 
1942 		down_write(&ppriv->vlan_rwsem);
1943 		list_add_tail(&priv->list, &ppriv->child_intfs);
1944 		up_write(&ppriv->vlan_rwsem);
1945 	}
1946 
1947 	return 0;
1948 }
1949 
1950 static void ipoib_ndo_uninit(struct net_device *dev)
1951 {
1952 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1953 
1954 	ASSERT_RTNL();
1955 
1956 	/*
1957 	 * ipoib_remove_one guarantees the children are removed before the
1958 	 * parent, and that is the only place where a parent can be removed.
1959 	 */
1960 	WARN_ON(!list_empty(&priv->child_intfs));
1961 
1962 	if (priv->parent) {
1963 		struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
1964 
1965 		down_write(&ppriv->vlan_rwsem);
1966 		list_del(&priv->list);
1967 		up_write(&ppriv->vlan_rwsem);
1968 	}
1969 
1970 	ipoib_neigh_hash_uninit(dev);
1971 
1972 	ipoib_ib_dev_cleanup(dev);
1973 
	/* no more work may be queued on priv->wq */
1975 	if (priv->wq) {
1976 		flush_workqueue(priv->wq);
1977 		destroy_workqueue(priv->wq);
1978 		priv->wq = NULL;
1979 	}
1980 
1981 	if (priv->parent)
1982 		dev_put(priv->parent);
1983 }
1984 
1985 static int ipoib_set_vf_link_state(struct net_device *dev, int vf, int link_state)
1986 {
1987 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1988 
1989 	return ib_set_vf_link_state(priv->ca, vf, priv->port, link_state);
1990 }
1991 
1992 static int ipoib_get_vf_config(struct net_device *dev, int vf,
1993 			       struct ifla_vf_info *ivf)
1994 {
1995 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1996 	int err;
1997 
1998 	err = ib_get_vf_config(priv->ca, vf, priv->port, ivf);
1999 	if (err)
2000 		return err;
2001 
2002 	ivf->vf = vf;
2003 	memcpy(ivf->mac, dev->dev_addr, dev->addr_len);
2004 
2005 	return 0;
2006 }
2007 
2008 static int ipoib_set_vf_guid(struct net_device *dev, int vf, u64 guid, int type)
2009 {
2010 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
2011 
2012 	if (type != IFLA_VF_IB_NODE_GUID && type != IFLA_VF_IB_PORT_GUID)
2013 		return -EINVAL;
2014 
2015 	return ib_set_vf_guid(priv->ca, vf, priv->port, guid, type);
2016 }
2017 
2018 static int ipoib_get_vf_guid(struct net_device *dev, int vf,
2019 			     struct ifla_vf_guid *node_guid,
2020 			     struct ifla_vf_guid *port_guid)
2021 {
2022 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
2023 
2024 	return ib_get_vf_guid(priv->ca, vf, priv->port, node_guid, port_guid);
2025 }
2026 
2027 static int ipoib_get_vf_stats(struct net_device *dev, int vf,
2028 			      struct ifla_vf_stats *vf_stats)
2029 {
2030 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
2031 
2032 	return ib_get_vf_stats(priv->ca, vf, priv->port, vf_stats);
2033 }
2034 
2035 static const struct header_ops ipoib_header_ops = {
2036 	.create	= ipoib_hard_header,
2037 };
2038 
2039 static const struct net_device_ops ipoib_netdev_ops_pf = {
2040 	.ndo_init		 = ipoib_ndo_init,
2041 	.ndo_uninit		 = ipoib_ndo_uninit,
2042 	.ndo_open		 = ipoib_open,
2043 	.ndo_stop		 = ipoib_stop,
2044 	.ndo_change_mtu		 = ipoib_change_mtu,
2045 	.ndo_fix_features	 = ipoib_fix_features,
2046 	.ndo_start_xmit		 = ipoib_start_xmit,
2047 	.ndo_tx_timeout		 = ipoib_timeout,
2048 	.ndo_set_rx_mode	 = ipoib_set_mcast_list,
2049 	.ndo_get_iflink		 = ipoib_get_iflink,
2050 	.ndo_set_vf_link_state	 = ipoib_set_vf_link_state,
2051 	.ndo_get_vf_config	 = ipoib_get_vf_config,
2052 	.ndo_get_vf_stats	 = ipoib_get_vf_stats,
2053 	.ndo_get_vf_guid	 = ipoib_get_vf_guid,
2054 	.ndo_set_vf_guid	 = ipoib_set_vf_guid,
2055 	.ndo_set_mac_address	 = ipoib_set_mac,
2056 	.ndo_get_stats64	 = ipoib_get_stats,
2057 	.ndo_do_ioctl		 = ipoib_ioctl,
2058 };
2059 
2060 static const struct net_device_ops ipoib_netdev_ops_vf = {
2061 	.ndo_init		 = ipoib_ndo_init,
2062 	.ndo_uninit		 = ipoib_ndo_uninit,
2063 	.ndo_open		 = ipoib_open,
2064 	.ndo_stop		 = ipoib_stop,
2065 	.ndo_change_mtu		 = ipoib_change_mtu,
2066 	.ndo_fix_features	 = ipoib_fix_features,
	.ndo_start_xmit		 = ipoib_start_xmit,
2068 	.ndo_tx_timeout		 = ipoib_timeout,
2069 	.ndo_set_rx_mode	 = ipoib_set_mcast_list,
2070 	.ndo_get_iflink		 = ipoib_get_iflink,
2071 	.ndo_get_stats64	 = ipoib_get_stats,
2072 	.ndo_do_ioctl		 = ipoib_ioctl,
2073 };
2074 
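/*
 * Set up the link-layer parameters that are common to parent and child
 * interfaces: header ops, ethtool ops, address and header lengths, etc.
 */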
2075 void ipoib_setup_common(struct net_device *dev)
2076 {
2077 	dev->header_ops		 = &ipoib_header_ops;
2078 
2079 	ipoib_set_ethtool_ops(dev);
2080 
2081 	dev->watchdog_timeo	 = HZ;
2082 
2083 	dev->flags		|= IFF_BROADCAST | IFF_MULTICAST;
2084 
2085 	dev->hard_header_len	 = IPOIB_HARD_LEN;
2086 	dev->addr_len		 = INFINIBAND_ALEN;
2087 	dev->type		 = ARPHRD_INFINIBAND;
2088 	dev->tx_queue_len	 = ipoib_sendq_size * 2;
2089 	dev->features		 = (NETIF_F_VLAN_CHALLENGED	|
2090 				    NETIF_F_HIGHDMA);
2091 	netif_keep_dst(dev);
2092 
2093 	memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);
2094 
2095 	/*
	 * unregister_netdev always frees the netdev; we use this mode
	 * consistently to unify all of the various unregister paths,
	 * including those connected to rtnl_link_ops, which require it.
2099 	 */
2100 	dev->needs_free_netdev = true;
2101 }
2102 
2103 static void ipoib_build_priv(struct net_device *dev)
2104 {
2105 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
2106 
2107 	priv->dev = dev;
2108 	spin_lock_init(&priv->lock);
2109 	init_rwsem(&priv->vlan_rwsem);
2110 	mutex_init(&priv->mcast_mutex);
2111 
2112 	INIT_LIST_HEAD(&priv->path_list);
2113 	INIT_LIST_HEAD(&priv->child_intfs);
2114 	INIT_LIST_HEAD(&priv->dead_ahs);
2115 	INIT_LIST_HEAD(&priv->multicast_list);
2116 
	INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task);
	INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task);
	INIT_WORK(&priv->flush_light, ipoib_ib_dev_flush_light);
	INIT_WORK(&priv->flush_normal, ipoib_ib_dev_flush_normal);
	INIT_WORK(&priv->flush_heavy, ipoib_ib_dev_flush_heavy);
2122 	INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
2123 	INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
2124 	INIT_DELAYED_WORK(&priv->neigh_reap_task, ipoib_reap_neigh);
2125 }
2126 
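/*
 * Fallback netdev ops (saved as priv->rn_ops) used when the HCA does not
 * provide its own offloaded rdma_netdev implementation.
 */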
2127 static const struct net_device_ops ipoib_netdev_default_pf = {
2128 	.ndo_init		 = ipoib_dev_init_default,
2129 	.ndo_uninit		 = ipoib_dev_uninit_default,
2130 	.ndo_open		 = ipoib_ib_dev_open_default,
2131 	.ndo_stop		 = ipoib_ib_dev_stop_default,
2132 };
2133 
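/*
 * Ask the HCA driver for an offloaded rdma_netdev; fall back to a plain
 * netdev when the driver returns -EOPNOTSUPP.
 */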
2134 static struct net_device *ipoib_alloc_netdev(struct ib_device *hca, u8 port,
2135 					     const char *name)
2136 {
2137 	struct net_device *dev;
2138 
2139 	dev = rdma_alloc_netdev(hca, port, RDMA_NETDEV_IPOIB, name,
2140 				NET_NAME_UNKNOWN, ipoib_setup_common);
2141 	if (!IS_ERR(dev) || PTR_ERR(dev) != -EOPNOTSUPP)
2142 		return dev;
2143 
2144 	dev = alloc_netdev(sizeof(struct rdma_netdev), name, NET_NAME_UNKNOWN,
2145 			   ipoib_setup_common);
2146 	if (!dev)
2147 		return ERR_PTR(-ENOMEM);
2148 	return dev;
2149 }
2150 
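/*
 * Allocate the IPoIB private data, attach it to the rdma_netdev and select
 * the VF or PF netdev ops based on the device capabilities.
 */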
2151 int ipoib_intf_init(struct ib_device *hca, u8 port, const char *name,
2152 		    struct net_device *dev)
2153 {
2154 	struct rdma_netdev *rn = netdev_priv(dev);
2155 	struct ipoib_dev_priv *priv;
2156 	int rc;
2157 
2158 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
2159 	if (!priv)
2160 		return -ENOMEM;
2161 
2162 	priv->ca = hca;
2163 	priv->port = port;
2164 
2165 	rc = rdma_init_netdev(hca, port, RDMA_NETDEV_IPOIB, name,
2166 			      NET_NAME_UNKNOWN, ipoib_setup_common, dev);
2167 	if (rc) {
2168 		if (rc != -EOPNOTSUPP)
2169 			goto out;
2170 
2171 		dev->netdev_ops = &ipoib_netdev_default_pf;
2172 		rn->send = ipoib_send;
2173 		rn->attach_mcast = ipoib_mcast_attach;
2174 		rn->detach_mcast = ipoib_mcast_detach;
2175 		rn->hca = hca;
2176 	}
2177 
2178 	priv->rn_ops = dev->netdev_ops;
2179 
2180 	if (hca->attrs.device_cap_flags & IB_DEVICE_VIRTUAL_FUNCTION)
2181 		dev->netdev_ops	= &ipoib_netdev_ops_vf;
2182 	else
2183 		dev->netdev_ops	= &ipoib_netdev_ops_pf;
2184 
2185 	rn->clnt_priv = priv;
2186 	/*
2187 	 * Only the child register_netdev flows can handle priv_destructor
	 * being set, so we force it to NULL here and handle it manually
	 * until it is safe to turn it on.
2190 	 */
2191 	priv->next_priv_destructor = dev->priv_destructor;
2192 	dev->priv_destructor = NULL;
2193 
2194 	ipoib_build_priv(dev);
2195 
2196 	return 0;
2197 
2198 out:
2199 	kfree(priv);
2200 	return rc;
2201 }
2202 
2203 struct net_device *ipoib_intf_alloc(struct ib_device *hca, u8 port,
2204 				    const char *name)
2205 {
2206 	struct net_device *dev;
2207 	int rc;
2208 
2209 	dev = ipoib_alloc_netdev(hca, port, name);
2210 	if (IS_ERR(dev))
2211 		return dev;
2212 
2213 	rc = ipoib_intf_init(hca, port, name, dev);
2214 	if (rc) {
2215 		free_netdev(dev);
2216 		return ERR_PTR(rc);
2217 	}
2218 
2219 	/*
	 * Upon success the caller must either ensure ipoib_intf_free() is
	 * called, or that register_netdevice() succeeded and priv_destructor
	 * is set to ipoib_intf_free.
2223 	 */
2224 	return dev;
2225 }
2226 
2227 void ipoib_intf_free(struct net_device *dev)
2228 {
2229 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
2230 	struct rdma_netdev *rn = netdev_priv(dev);
2231 
2232 	dev->priv_destructor = priv->next_priv_destructor;
2233 	if (dev->priv_destructor)
2234 		dev->priv_destructor(dev);
2235 
2236 	/*
	 * There are some error flows around register_netdev failing that may
	 * attempt to call priv_destructor twice; prevent that from happening.
2239 	 */
2240 	dev->priv_destructor = NULL;
2241 
2242 	/* unregister/destroy is very complicated. Make bugs more obvious. */
2243 	rn->clnt_priv = NULL;
2244 
2245 	kfree(priv);
2246 }
2247 
2248 static ssize_t show_pkey(struct device *dev,
2249 			 struct device_attribute *attr, char *buf)
2250 {
2251 	struct net_device *ndev = to_net_dev(dev);
2252 	struct ipoib_dev_priv *priv = ipoib_priv(ndev);
2253 
2254 	return sprintf(buf, "0x%04x\n", priv->pkey);
2255 }
2256 static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
2257 
2258 static ssize_t show_umcast(struct device *dev,
2259 			   struct device_attribute *attr, char *buf)
2260 {
2261 	struct net_device *ndev = to_net_dev(dev);
2262 	struct ipoib_dev_priv *priv = ipoib_priv(ndev);
2263 
2264 	return sprintf(buf, "%d\n", test_bit(IPOIB_FLAG_UMCAST, &priv->flags));
2265 }
2266 
2267 void ipoib_set_umcast(struct net_device *ndev, int umcast_val)
2268 {
2269 	struct ipoib_dev_priv *priv = ipoib_priv(ndev);
2270 
2271 	if (umcast_val > 0) {
2272 		set_bit(IPOIB_FLAG_UMCAST, &priv->flags);
		ipoib_warn(priv,
			   "ignoring multicast groups joined directly by userspace\n");
2275 	} else
2276 		clear_bit(IPOIB_FLAG_UMCAST, &priv->flags);
2277 }
2278 
2279 static ssize_t set_umcast(struct device *dev,
2280 			  struct device_attribute *attr,
2281 			  const char *buf, size_t count)
2282 {
2283 	unsigned long umcast_val = simple_strtoul(buf, NULL, 0);
2284 
2285 	ipoib_set_umcast(to_net_dev(dev), umcast_val);
2286 
2287 	return count;
2288 }
2289 static DEVICE_ATTR(umcast, S_IWUSR | S_IRUGO, show_umcast, set_umcast);
2290 
2291 int ipoib_add_umcast_attr(struct net_device *dev)
2292 {
2293 	return device_create_file(&dev->dev, &dev_attr_umcast);
2294 }
2295 
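/*
 * Replace the interface_id portion of the local GID (and of dev_addr) and
 * propagate the new base GUID to all child interfaces.
 */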
2296 static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid)
2297 {
2298 	struct ipoib_dev_priv *child_priv;
2299 	struct net_device *netdev = priv->dev;
2300 
2301 	netif_addr_lock_bh(netdev);
2302 
2303 	memcpy(&priv->local_gid.global.interface_id,
2304 	       &gid->global.interface_id,
2305 	       sizeof(gid->global.interface_id));
2306 	memcpy(netdev->dev_addr + 4, &priv->local_gid, sizeof(priv->local_gid));
2307 	clear_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
2308 
2309 	netif_addr_unlock_bh(netdev);
2310 
2311 	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
2312 		down_read(&priv->vlan_rwsem);
2313 		list_for_each_entry(child_priv, &priv->child_intfs, list)
2314 			set_base_guid(child_priv, gid);
2315 		up_read(&priv->vlan_rwsem);
2316 	}
2317 }
2318 
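/*
 * Validate a user-supplied link-layer address: everything except the GUID
 * must match the current address, and the GUID must be non-zero.
 */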
2319 static int ipoib_check_lladdr(struct net_device *dev,
2320 			      struct sockaddr_storage *ss)
2321 {
2322 	union ib_gid *gid = (union ib_gid *)(ss->__data + 4);
2323 	int ret = 0;
2324 
2325 	netif_addr_lock_bh(dev);
2326 
	/* Make sure the QPN, reserved bytes and subnet prefix match the
	 * current lladdr; this also ensures the lladdr is unicast.
2329 	 */
2330 	if (memcmp(dev->dev_addr, ss->__data,
2331 		   4 + sizeof(gid->global.subnet_prefix)) ||
2332 	    gid->global.interface_id == 0)
2333 		ret = -EINVAL;
2334 
2335 	netif_addr_unlock_bh(dev);
2336 
2337 	return ret;
2338 }
2339 
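/*
 * .ndo_set_mac_address handler: only the GUID part of the hardware address
 * may be changed; the change is applied via a light flush on the global
 * workqueue.
 */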
2340 static int ipoib_set_mac(struct net_device *dev, void *addr)
2341 {
2342 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
2343 	struct sockaddr_storage *ss = addr;
2344 	int ret;
2345 
2346 	if (!(dev->priv_flags & IFF_LIVE_ADDR_CHANGE) && netif_running(dev))
2347 		return -EBUSY;
2348 
2349 	ret = ipoib_check_lladdr(dev, ss);
2350 	if (ret)
2351 		return ret;
2352 
2353 	set_base_guid(priv, (union ib_gid *)(ss->__data + 4));
2354 
2355 	queue_work(ipoib_workqueue, &priv->flush_light);
2356 
2357 	return 0;
2358 }
2359 
2360 static ssize_t create_child(struct device *dev,
2361 			    struct device_attribute *attr,
2362 			    const char *buf, size_t count)
2363 {
2364 	int pkey;
2365 	int ret;
2366 
2367 	if (sscanf(buf, "%i", &pkey) != 1)
2368 		return -EINVAL;
2369 
2370 	if (pkey <= 0 || pkey > 0xffff || pkey == 0x8000)
2371 		return -EINVAL;
2372 
2373 	ret = ipoib_vlan_add(to_net_dev(dev), pkey);
2374 
2375 	return ret ? ret : count;
2376 }
2377 static DEVICE_ATTR(create_child, S_IWUSR, NULL, create_child);
2378 
2379 static ssize_t delete_child(struct device *dev,
2380 			    struct device_attribute *attr,
2381 			    const char *buf, size_t count)
2382 {
2383 	int pkey;
2384 	int ret;
2385 
2386 	if (sscanf(buf, "%i", &pkey) != 1)
2387 		return -EINVAL;
2388 
2389 	if (pkey < 0 || pkey > 0xffff)
2390 		return -EINVAL;
2391 
2392 	ret = ipoib_vlan_delete(to_net_dev(dev), pkey);
2393 
2394 	return ret ? ret : count;
2395 
2396 }
2397 static DEVICE_ATTR(delete_child, S_IWUSR, NULL, delete_child);
2398 
2399 int ipoib_add_pkey_attr(struct net_device *dev)
2400 {
2401 	return device_create_file(&dev->dev, &dev_attr_pkey);
2402 }
2403 
2404 /*
 * We erroneously exposed the interface's port number in the dev_id
 * sysfs field long after dev_port was introduced for that purpose[1],
 * and we need to stop everyone from relying on that.
 * Let's overload the show routine for the dev_id file here
 * to gently bring the issue up.
2410  *
2411  * [1] https://www.spinics.net/lists/netdev/msg272123.html
2412  */
2413 static ssize_t dev_id_show(struct device *dev,
2414 			   struct device_attribute *attr, char *buf)
2415 {
2416 	struct net_device *ndev = to_net_dev(dev);
2417 
2418 	/*
	 * ndev->dev_port will be equal to 0 in old kernels prior to commit
	 * 9b8b2a323008 ("IB/ipoib: Use dev_port to expose network interface
	 * port numbers"). Zero was chosen as a special case so that user
	 * space applications can fall back and query dev_id to check whether
	 * it has a different value.
	 *
	 * Don't print a warning in that scenario.
2426 	 *
2427 	 * https://github.com/systemd/systemd/blob/master/src/udev/udev-builtin-net_id.c#L358
2428 	 */
2429 	if (ndev->dev_port && ndev->dev_id == ndev->dev_port)
2430 		netdev_info_once(ndev,
2431 			"\"%s\" wants to know my dev_id. Should it look at dev_port instead? See Documentation/ABI/testing/sysfs-class-net for more info.\n",
2432 			current->comm);
2433 
2434 	return sprintf(buf, "%#x\n", ndev->dev_id);
2435 }
2436 static DEVICE_ATTR_RO(dev_id);
2437 
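/* Replace the core's dev_id sysfs attribute with the warning version above */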
2438 static int ipoib_intercept_dev_id_attr(struct net_device *dev)
2439 {
2440 	device_remove_file(&dev->dev, &dev_attr_dev_id);
2441 	return device_create_file(&dev->dev, &dev_attr_dev_id);
2442 }
2443 
2444 static struct net_device *ipoib_add_port(const char *format,
2445 					 struct ib_device *hca, u8 port)
2446 {
2447 	struct rtnl_link_ops *ops = ipoib_get_link_ops();
2448 	struct rdma_netdev_alloc_params params;
2449 	struct ipoib_dev_priv *priv;
2450 	struct net_device *ndev;
2451 	int result;
2452 
2453 	ndev = ipoib_intf_alloc(hca, port, format);
2454 	if (IS_ERR(ndev)) {
2455 		pr_warn("%s, %d: ipoib_intf_alloc failed %ld\n", hca->name, port,
2456 			PTR_ERR(ndev));
2457 		return ndev;
2458 	}
2459 	priv = ipoib_priv(ndev);
2460 
2461 	INIT_IB_EVENT_HANDLER(&priv->event_handler,
2462 			      priv->ca, ipoib_event);
2463 	ib_register_event_handler(&priv->event_handler);
2464 
	/* call the event handler to make sure the pkey is in sync */
2466 	queue_work(ipoib_workqueue, &priv->flush_heavy);
2467 
2468 	result = register_netdev(ndev);
2469 	if (result) {
2470 		pr_warn("%s: couldn't register ipoib port %d; error %d\n",
2471 			hca->name, port, result);
2472 
2473 		ipoib_parent_unregister_pre(ndev);
2474 		ipoib_intf_free(ndev);
2475 		free_netdev(ndev);
2476 
2477 		return ERR_PTR(result);
2478 	}
2479 
2480 	if (hca->ops.rdma_netdev_get_params) {
2481 		int rc = hca->ops.rdma_netdev_get_params(hca, port,
2482 						     RDMA_NETDEV_IPOIB,
2483 						     &params);
2484 
2485 		if (!rc && ops->priv_size < params.sizeof_priv)
2486 			ops->priv_size = params.sizeof_priv;
2487 	}
2488 	/*
	 * We cannot set priv_destructor before register_netdev because we
	 * need priv to always be valid during the error flow, in order to
	 * execute ipoib_parent_unregister_pre(). Instead, handle it manually
	 * and only enter priv_destructor mode once we are completely
	 * registered.
2493 	 */
2494 	ndev->priv_destructor = ipoib_intf_free;
2495 
2496 	if (ipoib_intercept_dev_id_attr(ndev))
2497 		goto sysfs_failed;
2498 	if (ipoib_cm_add_mode_attr(ndev))
2499 		goto sysfs_failed;
2500 	if (ipoib_add_pkey_attr(ndev))
2501 		goto sysfs_failed;
2502 	if (ipoib_add_umcast_attr(ndev))
2503 		goto sysfs_failed;
2504 	if (device_create_file(&ndev->dev, &dev_attr_create_child))
2505 		goto sysfs_failed;
2506 	if (device_create_file(&ndev->dev, &dev_attr_delete_child))
2507 		goto sysfs_failed;
2508 
2509 	return ndev;
2510 
2511 sysfs_failed:
2512 	ipoib_parent_unregister_pre(ndev);
2513 	unregister_netdev(ndev);
2514 	return ERR_PTR(-ENOMEM);
2515 }
2516 
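/*
 * ib_client add callback: create one "ib%d" interface for every IB port of
 * the newly registered device.
 */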
2517 static void ipoib_add_one(struct ib_device *device)
2518 {
2519 	struct list_head *dev_list;
2520 	struct net_device *dev;
2521 	struct ipoib_dev_priv *priv;
2522 	unsigned int p;
2523 	int count = 0;
2524 
2525 	dev_list = kmalloc(sizeof(*dev_list), GFP_KERNEL);
2526 	if (!dev_list)
2527 		return;
2528 
2529 	INIT_LIST_HEAD(dev_list);
2530 
2531 	rdma_for_each_port (device, p) {
2532 		if (!rdma_protocol_ib(device, p))
2533 			continue;
2534 		dev = ipoib_add_port("ib%d", device, p);
2535 		if (!IS_ERR(dev)) {
2536 			priv = ipoib_priv(dev);
2537 			list_add_tail(&priv->list, dev_list);
2538 			count++;
2539 		}
2540 	}
2541 
2542 	if (!count) {
2543 		kfree(dev_list);
2544 		return;
2545 	}
2546 
2547 	ib_set_client_data(device, &ipoib_client, dev_list);
2548 }
2549 
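/*
 * ib_client remove callback: unregister every interface (children first)
 * that was created for this device.
 */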
2550 static void ipoib_remove_one(struct ib_device *device, void *client_data)
2551 {
2552 	struct ipoib_dev_priv *priv, *tmp, *cpriv, *tcpriv;
2553 	struct list_head *dev_list = client_data;
2554 
2555 	if (!dev_list)
2556 		return;
2557 
2558 	list_for_each_entry_safe(priv, tmp, dev_list, list) {
2559 		LIST_HEAD(head);
2560 		ipoib_parent_unregister_pre(priv->dev);
2561 
2562 		rtnl_lock();
2563 
2564 		list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs,
2565 					 list)
2566 			unregister_netdevice_queue(cpriv->dev, &head);
2567 		unregister_netdevice_queue(priv->dev, &head);
2568 		unregister_netdevice_many(&head);
2569 
2570 		rtnl_unlock();
2571 	}
2572 
2573 	kfree(dev_list);
2574 }
2575 
2576 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
2577 static struct notifier_block ipoib_netdev_notifier = {
2578 	.notifier_call = ipoib_netdev_event,
2579 };
2580 #endif
2581 
2582 static int __init ipoib_init_module(void)
2583 {
2584 	int ret;
2585 
2586 	ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
2587 	ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
2588 	ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);
2589 
2590 	ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
2591 	ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
2592 	ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE);
2593 #ifdef CONFIG_INFINIBAND_IPOIB_CM
2594 	ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
2595 	ipoib_max_conn_qp = max(ipoib_max_conn_qp, 0);
2596 #endif
2597 
2598 	/*
2599 	 * When copying small received packets, we only copy from the
2600 	 * linear data part of the SKB, so we rely on this condition.
2601 	 */
2602 	BUILD_BUG_ON(IPOIB_CM_COPYBREAK > IPOIB_CM_HEAD_SIZE);
2603 
2604 	ipoib_register_debugfs();
2605 
2606 	/*
2607 	 * We create a global workqueue here that is used for all flush
2608 	 * operations.  However, if you attempt to flush a workqueue
2609 	 * from a task on that same workqueue, it deadlocks the system.
2610 	 * We want to be able to flush the tasks associated with a
2611 	 * specific net device, so we also create a workqueue for each
2612 	 * netdevice.  We queue up the tasks for that device only on
2613 	 * its private workqueue, and we only queue up flush events
2614 	 * on our global flush workqueue.  This avoids the deadlocks.
2615 	 */
2616 	ipoib_workqueue = alloc_ordered_workqueue("ipoib_flush", 0);
2617 	if (!ipoib_workqueue) {
2618 		ret = -ENOMEM;
2619 		goto err_fs;
2620 	}
2621 
2622 	ib_sa_register_client(&ipoib_sa_client);
2623 
2624 	ret = ib_register_client(&ipoib_client);
2625 	if (ret)
2626 		goto err_sa;
2627 
2628 	ret = ipoib_netlink_init();
2629 	if (ret)
2630 		goto err_client;
2631 
2632 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
2633 	register_netdevice_notifier(&ipoib_netdev_notifier);
2634 #endif
2635 	return 0;
2636 
2637 err_client:
2638 	ib_unregister_client(&ipoib_client);
2639 
2640 err_sa:
2641 	ib_sa_unregister_client(&ipoib_sa_client);
2642 	destroy_workqueue(ipoib_workqueue);
2643 
2644 err_fs:
2645 	ipoib_unregister_debugfs();
2646 
2647 	return ret;
2648 }
2649 
2650 static void __exit ipoib_cleanup_module(void)
2651 {
2652 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
2653 	unregister_netdevice_notifier(&ipoib_netdev_notifier);
2654 #endif
2655 	ipoib_netlink_fini();
2656 	ib_unregister_client(&ipoib_client);
2657 	ib_sa_unregister_client(&ipoib_sa_client);
2658 	ipoib_unregister_debugfs();
2659 	destroy_workqueue(ipoib_workqueue);
2660 }
2661 
2662 module_init(ipoib_init_module);
2663 module_exit(ipoib_cleanup_module);
2664