/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ipoib.h"

#include <linux/module.h>

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>

#include <linux/if_arp.h>	/* For ARPHRD_xxx */

#include <linux/ip.h>
#include <linux/in.h>

#include <linux/jhash.h>
#include <net/arp.h>
#include <net/addrconf.h>
#include <linux/inetdevice.h>
#include <rdma/ib_cache.h>

#define DRV_VERSION "1.0.0"

const char ipoib_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE;
int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE;

module_param_named(send_queue_size, ipoib_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
int ipoib_debug_level;

module_param_named(debug_level, ipoib_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif

struct ipoib_path_iter {
	struct net_device *dev;
	struct ipoib_path  path;
};

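/*
 * Default 20-byte IPoIB hardware broadcast address: the 4-byte QPN field
 * holding the broadcast QPN 0xffffff, followed by the IPv4 broadcast MGID
 * ff12:401b::ffff:ffff.  Bytes 8 and 9 (the P_Key field of the MGID) are
 * left zero here and filled in per port in ipoib_add_port().
 */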
static const u8 ipv4_bcast_addr[] = {
	0x00, 0xff, 0xff, 0xff,
	0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
};

struct workqueue_struct *ipoib_workqueue;

struct ib_sa_client ipoib_sa_client;

static void ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device, void *client_data);
static void ipoib_neigh_reclaim(struct rcu_head *rp);
static struct net_device *ipoib_get_net_dev_by_params(
		struct ib_device *dev, u8 port, u16 pkey,
		const union ib_gid *gid, const struct sockaddr *addr,
		void *client_data);

static struct ib_client ipoib_client = {
	.name   = "ipoib",
	.add    = ipoib_add_one,
	.remove = ipoib_remove_one,
	.get_net_dev_by_params = ipoib_get_net_dev_by_params,
};

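/*
 * Bring the interface up: mark it administratively up, open and start the
 * underlying IB device, and propagate IFF_UP to any child interfaces.
 * Note that we still return 0 when the only failure is a missing P_Key,
 * so the interface can finish coming up once its P_Key appears.
 */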
int ipoib_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "bringing up interface\n");

	netif_carrier_off(dev);

	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	if (ipoib_ib_dev_open(dev)) {
		if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
			return 0;
		goto err_disable;
	}

	if (ipoib_ib_dev_up(dev))
		goto err_stop;

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring up any child interfaces too */
		down_read(&priv->vlan_rwsem);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (flags & IFF_UP)
				continue;

			dev_change_flags(cpriv->dev, flags | IFF_UP);
		}
		up_read(&priv->vlan_rwsem);
	}

	netif_start_queue(dev);

	return 0;

err_stop:
	ipoib_ib_dev_stop(dev);

err_disable:
	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	return -EINVAL;
}

static int ipoib_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "stopping interface\n");

	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	netif_stop_queue(dev);

	ipoib_ib_dev_down(dev);
	ipoib_ib_dev_stop(dev);

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring down any child interfaces too */
		down_read(&priv->vlan_rwsem);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (!(flags & IFF_UP))
				continue;

			dev_change_flags(cpriv->dev, flags & ~IFF_UP);
		}
		up_read(&priv->vlan_rwsem);
	}

	return 0;
}

static void ipoib_uninit(struct net_device *dev)
{
	ipoib_dev_cleanup(dev);
}

static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
		features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);

	return features;
}

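/*
 * In connected mode the MTU may exceed the IB L2 limit, up to
 * ipoib_cm_max_mtu(); in datagram mode it is bounded by the port's IB MTU
 * and additionally clamped to the multicast group MTU once that is known.
 */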
static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* dev->mtu > 2K ==> connected mode */
	if (ipoib_cm_admin_enabled(dev)) {
		if (new_mtu > ipoib_cm_max_mtu(dev))
			return -EINVAL;

		if (new_mtu > priv->mcast_mtu)
			ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n",
				   priv->mcast_mtu);

		dev->mtu = new_mtu;
		return 0;
	}

	if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
		return -EINVAL;

	priv->admin_mtu = new_mtu;

	dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);

	return 0;
}

/* Called with an RCU read lock taken */
static bool ipoib_is_dev_match_addr_rcu(const struct sockaddr *addr,
					struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct in_device *in_dev;
	struct sockaddr_in *addr_in = (struct sockaddr_in *)addr;
	struct sockaddr_in6 *addr_in6 = (struct sockaddr_in6 *)addr;
	__be32 ret_addr;

	switch (addr->sa_family) {
	case AF_INET:
		in_dev = in_dev_get(dev);
		if (!in_dev)
			return false;

		ret_addr = inet_confirm_addr(net, in_dev, 0,
					     addr_in->sin_addr.s_addr,
					     RT_SCOPE_HOST);
		in_dev_put(in_dev);
		if (ret_addr)
			return true;

		break;
	case AF_INET6:
		if (IS_ENABLED(CONFIG_IPV6) &&
		    ipv6_chk_addr(net, &addr_in6->sin6_addr, dev, 1))
			return true;

		break;
	}
	return false;
}

/**
 * Find the master net_device on top of the given net_device.
 * @dev: base IPoIB net_device
 *
 * Returns the master net_device with a reference held, or the same net_device
 * if no master exists.
 */
static struct net_device *ipoib_get_master_net_dev(struct net_device *dev)
{
	struct net_device *master;

	rcu_read_lock();
	master = netdev_master_upper_dev_get_rcu(dev);
	if (master)
		dev_hold(master);
	rcu_read_unlock();

	if (master)
		return master;

	dev_hold(dev);
	return dev;
}

/**
 * Find a net_device matching the given address, which is an upper device of
 * the given net_device.
 * @addr: IP address to look for.
 * @dev: base IPoIB net_device
 *
 * If found, returns the net_device with a reference held. Otherwise return
 * NULL.
 */
static struct net_device *ipoib_get_net_dev_match_addr(
		const struct sockaddr *addr, struct net_device *dev)
{
	struct net_device *upper,
			  *result = NULL;
	struct list_head *iter;

	rcu_read_lock();
	if (ipoib_is_dev_match_addr_rcu(addr, dev)) {
		dev_hold(dev);
		result = dev;
		goto out;
	}

	netdev_for_each_all_upper_dev_rcu(dev, upper, iter) {
		if (ipoib_is_dev_match_addr_rcu(addr, upper)) {
			dev_hold(upper);
			result = upper;
			break;
		}
	}
out:
	rcu_read_unlock();
	return result;
}

/* Returns the number of IPoIB netdevs on top of a given ipoib device
 * matching a pkey_index and address, if one exists.
 *
 * @found_net_dev: contains a matching net_device if the return value >= 1,
 * with a reference held. */
static int ipoib_match_gid_pkey_addr(struct ipoib_dev_priv *priv,
				     const union ib_gid *gid,
				     u16 pkey_index,
				     const struct sockaddr *addr,
				     int nesting,
				     struct net_device **found_net_dev)
{
	struct ipoib_dev_priv *child_priv;
	struct net_device *net_dev = NULL;
	int matches = 0;

	if (priv->pkey_index == pkey_index &&
	    (!gid || !memcmp(gid, &priv->local_gid, sizeof(*gid)))) {
		if (!addr) {
			net_dev = ipoib_get_master_net_dev(priv->dev);
		} else {
			/* Verify the net_device matches the IP address, as
			 * IPoIB child devices currently share a GID. */
			net_dev = ipoib_get_net_dev_match_addr(addr, priv->dev);
		}
		if (net_dev) {
			if (!*found_net_dev)
				*found_net_dev = net_dev;
			else
				dev_put(net_dev);
			++matches;
		}
	}

	/* Check child interfaces */
	down_read_nested(&priv->vlan_rwsem, nesting);
	list_for_each_entry(child_priv, &priv->child_intfs, list) {
		matches += ipoib_match_gid_pkey_addr(child_priv, gid,
						    pkey_index, addr,
						    nesting + 1,
						    found_net_dev);
		if (matches > 1)
			break;
	}
	up_read(&priv->vlan_rwsem);

	return matches;
}

/* Returns the number of matching net_devs found (between 0 and 2). Also
 * returns the matching net_device in the @net_dev parameter, with a
 * reference held, if the number of matches >= 1 */
static int __ipoib_get_net_dev_by_params(struct list_head *dev_list, u8 port,
					 u16 pkey_index,
					 const union ib_gid *gid,
					 const struct sockaddr *addr,
					 struct net_device **net_dev)
{
	struct ipoib_dev_priv *priv;
	int matches = 0;

	*net_dev = NULL;

	list_for_each_entry(priv, dev_list, list) {
		if (priv->port != port)
			continue;

		matches += ipoib_match_gid_pkey_addr(priv, gid, pkey_index,
						     addr, 0, net_dev);
		if (matches > 1)
			break;
	}

	return matches;
}

static struct net_device *ipoib_get_net_dev_by_params(
		struct ib_device *dev, u8 port, u16 pkey,
		const union ib_gid *gid, const struct sockaddr *addr,
		void *client_data)
{
	struct net_device *net_dev;
	struct list_head *dev_list = client_data;
	u16 pkey_index;
	int matches;
	int ret;

	if (!rdma_protocol_ib(dev, port))
		return NULL;

	ret = ib_find_cached_pkey(dev, port, pkey, &pkey_index);
	if (ret)
		return NULL;

	if (!dev_list)
		return NULL;

	/* See if we can find a unique device matching the L2 parameters */
	matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
						gid, NULL, &net_dev);

	switch (matches) {
	case 0:
		return NULL;
	case 1:
		return net_dev;
	}

	dev_put(net_dev);

	/* Couldn't find a unique device with L2 parameters only. Use L3
	 * address to uniquely match the net device */
	matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
						gid, addr, &net_dev);
	switch (matches) {
	case 0:
		return NULL;
	default:
		dev_warn_ratelimited(&dev->dev,
				     "duplicate IP address detected\n");
		/* Fall through */
	case 1:
		return net_dev;
	}
}

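/*
 * Backend of the per-device "mode" sysfs attribute (created via
 * ipoib_cm_add_mode_attr()), typically driven by something like
 * `echo connected > /sys/class/net/ib0/mode`; note that the comparisons
 * below include the trailing newline from such writes.  Called under RTNL,
 * which is dropped around ipoib_flush_paths().
 */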
int ipoib_set_mode(struct net_device *dev, const char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* flush paths if we switch modes so that connections are restarted */
	if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
		set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
		ipoib_warn(priv, "enabling connected mode "
			   "will cause multicast packet drops\n");
		netdev_update_features(dev);
		dev_set_mtu(dev, ipoib_cm_max_mtu(dev));
		rtnl_unlock();
		priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;

		ipoib_flush_paths(dev);
		rtnl_lock();
		return 0;
	}

	if (!strcmp(buf, "datagram\n")) {
		clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
		netdev_update_features(dev);
		dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
		rtnl_unlock();
		ipoib_flush_paths(dev);
		rtnl_lock();
		return 0;
	}

	return -EINVAL;
}

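/*
 * Path records are cached in an rb-tree keyed by destination GID.
 * Callers must hold priv->lock.
 */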
static struct ipoib_path *__path_find(struct net_device *dev, void *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node *n = priv->path_tree.rb_node;
	struct ipoib_path *path;
	int ret;

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		ret = memcmp(gid, path->pathrec.dgid.raw,
			     sizeof (union ib_gid));

		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			return path;
	}

	return NULL;
}

static int __path_add(struct net_device *dev, struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node **n = &priv->path_tree.rb_node;
	struct rb_node *pn = NULL;
	struct ipoib_path *tpath;
	int ret;

	while (*n) {
		pn = *n;
		tpath = rb_entry(pn, struct ipoib_path, rb_node);

		ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
			     sizeof (union ib_gid));
		if (ret < 0)
			n = &pn->rb_left;
		else if (ret > 0)
			n = &pn->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&path->rb_node, pn, n);
	rb_insert_color(&path->rb_node, &priv->path_tree);

	list_add_tail(&path->list, &priv->path_list);

	return 0;
}

static void path_free(struct net_device *dev, struct ipoib_path *path)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&path->queue)))
		dev_kfree_skb_irq(skb);

	ipoib_dbg(netdev_priv(dev), "path_free\n");

	/* remove all neigh connected to this path */
	ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);

	if (path->ah)
		ipoib_put_ah(path->ah);

	kfree(path);
}

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev)
{
	struct ipoib_path_iter *iter;

	iter = kmalloc(sizeof *iter, GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	memset(iter->path.pathrec.dgid.raw, 0, 16);

	if (ipoib_path_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int ipoib_path_iter_next(struct ipoib_path_iter *iter)
{
	struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
	struct rb_node *n;
	struct ipoib_path *path;
	int ret = 1;

	spin_lock_irq(&priv->lock);

	n = rb_first(&priv->path_tree);

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
			   sizeof (union ib_gid)) < 0) {
			iter->path = *path;
			ret = 0;
			break;
		}

		n = rb_next(n);
	}

	spin_unlock_irq(&priv->lock);

	return ret;
}

void ipoib_path_iter_read(struct ipoib_path_iter *iter,
			  struct ipoib_path *path)
{
	*path = iter->path;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */

void ipoib_mark_paths_invalid(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path, *tp;

	spin_lock_irq(&priv->lock);

	list_for_each_entry_safe(path, tp, &priv->path_list, list) {
		ipoib_dbg(priv, "mark path LID 0x%04x GID %pI6 invalid\n",
			be16_to_cpu(path->pathrec.dlid),
			path->pathrec.dgid.raw);
		path->valid = 0;
	}

	spin_unlock_irq(&priv->lock);
}

void ipoib_flush_paths(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path, *tp;
	LIST_HEAD(remove_list);
	unsigned long flags;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	list_splice_init(&priv->path_list, &remove_list);

	list_for_each_entry(path, &remove_list, list)
		rb_erase(&path->rb_node, &priv->path_tree);

	list_for_each_entry_safe(path, tp, &remove_list, list) {
		if (path->query)
			ib_sa_cancel_query(path->query_id, path->query);
		spin_unlock_irqrestore(&priv->lock, flags);
		netif_tx_unlock_bh(dev);
		wait_for_completion(&path->done);
		path_free(dev, path);
		netif_tx_lock_bh(dev);
		spin_lock_irqsave(&priv->lock, flags);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
}

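/*
 * Completion handler for the SA path record query.  On success, build an
 * address handle from the returned record, attach it to the path and all
 * neighbours waiting on it, and collect any queued skbs on a local list so
 * they can be retransmitted after priv->lock is dropped.
 */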
static void path_rec_completion(int status,
				struct ib_sa_path_rec *pathrec,
				void *path_ptr)
{
	struct ipoib_path *path = path_ptr;
	struct net_device *dev = path->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_ah *ah = NULL;
	struct ipoib_ah *old_ah = NULL;
	struct ipoib_neigh *neigh, *tn;
	struct sk_buff_head skqueue;
	struct sk_buff *skb;
	unsigned long flags;

	if (!status)
		ipoib_dbg(priv, "PathRec LID 0x%04x for GID %pI6\n",
			  be16_to_cpu(pathrec->dlid), pathrec->dgid.raw);
	else
		ipoib_dbg(priv, "PathRec status %d for GID %pI6\n",
			  status, path->pathrec.dgid.raw);

	skb_queue_head_init(&skqueue);

	if (!status) {
		struct ib_ah_attr av;

		if (!ib_init_ah_from_path(priv->ca, priv->port, pathrec, &av))
			ah = ipoib_create_ah(dev, priv->pd, &av);
	}

	spin_lock_irqsave(&priv->lock, flags);

	if (!IS_ERR_OR_NULL(ah)) {
		path->pathrec = *pathrec;

		old_ah   = path->ah;
		path->ah = ah;

		ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
			  ah, be16_to_cpu(pathrec->dlid), pathrec->sl);

		while ((skb = __skb_dequeue(&path->queue)))
			__skb_queue_tail(&skqueue, skb);

		list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
			if (neigh->ah) {
				WARN_ON(neigh->ah != old_ah);
				/*
				 * Dropping the ah reference inside
				 * priv->lock is safe here, because we
				 * will hold one more reference from
				 * the original value of path->ah (ie
				 * old_ah).
				 */
				ipoib_put_ah(neigh->ah);
			}
			kref_get(&path->ah->ref);
			neigh->ah = path->ah;

			if (ipoib_cm_enabled(dev, neigh->daddr)) {
				if (!ipoib_cm_get(neigh))
					ipoib_cm_set(neigh, ipoib_cm_create_tx(dev,
									       path,
									       neigh));
				if (!ipoib_cm_get(neigh)) {
					ipoib_neigh_free(neigh);
					continue;
				}
			}

			while ((skb = __skb_dequeue(&neigh->queue)))
				__skb_queue_tail(&skqueue, skb);
		}
		path->valid = 1;
	}

	path->query = NULL;
	complete(&path->done);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (IS_ERR_OR_NULL(ah))
		ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);

	if (old_ah)
		ipoib_put_ah(old_ah);

	while ((skb = __skb_dequeue(&skqueue))) {
		skb->dev = dev;
		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed "
				   "to requeue packet\n");
	}
}

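/*
 * Allocate and initialize a new path toward @gid.  GFP_ATOMIC because
 * callers hold priv->lock with interrupts disabled; returns NULL before
 * the broadcast group is joined, since the traffic class comes from it.
 */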
static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;

	if (!priv->broadcast)
		return NULL;

	path = kzalloc(sizeof *path, GFP_ATOMIC);
	if (!path)
		return NULL;

	path->dev = dev;

	skb_queue_head_init(&path->queue);

	INIT_LIST_HEAD(&path->neigh_list);

	memcpy(path->pathrec.dgid.raw, gid, sizeof (union ib_gid));
	path->pathrec.sgid	    = priv->local_gid;
	path->pathrec.pkey	    = cpu_to_be16(priv->pkey);
	path->pathrec.numb_path     = 1;
	path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;

	return path;
}

static int path_rec_start(struct net_device *dev,
			  struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "Start path record lookup for %pI6\n",
		  path->pathrec.dgid.raw);

	init_completion(&path->done);

	path->query_id =
		ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port,
				   &path->pathrec,
				   IB_SA_PATH_REC_DGID		|
				   IB_SA_PATH_REC_SGID		|
				   IB_SA_PATH_REC_NUMB_PATH	|
				   IB_SA_PATH_REC_TRAFFIC_CLASS |
				   IB_SA_PATH_REC_PKEY,
				   1000, GFP_ATOMIC,
				   path_rec_completion,
				   path, &path->query);
	if (path->query_id < 0) {
		ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id);
		path->query = NULL;
		complete(&path->done);
		return path->query_id;
	}

	return 0;
}

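/*
 * Slow path for transmit: create a neighbour entry for @daddr and resolve
 * its path, sending the skb immediately if an address handle already
 * exists, queueing it while the path record query is in flight, or
 * dropping it when the queue is full.
 */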
static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
			   struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;
	struct ipoib_neigh *neigh;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	neigh = ipoib_neigh_alloc(daddr, dev);
	if (!neigh) {
		spin_unlock_irqrestore(&priv->lock, flags);
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		return;
	}

	path = __path_find(dev, daddr + 4);
	if (!path) {
		path = path_rec_create(dev, daddr + 4);
		if (!path)
			goto err_path;

		__path_add(dev, path);
	}

	list_add_tail(&neigh->list, &path->neigh_list);

	if (path->ah) {
		kref_get(&path->ah->ref);
		neigh->ah = path->ah;

		if (ipoib_cm_enabled(dev, neigh->daddr)) {
			if (!ipoib_cm_get(neigh))
				ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, path, neigh));
			if (!ipoib_cm_get(neigh)) {
				ipoib_neigh_free(neigh);
				goto err_drop;
			}
			if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)
				__skb_queue_tail(&neigh->queue, skb);
			else {
				ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
					   skb_queue_len(&neigh->queue));
				goto err_drop;
			}
		} else {
			spin_unlock_irqrestore(&priv->lock, flags);
			ipoib_send(dev, skb, path->ah, IPOIB_QPN(daddr));
			ipoib_neigh_put(neigh);
			return;
		}
	} else {
		neigh->ah = NULL;

		if (!path->query && path_rec_start(dev, path))
			goto err_path;
		if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)
			__skb_queue_tail(&neigh->queue, skb);
		else
			goto err_drop;
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	ipoib_neigh_put(neigh);
	return;

err_path:
	ipoib_neigh_free(neigh);
err_drop:
	++dev->stats.tx_dropped;
	dev_kfree_skb_any(skb);

	spin_unlock_irqrestore(&priv->lock, flags);
	ipoib_neigh_put(neigh);
}

static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
			     struct ipoib_cb *cb)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	path = __path_find(dev, cb->hwaddr + 4);
	if (!path || !path->valid) {
		int new_path = 0;

		if (!path) {
			path = path_rec_create(dev, cb->hwaddr + 4);
			new_path = 1;
		}
		if (path) {
			if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
				__skb_queue_tail(&path->queue, skb);
			} else {
				++dev->stats.tx_dropped;
				dev_kfree_skb_any(skb);
			}

			if (!path->query && path_rec_start(dev, path)) {
				spin_unlock_irqrestore(&priv->lock, flags);
				if (new_path)
					path_free(dev, path);
				return;
			} else
				__path_add(dev, path);
		} else {
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}

		spin_unlock_irqrestore(&priv->lock, flags);
		return;
	}

	if (path->ah) {
		ipoib_dbg(priv, "Send unicast ARP to %04x\n",
			  be16_to_cpu(path->pathrec.dlid));

		spin_unlock_irqrestore(&priv->lock, flags);
		ipoib_send(dev, skb, path->ah, IPOIB_QPN(cb->hwaddr));
		return;
	} else if ((path->query || !path_rec_start(dev, path)) &&
		   skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
		__skb_queue_tail(&path->queue, skb);
	} else {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}

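/*
 * The 20-byte destination hardware address was stashed in skb->cb by
 * ipoib_hard_header(): 4 bytes of QPN followed by the 16-byte GID, so
 * hwaddr[4] is the first GID byte (0xff for multicast MGIDs).
 */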
static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh;
	struct ipoib_cb *cb = ipoib_skb_cb(skb);
	struct ipoib_header *header;
	unsigned long flags;

	header = (struct ipoib_header *) skb->data;

	if (unlikely(cb->hwaddr[4] == 0xff)) {
		/* multicast, arrange "if" according to probability */
		if ((header->proto != htons(ETH_P_IP)) &&
		    (header->proto != htons(ETH_P_IPV6)) &&
		    (header->proto != htons(ETH_P_ARP)) &&
		    (header->proto != htons(ETH_P_RARP)) &&
		    (header->proto != htons(ETH_P_TIPC))) {
			/* ethertype not supported by IPoIB */
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
		/* Add in the P_Key for multicast */
		cb->hwaddr[8] = (priv->pkey >> 8) & 0xff;
		cb->hwaddr[9] = priv->pkey & 0xff;

		neigh = ipoib_neigh_get(dev, cb->hwaddr);
		if (likely(neigh))
			goto send_using_neigh;
		ipoib_mcast_send(dev, cb->hwaddr, skb);
		return NETDEV_TX_OK;
	}

	/* unicast, arrange "switch" according to probability */
	switch (header->proto) {
	case htons(ETH_P_IP):
	case htons(ETH_P_IPV6):
	case htons(ETH_P_TIPC):
		neigh = ipoib_neigh_get(dev, cb->hwaddr);
		if (unlikely(!neigh)) {
			neigh_add_path(skb, cb->hwaddr, dev);
			return NETDEV_TX_OK;
		}
		break;
	case htons(ETH_P_ARP):
	case htons(ETH_P_RARP):
		/* for unicast ARP and RARP, always perform a path lookup */
		unicast_arp_send(skb, dev, cb);
		return NETDEV_TX_OK;
	default:
		/* ethertype not supported by IPoIB */
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

send_using_neigh:
	/* note we now hold a ref to neigh */
	if (ipoib_cm_get(neigh)) {
		if (ipoib_cm_up(neigh)) {
			ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
			goto unref;
		}
	} else if (neigh->ah) {
		ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(cb->hwaddr));
		goto unref;
	}

	if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
		spin_lock_irqsave(&priv->lock, flags);
		__skb_queue_tail(&neigh->queue, skb);
		spin_unlock_irqrestore(&priv->lock, flags);
	} else {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}

unref:
	ipoib_neigh_put(neigh);

	return NETDEV_TX_OK;
}

static void ipoib_timeout(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
		   jiffies_to_msecs(jiffies - dev->trans_start));
	ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
		   netif_queue_stopped(dev),
		   priv->tx_head, priv->tx_tail);
	/* XXX reset QP, etc. */
}

static int ipoib_hard_header(struct sk_buff *skb,
			     struct net_device *dev,
			     unsigned short type,
			     const void *daddr, const void *saddr, unsigned len)
{
	struct ipoib_header *header;
	struct ipoib_cb *cb = ipoib_skb_cb(skb);

	header = (struct ipoib_header *) skb_push(skb, sizeof *header);

	header->proto = htons(type);
	header->reserved = 0;

	/*
	 * We don't rely on the dst_entry structure; we always stuff the
	 * destination address into skb->cb so we can figure out where
	 * to send the packet later.
	 */
	memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN);

	return sizeof *header;
}

static void ipoib_set_mcast_list(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
		ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set");
		return;
	}

	queue_work(priv->wq, &priv->restart_task);
}

static int ipoib_get_iflink(const struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* parent interface */
	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
		return dev->ifindex;

	/* child/vlan interface */
	return priv->parent->ifindex;
}

static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
{
	/*
	 * Use only the address parts that contribute to spreading.
	 * The subnet prefix is not used, as one cannot connect to the
	 * same remote port (GUID) using the same remote QPN via two
	 * different subnets.
	 */
	 /* qpn octets[1:4) & port GUID octets[12:20) */
	u32 *d32 = (u32 *) daddr;
	u32 hv;

	hv = jhash_3words(d32[3], d32[4], IPOIB_QPN_MASK & d32[0], 0);
	return hv & htbl->mask;
}

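/*
 * Lockless lookup in the neighbour hash table under rcu_read_lock_bh().
 * On success a reference is taken on behalf of the caller and the entry's
 * GC timestamp (neigh->alive) is refreshed.
 */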
struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh *neigh = NULL;
	u32 hash_val;

	rcu_read_lock_bh();

	htbl = rcu_dereference_bh(ntbl->htbl);

	if (!htbl)
		goto out_unlock;

	hash_val = ipoib_addr_hash(htbl, daddr);
	for (neigh = rcu_dereference_bh(htbl->buckets[hash_val]);
	     neigh != NULL;
	     neigh = rcu_dereference_bh(neigh->hnext)) {
		if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
			/* found, take one ref on behalf of the caller */
			if (!atomic_inc_not_zero(&neigh->refcnt)) {
				/* deleted */
				neigh = NULL;
				goto out_unlock;
			}
			neigh->alive = jiffies;
			goto out_unlock;
		}
	}

out_unlock:
	rcu_read_unlock_bh();
	return neigh;
}

static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
{
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	unsigned long neigh_obsolete;
	unsigned long dt;
	unsigned long flags;
	int i;
	LIST_HEAD(remove_list);
	struct ipoib_mcast *mcast, *tmcast;
	struct net_device *dev = priv->dev;

	if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
		return;

	spin_lock_irqsave(&priv->lock, flags);

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));

	if (!htbl)
		goto out_unlock;

	/* neigh is obsolete if it was idle for two GC periods */
	dt = 2 * arp_tbl.gc_interval;
	neigh_obsolete = jiffies - dt;
	/* handle possible race condition */
	if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
		goto out_unlock;

	for (i = 0; i < htbl->size; i++) {
		struct ipoib_neigh *neigh;
		struct ipoib_neigh __rcu **np = &htbl->buckets[i];

		while ((neigh = rcu_dereference_protected(*np,
							  lockdep_is_held(&priv->lock))) != NULL) {
			/* was the neigh idle for two GC periods? */
			if (time_after(neigh_obsolete, neigh->alive)) {
				u8 *mgid = neigh->daddr + 4;

				/* Is this multicast? */
				if (*mgid == 0xff) {
					mcast = __ipoib_mcast_find(dev, mgid);

					if (mcast && test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
						list_del(&mcast->list);
						rb_erase(&mcast->rb_node, &priv->multicast_tree);
						list_add_tail(&mcast->list, &remove_list);
					}
				}

				rcu_assign_pointer(*np,
						   rcu_dereference_protected(neigh->hnext,
									     lockdep_is_held(&priv->lock)));
				/* remove from path/mc list */
				list_del(&neigh->list);
				call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
			} else {
				np = &neigh->hnext;
			}
		}
	}

out_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
		ipoib_mcast_leave(dev, mcast);
		ipoib_mcast_free(mcast);
	}
}

static void ipoib_reap_neigh(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, neigh_reap_task.work);

	__ipoib_reap_neigh(priv);

	if (!test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
		queue_delayed_work(priv->wq, &priv->neigh_reap_task,
				   arp_tbl.gc_interval);
}

static struct ipoib_neigh *ipoib_neigh_ctor(u8 *daddr,
				      struct net_device *dev)
{
	struct ipoib_neigh *neigh;

	neigh = kzalloc(sizeof *neigh, GFP_ATOMIC);
	if (!neigh)
		return NULL;

	neigh->dev = dev;
	memcpy(&neigh->daddr, daddr, sizeof(neigh->daddr));
	skb_queue_head_init(&neigh->queue);
	INIT_LIST_HEAD(&neigh->list);
	ipoib_cm_set(neigh, NULL);
	/* one ref on behalf of the caller */
	atomic_set(&neigh->refcnt, 1);

	return neigh;
}

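/*
 * Look up or create a neighbour entry.  Must be called with priv->lock
 * held (see the lockdep annotations): the search is repeated under the
 * lock in case another thread inserted the entry or the table was resized
 * since the caller's lockless lookup failed.
 */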
struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr,
				      struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh *neigh;
	u32 hash_val;

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));
	if (!htbl) {
		neigh = NULL;
		goto out_unlock;
	}

	/* need to add a new neigh, but maybe some other thread beat us to it?
	 * Recalculate the hash, since a resize may have taken place, and
	 * search again before inserting.
	 */
	hash_val = ipoib_addr_hash(htbl, daddr);
	for (neigh = rcu_dereference_protected(htbl->buckets[hash_val],
					       lockdep_is_held(&priv->lock));
	     neigh != NULL;
	     neigh = rcu_dereference_protected(neigh->hnext,
					       lockdep_is_held(&priv->lock))) {
		if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
			/* found, take one ref on behalf of the caller */
			if (!atomic_inc_not_zero(&neigh->refcnt)) {
				/* deleted */
				neigh = NULL;
				break;
			}
			neigh->alive = jiffies;
			goto out_unlock;
		}
	}

	neigh = ipoib_neigh_ctor(daddr, dev);
	if (!neigh)
		goto out_unlock;

	/* one ref on behalf of the hash table */
	atomic_inc(&neigh->refcnt);
	neigh->alive = jiffies;
	/* put in hash */
	rcu_assign_pointer(neigh->hnext,
			   rcu_dereference_protected(htbl->buckets[hash_val],
						     lockdep_is_held(&priv->lock)));
	rcu_assign_pointer(htbl->buckets[hash_val], neigh);
	atomic_inc(&ntbl->entries);

out_unlock:

	return neigh;
}

void ipoib_neigh_dtor(struct ipoib_neigh *neigh)
{
	/* neigh reference count was dropped to zero */
	struct net_device *dev = neigh->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	if (neigh->ah)
		ipoib_put_ah(neigh->ah);
	while ((skb = __skb_dequeue(&neigh->queue))) {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}
	if (ipoib_cm_get(neigh))
		ipoib_cm_destroy_tx(ipoib_cm_get(neigh));
	ipoib_dbg(netdev_priv(dev),
		  "neigh free for %06x %pI6\n",
		  IPOIB_QPN(neigh->daddr),
		  neigh->daddr + 4);
	kfree(neigh);
	if (atomic_dec_and_test(&priv->ntbl.entries)) {
		if (test_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags))
			complete(&priv->ntbl.flushed);
	}
}

static void ipoib_neigh_reclaim(struct rcu_head *rp)
{
	/* Called as a result of removal from hash table */
	struct ipoib_neigh *neigh = container_of(rp, struct ipoib_neigh, rcu);
	/* note TX context may hold another ref */
	ipoib_neigh_put(neigh);
}

void ipoib_neigh_free(struct ipoib_neigh *neigh)
{
	struct net_device *dev = neigh->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh __rcu **np;
	struct ipoib_neigh *n;
	u32 hash_val;

	htbl = rcu_dereference_protected(ntbl->htbl,
					lockdep_is_held(&priv->lock));
	if (!htbl)
		return;

	hash_val = ipoib_addr_hash(htbl, neigh->daddr);
	np = &htbl->buckets[hash_val];
	for (n = rcu_dereference_protected(*np,
					    lockdep_is_held(&priv->lock));
	     n != NULL;
	     n = rcu_dereference_protected(*np,
					lockdep_is_held(&priv->lock))) {
		if (n == neigh) {
			/* found */
			rcu_assign_pointer(*np,
					   rcu_dereference_protected(neigh->hnext,
								     lockdep_is_held(&priv->lock)));
			/* remove from parent list */
			list_del(&neigh->list);
			call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
			return;
		} else {
			np = &n->hnext;
		}
	}
}

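/*
 * Allocate the neighbour hash table, sized from arp_tbl.gc_thresh3 and
 * rounded up to a power of two, then start the periodic garbage
 * collection once the table has been published.
 */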
static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
{
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh __rcu **buckets;
	u32 size;

	clear_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
	ntbl->htbl = NULL;
	htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
	if (!htbl)
		return -ENOMEM;
	set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
	size = roundup_pow_of_two(arp_tbl.gc_thresh3);
	buckets = kzalloc(size * sizeof(*buckets), GFP_KERNEL);
	if (!buckets) {
		kfree(htbl);
		return -ENOMEM;
	}
	htbl->size = size;
	htbl->mask = (size - 1);
	htbl->buckets = buckets;
	RCU_INIT_POINTER(ntbl->htbl, htbl);
	htbl->ntbl = ntbl;
	atomic_set(&ntbl->entries, 0);

	/* start garbage collection */
	clear_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
	queue_delayed_work(priv->wq, &priv->neigh_reap_task,
			   arp_tbl.gc_interval);

	return 0;
}

static void neigh_hash_free_rcu(struct rcu_head *head)
{
	struct ipoib_neigh_hash *htbl = container_of(head,
						    struct ipoib_neigh_hash,
						    rcu);
	struct ipoib_neigh __rcu **buckets = htbl->buckets;
	struct ipoib_neigh_table *ntbl = htbl->ntbl;

	kfree(buckets);
	kfree(htbl);
	complete(&ntbl->deleted);
}

void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	unsigned long flags;
	int i;

	/* remove all neigh connected to a given path or mcast */
	spin_lock_irqsave(&priv->lock, flags);

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));

	if (!htbl)
		goto out_unlock;

	for (i = 0; i < htbl->size; i++) {
		struct ipoib_neigh *neigh;
		struct ipoib_neigh __rcu **np = &htbl->buckets[i];

		while ((neigh = rcu_dereference_protected(*np,
							  lockdep_is_held(&priv->lock))) != NULL) {
			/* delete neighs belonging to this parent */
			if (!memcmp(gid, neigh->daddr + 4, sizeof (union ib_gid))) {
				rcu_assign_pointer(*np,
						   rcu_dereference_protected(neigh->hnext,
									     lockdep_is_held(&priv->lock)));
				/* remove from parent list */
				list_del(&neigh->list);
				call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
			} else {
				np = &neigh->hnext;
			}
		}
	}
out_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
}

static void ipoib_flush_neighs(struct ipoib_dev_priv *priv)
{
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	unsigned long flags;
	int i, wait_flushed = 0;

	init_completion(&priv->ntbl.flushed);

	spin_lock_irqsave(&priv->lock, flags);

	htbl = rcu_dereference_protected(ntbl->htbl,
					lockdep_is_held(&priv->lock));
	if (!htbl)
		goto out_unlock;

	wait_flushed = atomic_read(&priv->ntbl.entries);
	if (!wait_flushed)
		goto free_htbl;

	for (i = 0; i < htbl->size; i++) {
		struct ipoib_neigh *neigh;
		struct ipoib_neigh __rcu **np = &htbl->buckets[i];

		while ((neigh = rcu_dereference_protected(*np,
				       lockdep_is_held(&priv->lock))) != NULL) {
			rcu_assign_pointer(*np,
					   rcu_dereference_protected(neigh->hnext,
								     lockdep_is_held(&priv->lock)));
			/* remove from path/mc list */
			list_del(&neigh->list);
			call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
		}
	}

free_htbl:
	rcu_assign_pointer(ntbl->htbl, NULL);
	call_rcu(&htbl->rcu, neigh_hash_free_rcu);

out_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
	if (wait_flushed)
		wait_for_completion(&priv->ntbl.flushed);
}

static void ipoib_neigh_hash_uninit(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int stopped;

	ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n");
	init_completion(&priv->ntbl.deleted);
	set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);

	/* Stop GC; if we were called from a failed init, the work must be cancelled */
	stopped = test_and_set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
	if (!stopped)
		cancel_delayed_work(&priv->neigh_reap_task);

	ipoib_flush_neighs(priv);

	wait_for_completion(&priv->ntbl.deleted);
}

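/*
 * Per-port data path initialization: allocate the RX/TX rings (the TX
 * ring via vzalloc(), since it can be large), set up the IB resources,
 * and only then the neighbour table, whose GC runs on the per-device
 * workqueue created by ipoib_ib_dev_init().
 */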
int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* Allocate RX/TX "rings" to hold queued skbs */
	priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
				GFP_KERNEL);
	if (!priv->rx_ring) {
		printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
		       ca->name, ipoib_recvq_size);
		goto out;
	}

	priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
	if (!priv->tx_ring) {
		printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
		       ca->name, ipoib_sendq_size);
		goto out_rx_ring_cleanup;
	}

	/* priv->tx_head, tx_tail & tx_outstanding are already 0 */

	if (ipoib_ib_dev_init(dev, ca, port))
		goto out_tx_ring_cleanup;

	/*
	 * Must be after ipoib_ib_dev_init so we can allocate a per
	 * device wq there and use it here
	 */
	if (ipoib_neigh_hash_init(priv) < 0)
		goto out_dev_uninit;

	return 0;

out_dev_uninit:
	ipoib_ib_dev_cleanup(dev);

out_tx_ring_cleanup:
	vfree(priv->tx_ring);

out_rx_ring_cleanup:
	kfree(priv->rx_ring);

out:
	return -ENOMEM;
}

void ipoib_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv;
	LIST_HEAD(head);

	ASSERT_RTNL();

	ipoib_delete_debug_files(dev);

	/* Delete any child interfaces first */
	list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
		/* Stop GC on child */
		set_bit(IPOIB_STOP_NEIGH_GC, &cpriv->flags);
		cancel_delayed_work(&cpriv->neigh_reap_task);
		unregister_netdevice_queue(cpriv->dev, &head);
	}
	unregister_netdevice_many(&head);

	/*
	 * Must be before ipoib_ib_dev_cleanup or we delete an in use
	 * work queue
	 */
	ipoib_neigh_hash_uninit(dev);

	ipoib_ib_dev_cleanup(dev);

	kfree(priv->rx_ring);
	vfree(priv->tx_ring);

	priv->rx_ring = NULL;
	priv->tx_ring = NULL;
}

static const struct header_ops ipoib_header_ops = {
	.create	= ipoib_hard_header,
};

static const struct net_device_ops ipoib_netdev_ops = {
	.ndo_uninit		 = ipoib_uninit,
	.ndo_open		 = ipoib_open,
	.ndo_stop		 = ipoib_stop,
	.ndo_change_mtu		 = ipoib_change_mtu,
	.ndo_fix_features	 = ipoib_fix_features,
	.ndo_start_xmit		 = ipoib_start_xmit,
	.ndo_tx_timeout		 = ipoib_timeout,
	.ndo_set_rx_mode	 = ipoib_set_mcast_list,
	.ndo_get_iflink		 = ipoib_get_iflink,
};

void ipoib_setup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	dev->netdev_ops		 = &ipoib_netdev_ops;
	dev->header_ops		 = &ipoib_header_ops;

	ipoib_set_ethtool_ops(dev);

	netif_napi_add(dev, &priv->napi, ipoib_poll, NAPI_POLL_WEIGHT);

	dev->watchdog_timeo	 = HZ;

	dev->flags		|= IFF_BROADCAST | IFF_MULTICAST;

	dev->hard_header_len	 = IPOIB_ENCAP_LEN;
	dev->addr_len		 = INFINIBAND_ALEN;
	dev->type		 = ARPHRD_INFINIBAND;
	dev->tx_queue_len	 = ipoib_sendq_size * 2;
	dev->features		 = (NETIF_F_VLAN_CHALLENGED	|
				    NETIF_F_HIGHDMA);
	netif_keep_dst(dev);

	memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);

	priv->dev = dev;

	spin_lock_init(&priv->lock);

	init_rwsem(&priv->vlan_rwsem);

	INIT_LIST_HEAD(&priv->path_list);
	INIT_LIST_HEAD(&priv->child_intfs);
	INIT_LIST_HEAD(&priv->dead_ahs);
	INIT_LIST_HEAD(&priv->multicast_list);

	INIT_DELAYED_WORK(&priv->mcast_task,   ipoib_mcast_join_task);
	INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task);
	INIT_WORK(&priv->flush_light,   ipoib_ib_dev_flush_light);
	INIT_WORK(&priv->flush_normal,  ipoib_ib_dev_flush_normal);
	INIT_WORK(&priv->flush_heavy,   ipoib_ib_dev_flush_heavy);
	INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
	INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
	INIT_DELAYED_WORK(&priv->neigh_reap_task, ipoib_reap_neigh);
}

struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
{
	struct net_device *dev;

	dev = alloc_netdev((int)sizeof(struct ipoib_dev_priv), name,
			   NET_NAME_UNKNOWN, ipoib_setup);
	if (!dev)
		return NULL;

	return netdev_priv(dev);
}

static ssize_t show_pkey(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));

	return sprintf(buf, "0x%04x\n", priv->pkey);
}
static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);

static ssize_t show_umcast(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));

	return sprintf(buf, "%d\n", test_bit(IPOIB_FLAG_UMCAST, &priv->flags));
}

void ipoib_set_umcast(struct net_device *ndev, int umcast_val)
{
	struct ipoib_dev_priv *priv = netdev_priv(ndev);

	if (umcast_val > 0) {
		set_bit(IPOIB_FLAG_UMCAST, &priv->flags);
		ipoib_warn(priv, "ignoring multicast groups joined directly "
				"by userspace\n");
	} else
		clear_bit(IPOIB_FLAG_UMCAST, &priv->flags);
}

static ssize_t set_umcast(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t count)
{
	unsigned long umcast_val = simple_strtoul(buf, NULL, 0);

	ipoib_set_umcast(to_net_dev(dev), umcast_val);

	return count;
}
static DEVICE_ATTR(umcast, S_IWUSR | S_IRUGO, show_umcast, set_umcast);

int ipoib_add_umcast_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_umcast);
}

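/*
 * Backend of the "create_child" sysfs attribute: writing a P_Key
 * (e.g. `echo 0x8001 > /sys/class/net/ib0/create_child`) creates a child
 * interface on that partition; the full membership bit is forced on below.
 */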
static ssize_t create_child(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey <= 0 || pkey > 0xffff || pkey == 0x8000)
		return -EINVAL;

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	pkey |= 0x8000;

	ret = ipoib_vlan_add(to_net_dev(dev), pkey);

	return ret ? ret : count;
}
static DEVICE_ATTR(create_child, S_IWUSR, NULL, create_child);

static ssize_t delete_child(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey < 0 || pkey > 0xffff)
		return -EINVAL;

	ret = ipoib_vlan_delete(to_net_dev(dev), pkey);

	return ret ? ret : count;
}
static DEVICE_ATTR(delete_child, S_IWUSR, NULL, delete_child);

int ipoib_add_pkey_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_pkey);
}

int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
{
	struct ib_device_attr *device_attr;
	int result = -ENOMEM;

	device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
	if (!device_attr) {
		printk(KERN_WARNING "%s: allocation of %zu bytes failed\n",
		       hca->name, sizeof *device_attr);
		return result;
	}

	result = ib_query_device(hca, device_attr);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_device failed (ret = %d)\n",
		       hca->name, result);
		kfree(device_attr);
		return result;
	}
	priv->hca_caps = device_attr->device_cap_flags;

	kfree(device_attr);

	if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
		priv->dev->hw_features = NETIF_F_SG |
			NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (priv->hca_caps & IB_DEVICE_UD_TSO)
			priv->dev->hw_features |= NETIF_F_TSO;

		priv->dev->features |= priv->dev->hw_features;
	}

	return 0;
}

static struct net_device *ipoib_add_port(const char *format,
					 struct ib_device *hca, u8 port)
{
	struct ipoib_dev_priv *priv;
	struct ib_port_attr attr;
	int result = -ENOMEM;

	priv = ipoib_intf_alloc(format);
	if (!priv)
		goto alloc_mem_failed;

	SET_NETDEV_DEV(priv->dev, hca->dma_device);
	priv->dev->dev_id = port - 1;

	result = ib_query_port(hca, port, &attr);
	if (!result)
		priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
	else {
		printk(KERN_WARNING "%s: ib_query_port %d failed\n",
		       hca->name, port);
		goto device_init_failed;
	}

	/* MTU will be reset when mcast join happens */
	priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
	priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;

	priv->dev->neigh_priv_len = sizeof(struct ipoib_neigh);

	result = ib_query_pkey(hca, port, 0, &priv->pkey);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}

	result = ipoib_set_dev_features(priv, hca);
	if (result)
		goto device_init_failed;

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	priv->pkey |= 0x8000;

	priv->dev->broadcast[8] = priv->pkey >> 8;
	priv->dev->broadcast[9] = priv->pkey & 0xff;

	result = ib_query_gid(hca, port, 0, &priv->local_gid, NULL);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	} else
		memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));

	result = ipoib_dev_init(priv->dev, hca, port);
	if (result < 0) {
		printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}

	INIT_IB_EVENT_HANDLER(&priv->event_handler,
			      priv->ca, ipoib_event);
	result = ib_register_event_handler(&priv->event_handler);
	if (result < 0) {
		printk(KERN_WARNING "%s: ib_register_event_handler failed for "
		       "port %d (ret = %d)\n",
		       hca->name, port, result);
		goto event_failed;
	}

	result = register_netdev(priv->dev);
	if (result) {
		printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
		       hca->name, port, result);
		goto register_failed;
	}

	ipoib_create_debug_files(priv->dev);

	if (ipoib_cm_add_mode_attr(priv->dev))
		goto sysfs_failed;
	if (ipoib_add_pkey_attr(priv->dev))
		goto sysfs_failed;
	if (ipoib_add_umcast_attr(priv->dev))
		goto sysfs_failed;
	if (device_create_file(&priv->dev->dev, &dev_attr_create_child))
		goto sysfs_failed;
	if (device_create_file(&priv->dev->dev, &dev_attr_delete_child))
		goto sysfs_failed;

	return priv->dev;

sysfs_failed:
	ipoib_delete_debug_files(priv->dev);
	unregister_netdev(priv->dev);

register_failed:
	ib_unregister_event_handler(&priv->event_handler);
	flush_workqueue(ipoib_workqueue);
	/* Stop GC if started before flush */
	set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
	cancel_delayed_work(&priv->neigh_reap_task);
	flush_workqueue(priv->wq);

event_failed:
	ipoib_dev_cleanup(priv->dev);

device_init_failed:
	free_netdev(priv->dev);

alloc_mem_failed:
	return ERR_PTR(result);
}

static void ipoib_add_one(struct ib_device *device)
{
	struct list_head *dev_list;
	struct net_device *dev;
	struct ipoib_dev_priv *priv;
	int p;
	int count = 0;

	dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
	if (!dev_list)
		return;

	INIT_LIST_HEAD(dev_list);

	for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
		if (!rdma_protocol_ib(device, p))
			continue;
		dev = ipoib_add_port("ib%d", device, p);
		if (!IS_ERR(dev)) {
			priv = netdev_priv(dev);
			list_add_tail(&priv->list, dev_list);
			count++;
		}
	}

	if (!count) {
		kfree(dev_list);
		return;
	}

	ib_set_client_data(device, &ipoib_client, dev_list);
}

static void ipoib_remove_one(struct ib_device *device, void *client_data)
{
	struct ipoib_dev_priv *priv, *tmp;
	struct list_head *dev_list = client_data;

	if (!dev_list)
		return;

	list_for_each_entry_safe(priv, tmp, dev_list, list) {
		ib_unregister_event_handler(&priv->event_handler);
		flush_workqueue(ipoib_workqueue);

		rtnl_lock();
		dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP);
		rtnl_unlock();

		/* Stop GC */
		set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
		cancel_delayed_work(&priv->neigh_reap_task);
		flush_workqueue(priv->wq);

		unregister_netdev(priv->dev);
		free_netdev(priv->dev);
	}

	kfree(dev_list);
}

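/*
 * Note that the queue size module parameters are normalized here, so the
 * effective values may differ from what was requested: both are rounded
 * up to a power of two and clamped to the supported range.
 */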
static int __init ipoib_init_module(void)
{
	int ret;

	ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
	ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);

	ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
	ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE);
#ifdef CONFIG_INFINIBAND_IPOIB_CM
	ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
#endif

	/*
	 * When copying small received packets, we only copy from the
	 * linear data part of the SKB, so we rely on this condition.
	 */
	BUILD_BUG_ON(IPOIB_CM_COPYBREAK > IPOIB_CM_HEAD_SIZE);

	ret = ipoib_register_debugfs();
	if (ret)
		return ret;

	/*
	 * We create a global workqueue here that is used for all flush
	 * operations.  However, if you attempt to flush a workqueue
	 * from a task on that same workqueue, it deadlocks the system.
	 * We want to be able to flush the tasks associated with a
	 * specific net device, so we also create a workqueue for each
	 * netdevice.  We queue up the tasks for that device only on
	 * its private workqueue, and we only queue up flush events
	 * on our global flush workqueue.  This avoids the deadlocks.
	 */
	ipoib_workqueue = create_singlethread_workqueue("ipoib_flush");
	if (!ipoib_workqueue) {
		ret = -ENOMEM;
		goto err_fs;
	}

	ib_sa_register_client(&ipoib_sa_client);

	ret = ib_register_client(&ipoib_client);
	if (ret)
		goto err_sa;

	ret = ipoib_netlink_init();
	if (ret)
		goto err_client;

	return 0;

err_client:
	ib_unregister_client(&ipoib_client);

err_sa:
	ib_sa_unregister_client(&ipoib_sa_client);
	destroy_workqueue(ipoib_workqueue);

err_fs:
	ipoib_unregister_debugfs();

	return ret;
}

static void __exit ipoib_cleanup_module(void)
{
	ipoib_netlink_fini();
	ib_unregister_client(&ipoib_client);
	ib_sa_unregister_client(&ipoib_sa_client);
	ipoib_unregister_debugfs();
	destroy_workqueue(ipoib_workqueue);
}

module_init(ipoib_init_module);
module_exit(ipoib_cleanup_module);