1 /*
2  * Copyright (c) 2004 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4  * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 
35 #include "ipoib.h"
36 
37 #include <linux/module.h>
38 
39 #include <linux/init.h>
40 #include <linux/slab.h>
41 #include <linux/kernel.h>
42 #include <linux/vmalloc.h>
43 
44 #include <linux/if_arp.h>	/* For ARPHRD_xxx */
45 
46 #include <linux/ip.h>
47 #include <linux/in.h>
48 
49 #include <linux/jhash.h>
50 #include <net/arp.h>
51 #include <net/addrconf.h>
52 #include <linux/inetdevice.h>
53 #include <rdma/ib_cache.h>
54 #include <linux/pci.h>
55 
56 #define DRV_VERSION "1.0.0"
57 
58 const char ipoib_driver_version[] = DRV_VERSION;
59 
60 MODULE_AUTHOR("Roland Dreier");
61 MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
62 MODULE_LICENSE("Dual BSD/GPL");
63 MODULE_VERSION(DRV_VERSION);
64 
65 int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE;
66 int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE;
67 
68 module_param_named(send_queue_size, ipoib_sendq_size, int, 0444);
69 MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
70 module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
71 MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");
72 
73 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
74 int ipoib_debug_level;
75 
76 module_param_named(debug_level, ipoib_debug_level, int, 0644);
77 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
78 #endif
79 
80 struct ipoib_path_iter {
81 	struct net_device *dev;
82 	struct ipoib_path  path;
83 };
84 
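/*
 * Default link-layer broadcast address: a pseudo QPN of 0x00ffffff followed
 * by the IPv4 broadcast group MGID ff12:401b::ffff:ffff.  The P_Key field
 * of the MGID is filled in with the interface's P_Key before it is used.
 */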
85 static const u8 ipv4_bcast_addr[] = {
86 	0x00, 0xff, 0xff, 0xff,
87 	0xff, 0x12, 0x40, 0x1b,	0x00, 0x00, 0x00, 0x00,
88 	0x00, 0x00, 0x00, 0x00,	0xff, 0xff, 0xff, 0xff
89 };
90 
91 struct workqueue_struct *ipoib_workqueue;
92 
93 struct ib_sa_client ipoib_sa_client;
94 
95 static void ipoib_add_one(struct ib_device *device);
96 static void ipoib_remove_one(struct ib_device *device, void *client_data);
97 static void ipoib_neigh_reclaim(struct rcu_head *rp);
98 static struct net_device *ipoib_get_net_dev_by_params(
99 		struct ib_device *dev, u8 port, u16 pkey,
100 		const union ib_gid *gid, const struct sockaddr *addr,
101 		void *client_data);
102 static int ipoib_set_mac(struct net_device *dev, void *addr);
103 
104 static struct ib_client ipoib_client = {
105 	.name   = "ipoib",
106 	.add    = ipoib_add_one,
107 	.remove = ipoib_remove_one,
108 	.get_net_dev_by_params = ipoib_get_net_dev_by_params,
109 };
110 
111 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
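/*
 * Keep the debugfs entries for an IPoIB device in sync with its netdev
 * lifetime: create them on register, recreate them on rename and remove
 * them on unregister.  Only devices whose ndo_open is ipoib_open are ours.
 */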
112 static int ipoib_netdev_event(struct notifier_block *this,
113 			      unsigned long event, void *ptr)
114 {
115 	struct netdev_notifier_info *ni = ptr;
116 	struct net_device *dev = ni->dev;
117 
118 	if (dev->netdev_ops->ndo_open != ipoib_open)
119 		return NOTIFY_DONE;
120 
121 	switch (event) {
122 	case NETDEV_REGISTER:
123 		ipoib_create_debug_files(dev);
124 		break;
125 	case NETDEV_CHANGENAME:
126 		ipoib_delete_debug_files(dev);
127 		ipoib_create_debug_files(dev);
128 		break;
129 	case NETDEV_UNREGISTER:
130 		ipoib_delete_debug_files(dev);
131 		break;
132 	}
133 
134 	return NOTIFY_DONE;
135 }
136 #endif
137 
138 int ipoib_open(struct net_device *dev)
139 {
140 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
141 
142 	ipoib_dbg(priv, "bringing up interface\n");
143 
144 	netif_carrier_off(dev);
145 
146 	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
147 
148 	priv->sm_fullmember_sendonly_support = false;
149 
150 	if (ipoib_ib_dev_open(dev)) {
151 		if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
152 			return 0;
153 		goto err_disable;
154 	}
155 
156 	ipoib_ib_dev_up(dev);
157 
158 	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
159 		struct ipoib_dev_priv *cpriv;
160 
161 		/* Bring up any child interfaces too */
162 		down_read(&priv->vlan_rwsem);
163 		list_for_each_entry(cpriv, &priv->child_intfs, list) {
164 			int flags;
165 
166 			flags = cpriv->dev->flags;
167 			if (flags & IFF_UP)
168 				continue;
169 
170 			dev_change_flags(cpriv->dev, flags | IFF_UP);
171 		}
172 		up_read(&priv->vlan_rwsem);
173 	}
174 
175 	netif_start_queue(dev);
176 
177 	return 0;
178 
179 err_disable:
180 	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
181 
182 	return -EINVAL;
183 }
184 
185 static int ipoib_stop(struct net_device *dev)
186 {
187 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
188 
189 	ipoib_dbg(priv, "stopping interface\n");
190 
191 	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
192 
193 	netif_stop_queue(dev);
194 
195 	ipoib_ib_dev_down(dev);
196 	ipoib_ib_dev_stop(dev);
197 
198 	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
199 		struct ipoib_dev_priv *cpriv;
200 
201 		/* Bring down any child interfaces too */
202 		down_read(&priv->vlan_rwsem);
203 		list_for_each_entry(cpriv, &priv->child_intfs, list) {
204 			int flags;
205 
206 			flags = cpriv->dev->flags;
207 			if (!(flags & IFF_UP))
208 				continue;
209 
210 			dev_change_flags(cpriv->dev, flags & ~IFF_UP);
211 		}
212 		up_read(&priv->vlan_rwsem);
213 	}
214 
215 	return 0;
216 }
217 
218 static void ipoib_uninit(struct net_device *dev)
219 {
220 	ipoib_dev_cleanup(dev);
221 }
222 
223 static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features)
224 {
225 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
226 
227 	if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
228 		features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
229 
230 	return features;
231 }
232 
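/*
 * In connected mode the MTU is only bounded by ipoib_cm_max_mtu(); in
 * datagram mode it is capped by the port's IB MTU minus the 4-byte IPoIB
 * encapsulation header, and the effective MTU is further limited by the
 * broadcast group (mcast) MTU.
 */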
233 static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
234 {
235 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
236 
237 	/* dev->mtu > 2K ==> connected mode */
238 	if (ipoib_cm_admin_enabled(dev)) {
239 		if (new_mtu > ipoib_cm_max_mtu(dev))
240 			return -EINVAL;
241 
242 		if (new_mtu > priv->mcast_mtu)
243 			ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n",
244 				   priv->mcast_mtu);
245 
246 		dev->mtu = new_mtu;
247 		return 0;
248 	}
249 
250 	if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
251 		return -EINVAL;
252 
253 	priv->admin_mtu = new_mtu;
254 
255 	if (priv->mcast_mtu < priv->admin_mtu)
256 		ipoib_dbg(priv, "MTU must be smaller than the underlying "
257 				"link layer MTU - 4 (%u)\n", priv->mcast_mtu);
258 
259 	dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);
260 
261 	return 0;
262 }
263 
264 /* Called with an RCU read lock taken */
265 static bool ipoib_is_dev_match_addr_rcu(const struct sockaddr *addr,
266 					struct net_device *dev)
267 {
268 	struct net *net = dev_net(dev);
269 	struct in_device *in_dev;
270 	struct sockaddr_in *addr_in = (struct sockaddr_in *)addr;
271 	struct sockaddr_in6 *addr_in6 = (struct sockaddr_in6 *)addr;
272 	__be32 ret_addr;
273 
274 	switch (addr->sa_family) {
275 	case AF_INET:
276 		in_dev = in_dev_get(dev);
277 		if (!in_dev)
278 			return false;
279 
280 		ret_addr = inet_confirm_addr(net, in_dev, 0,
281 					     addr_in->sin_addr.s_addr,
282 					     RT_SCOPE_HOST);
283 		in_dev_put(in_dev);
284 		if (ret_addr)
285 			return true;
286 
287 		break;
288 	case AF_INET6:
289 		if (IS_ENABLED(CONFIG_IPV6) &&
290 		    ipv6_chk_addr(net, &addr_in6->sin6_addr, dev, 1))
291 			return true;
292 
293 		break;
294 	}
295 	return false;
296 }
297 
298 /**
299  * ipoib_get_master_net_dev - Find the master net_device on top of the given net_device.
300  * @dev: base IPoIB net_device
301  *
302  * Returns the master net_device with a reference held, or the same net_device
303  * if no master exists.
304  */
305 static struct net_device *ipoib_get_master_net_dev(struct net_device *dev)
306 {
307 	struct net_device *master;
308 
309 	rcu_read_lock();
310 	master = netdev_master_upper_dev_get_rcu(dev);
311 	if (master)
312 		dev_hold(master);
313 	rcu_read_unlock();
314 
315 	if (master)
316 		return master;
317 
318 	dev_hold(dev);
319 	return dev;
320 }
321 
322 struct ipoib_walk_data {
323 	const struct sockaddr *addr;
324 	struct net_device *result;
325 };
326 
327 static int ipoib_upper_walk(struct net_device *upper, void *_data)
328 {
329 	struct ipoib_walk_data *data = _data;
330 	int ret = 0;
331 
332 	if (ipoib_is_dev_match_addr_rcu(data->addr, upper)) {
333 		dev_hold(upper);
334 		data->result = upper;
335 		ret = 1;
336 	}
337 
338 	return ret;
339 }
340 
341 /**
342  * ipoib_get_net_dev_match_addr - Find a net_device matching the given
343  * address, which is an upper device of the given net_device.
344  * @addr: IP address to look for.
345  * @dev: base IPoIB net_device
346  *
347  * If found, returns the net_device with a reference held. Otherwise return
348  * NULL.
349  */
350 static struct net_device *ipoib_get_net_dev_match_addr(
351 		const struct sockaddr *addr, struct net_device *dev)
352 {
353 	struct ipoib_walk_data data = {
354 		.addr = addr,
355 	};
356 
357 	rcu_read_lock();
358 	if (ipoib_is_dev_match_addr_rcu(addr, dev)) {
359 		dev_hold(dev);
360 		data.result = dev;
361 		goto out;
362 	}
363 
364 	netdev_walk_all_upper_dev_rcu(dev, ipoib_upper_walk, &data);
365 out:
366 	rcu_read_unlock();
367 	return data.result;
368 }
369 
370 /* Returns the number of IPoIB netdevs on top of a given IPoIB device matching a
371  * pkey_index and address, if one exists.
372  *
373  * @found_net_dev: contains a matching net_device if the return value >= 1,
374  * with a reference held. */
375 static int ipoib_match_gid_pkey_addr(struct ipoib_dev_priv *priv,
376 				     const union ib_gid *gid,
377 				     u16 pkey_index,
378 				     const struct sockaddr *addr,
379 				     int nesting,
380 				     struct net_device **found_net_dev)
381 {
382 	struct ipoib_dev_priv *child_priv;
383 	struct net_device *net_dev = NULL;
384 	int matches = 0;
385 
386 	if (priv->pkey_index == pkey_index &&
387 	    (!gid || !memcmp(gid, &priv->local_gid, sizeof(*gid)))) {
388 		if (!addr) {
389 			net_dev = ipoib_get_master_net_dev(priv->dev);
390 		} else {
391 			/* Verify the net_device matches the IP address, as
392 			 * IPoIB child devices currently share a GID. */
393 			net_dev = ipoib_get_net_dev_match_addr(addr, priv->dev);
394 		}
395 		if (net_dev) {
396 			if (!*found_net_dev)
397 				*found_net_dev = net_dev;
398 			else
399 				dev_put(net_dev);
400 			++matches;
401 		}
402 	}
403 
404 	/* Check child interfaces */
405 	down_read_nested(&priv->vlan_rwsem, nesting);
406 	list_for_each_entry(child_priv, &priv->child_intfs, list) {
407 		matches += ipoib_match_gid_pkey_addr(child_priv, gid,
408 						    pkey_index, addr,
409 						    nesting + 1,
410 						    found_net_dev);
411 		if (matches > 1)
412 			break;
413 	}
414 	up_read(&priv->vlan_rwsem);
415 
416 	return matches;
417 }
418 
419 /* Returns the number of matching net_devs found (between 0 and 2). Also
420  * returns the matching net_device in the @net_dev parameter, with a
421  * reference held, if the number of matches >= 1 */
422 static int __ipoib_get_net_dev_by_params(struct list_head *dev_list, u8 port,
423 					 u16 pkey_index,
424 					 const union ib_gid *gid,
425 					 const struct sockaddr *addr,
426 					 struct net_device **net_dev)
427 {
428 	struct ipoib_dev_priv *priv;
429 	int matches = 0;
430 
431 	*net_dev = NULL;
432 
433 	list_for_each_entry(priv, dev_list, list) {
434 		if (priv->port != port)
435 			continue;
436 
437 		matches += ipoib_match_gid_pkey_addr(priv, gid, pkey_index,
438 						     addr, 0, net_dev);
439 		if (matches > 1)
440 			break;
441 	}
442 
443 	return matches;
444 }
445 
446 static struct net_device *ipoib_get_net_dev_by_params(
447 		struct ib_device *dev, u8 port, u16 pkey,
448 		const union ib_gid *gid, const struct sockaddr *addr,
449 		void *client_data)
450 {
451 	struct net_device *net_dev;
452 	struct list_head *dev_list = client_data;
453 	u16 pkey_index;
454 	int matches;
455 	int ret;
456 
457 	if (!rdma_protocol_ib(dev, port))
458 		return NULL;
459 
460 	ret = ib_find_cached_pkey(dev, port, pkey, &pkey_index);
461 	if (ret)
462 		return NULL;
463 
464 	if (!dev_list)
465 		return NULL;
466 
467 	/* See if we can find a unique device matching the L2 parameters */
468 	matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
469 						gid, NULL, &net_dev);
470 
471 	switch (matches) {
472 	case 0:
473 		return NULL;
474 	case 1:
475 		return net_dev;
476 	}
477 
478 	dev_put(net_dev);
479 
480 	/* Couldn't find a unique device with L2 parameters only. Use L3
481 	 * address to uniquely match the net device */
482 	matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
483 						gid, addr, &net_dev);
484 	switch (matches) {
485 	case 0:
486 		return NULL;
487 	default:
488 		dev_warn_ratelimited(&dev->dev,
489 				     "duplicate IP address detected\n");
490 		/* Fall through */
491 	case 1:
492 		return net_dev;
493 	}
494 }
495 
496 int ipoib_set_mode(struct net_device *dev, const char *buf)
497 {
498 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
499 
500 	if ((test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags) &&
501 	     !strcmp(buf, "connected\n")) ||
502 	     (!test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags) &&
503 	     !strcmp(buf, "datagram\n"))) {
504 		return 0;
505 	}
506 
507 	/* flush paths if we switch modes so that connections are restarted */
508 	if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
509 		set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
510 		ipoib_warn(priv, "enabling connected mode "
511 			   "will cause multicast packet drops\n");
512 		netdev_update_features(dev);
513 		dev_set_mtu(dev, ipoib_cm_max_mtu(dev));
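		/*
		 * ipoib_flush_paths() sleeps waiting for outstanding path
		 * queries, so drop RTNL (held by our caller) while we wait
		 * and report -EBUSY if it cannot be re-taken afterwards.
		 */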
514 		rtnl_unlock();
515 		priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;
516 
517 		ipoib_flush_paths(dev);
518 		return (!rtnl_trylock()) ? -EBUSY : 0;
519 	}
520 
521 	if (!strcmp(buf, "datagram\n")) {
522 		clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
523 		netdev_update_features(dev);
524 		dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
525 		rtnl_unlock();
526 		ipoib_flush_paths(dev);
527 		return (!rtnl_trylock()) ? -EBUSY : 0;
528 	}
529 
530 	return -EINVAL;
531 }
532 
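/*
 * Path records are cached in an rb-tree keyed by the destination GID
 * (priv->path_tree) and protected by priv->lock.  __path_find() and
 * __path_add() are the lookup/insert helpers for that tree.
 */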
533 struct ipoib_path *__path_find(struct net_device *dev, void *gid)
534 {
535 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
536 	struct rb_node *n = priv->path_tree.rb_node;
537 	struct ipoib_path *path;
538 	int ret;
539 
540 	while (n) {
541 		path = rb_entry(n, struct ipoib_path, rb_node);
542 
543 		ret = memcmp(gid, path->pathrec.dgid.raw,
544 			     sizeof (union ib_gid));
545 
546 		if (ret < 0)
547 			n = n->rb_left;
548 		else if (ret > 0)
549 			n = n->rb_right;
550 		else
551 			return path;
552 	}
553 
554 	return NULL;
555 }
556 
557 static int __path_add(struct net_device *dev, struct ipoib_path *path)
558 {
559 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
560 	struct rb_node **n = &priv->path_tree.rb_node;
561 	struct rb_node *pn = NULL;
562 	struct ipoib_path *tpath;
563 	int ret;
564 
565 	while (*n) {
566 		pn = *n;
567 		tpath = rb_entry(pn, struct ipoib_path, rb_node);
568 
569 		ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
570 			     sizeof (union ib_gid));
571 		if (ret < 0)
572 			n = &pn->rb_left;
573 		else if (ret > 0)
574 			n = &pn->rb_right;
575 		else
576 			return -EEXIST;
577 	}
578 
579 	rb_link_node(&path->rb_node, pn, n);
580 	rb_insert_color(&path->rb_node, &priv->path_tree);
581 
582 	list_add_tail(&path->list, &priv->path_list);
583 
584 	return 0;
585 }
586 
587 static void path_free(struct net_device *dev, struct ipoib_path *path)
588 {
589 	struct sk_buff *skb;
590 
591 	while ((skb = __skb_dequeue(&path->queue)))
592 		dev_kfree_skb_irq(skb);
593 
594 	ipoib_dbg(ipoib_priv(dev), "path_free\n");
595 
596 	/* remove all neigh connected to this path */
597 	ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);
598 
599 	if (path->ah)
600 		ipoib_put_ah(path->ah);
601 
602 	kfree(path);
603 }
604 
605 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
606 
607 struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev)
608 {
609 	struct ipoib_path_iter *iter;
610 
611 	iter = kmalloc(sizeof *iter, GFP_KERNEL);
612 	if (!iter)
613 		return NULL;
614 
615 	iter->dev = dev;
616 	memset(iter->path.pathrec.dgid.raw, 0, 16);
617 
618 	if (ipoib_path_iter_next(iter)) {
619 		kfree(iter);
620 		return NULL;
621 	}
622 
623 	return iter;
624 }
625 
626 int ipoib_path_iter_next(struct ipoib_path_iter *iter)
627 {
628 	struct ipoib_dev_priv *priv = ipoib_priv(iter->dev);
629 	struct rb_node *n;
630 	struct ipoib_path *path;
631 	int ret = 1;
632 
633 	spin_lock_irq(&priv->lock);
634 
635 	n = rb_first(&priv->path_tree);
636 
637 	while (n) {
638 		path = rb_entry(n, struct ipoib_path, rb_node);
639 
640 		if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
641 			   sizeof (union ib_gid)) < 0) {
642 			iter->path = *path;
643 			ret = 0;
644 			break;
645 		}
646 
647 		n = rb_next(n);
648 	}
649 
650 	spin_unlock_irq(&priv->lock);
651 
652 	return ret;
653 }
654 
655 void ipoib_path_iter_read(struct ipoib_path_iter *iter,
656 			  struct ipoib_path *path)
657 {
658 	*path = iter->path;
659 }
660 
661 #endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */
662 
663 void ipoib_mark_paths_invalid(struct net_device *dev)
664 {
665 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
666 	struct ipoib_path *path, *tp;
667 
668 	spin_lock_irq(&priv->lock);
669 
670 	list_for_each_entry_safe(path, tp, &priv->path_list, list) {
671 		ipoib_dbg(priv, "mark path LID 0x%08x GID %pI6 invalid\n",
672 			  be32_to_cpu(sa_path_get_dlid(&path->pathrec)),
673 			  path->pathrec.dgid.raw);
674 		path->valid =  0;
675 	}
676 
677 	spin_unlock_irq(&priv->lock);
678 }
679 
680 static void push_pseudo_header(struct sk_buff *skb, const char *daddr)
681 {
682 	struct ipoib_pseudo_header *phdr;
683 
684 	phdr = skb_push(skb, sizeof(*phdr));
685 	memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
686 }
687 
688 void ipoib_flush_paths(struct net_device *dev)
689 {
690 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
691 	struct ipoib_path *path, *tp;
692 	LIST_HEAD(remove_list);
693 	unsigned long flags;
694 
695 	netif_tx_lock_bh(dev);
696 	spin_lock_irqsave(&priv->lock, flags);
697 
698 	list_splice_init(&priv->path_list, &remove_list);
699 
700 	list_for_each_entry(path, &remove_list, list)
701 		rb_erase(&path->rb_node, &priv->path_tree);
702 
703 	list_for_each_entry_safe(path, tp, &remove_list, list) {
704 		if (path->query)
705 			ib_sa_cancel_query(path->query_id, path->query);
706 		spin_unlock_irqrestore(&priv->lock, flags);
707 		netif_tx_unlock_bh(dev);
708 		wait_for_completion(&path->done);
709 		path_free(dev, path);
710 		netif_tx_lock_bh(dev);
711 		spin_lock_irqsave(&priv->lock, flags);
712 	}
713 
714 	spin_unlock_irqrestore(&priv->lock, flags);
715 	netif_tx_unlock_bh(dev);
716 }
717 
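/*
 * Completion handler for an SA path record query: on success build a new
 * address handle from the returned path, swap it into the ipoib_path, hand
 * it to all neighbours waiting on this path and retransmit any skbs queued
 * while the lookup was in flight.  On failure the neighbours tied to this
 * destination GID are dropped.
 */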
718 static void path_rec_completion(int status,
719 				struct sa_path_rec *pathrec,
720 				void *path_ptr)
721 {
722 	struct ipoib_path *path = path_ptr;
723 	struct net_device *dev = path->dev;
724 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
725 	struct ipoib_ah *ah = NULL;
726 	struct ipoib_ah *old_ah = NULL;
727 	struct ipoib_neigh *neigh, *tn;
728 	struct sk_buff_head skqueue;
729 	struct sk_buff *skb;
730 	unsigned long flags;
731 
732 	if (!status)
733 		ipoib_dbg(priv, "PathRec LID 0x%04x for GID %pI6\n",
734 			  be32_to_cpu(sa_path_get_dlid(pathrec)),
735 			  pathrec->dgid.raw);
736 	else
737 		ipoib_dbg(priv, "PathRec status %d for GID %pI6\n",
738 			  status, path->pathrec.dgid.raw);
739 
740 	skb_queue_head_init(&skqueue);
741 
742 	if (!status) {
743 		struct rdma_ah_attr av;
744 
745 		if (!ib_init_ah_from_path(priv->ca, priv->port, pathrec, &av))
746 			ah = ipoib_create_ah(dev, priv->pd, &av);
747 	}
748 
749 	spin_lock_irqsave(&priv->lock, flags);
750 
751 	if (!IS_ERR_OR_NULL(ah)) {
752 		path->pathrec = *pathrec;
753 
754 		old_ah   = path->ah;
755 		path->ah = ah;
756 
757 		ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
758 			  ah, be32_to_cpu(sa_path_get_dlid(pathrec)),
759 			  pathrec->sl);
760 
761 		while ((skb = __skb_dequeue(&path->queue)))
762 			__skb_queue_tail(&skqueue, skb);
763 
764 		list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
765 			if (neigh->ah) {
766 				WARN_ON(neigh->ah != old_ah);
767 				/*
768 				 * Dropping the ah reference inside
769 				 * priv->lock is safe here, because we
770 				 * will hold one more reference from
771 				 * the original value of path->ah (ie
772 				 * old_ah).
773 				 */
774 				ipoib_put_ah(neigh->ah);
775 			}
776 			kref_get(&path->ah->ref);
777 			neigh->ah = path->ah;
778 
779 			if (ipoib_cm_enabled(dev, neigh->daddr)) {
780 				if (!ipoib_cm_get(neigh))
781 					ipoib_cm_set(neigh, ipoib_cm_create_tx(dev,
782 									       path,
783 									       neigh));
784 				if (!ipoib_cm_get(neigh)) {
785 					ipoib_neigh_free(neigh);
786 					continue;
787 				}
788 			}
789 
790 			while ((skb = __skb_dequeue(&neigh->queue)))
791 				__skb_queue_tail(&skqueue, skb);
792 		}
793 		path->valid = 1;
794 	}
795 
796 	path->query = NULL;
797 	complete(&path->done);
798 
799 	spin_unlock_irqrestore(&priv->lock, flags);
800 
801 	if (IS_ERR_OR_NULL(ah))
802 		ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);
803 
804 	if (old_ah)
805 		ipoib_put_ah(old_ah);
806 
807 	while ((skb = __skb_dequeue(&skqueue))) {
808 		int ret;
809 		skb->dev = dev;
810 		ret = dev_queue_xmit(skb);
811 		if (ret)
812 			ipoib_warn(priv, "%s: dev_queue_xmit failed to re-queue packet, ret:%d\n",
813 				   __func__, ret);
814 	}
815 }
816 
817 static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
818 {
819 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
820 	struct ipoib_path *path;
821 
822 	if (!priv->broadcast)
823 		return NULL;
824 
825 	path = kzalloc(sizeof *path, GFP_ATOMIC);
826 	if (!path)
827 		return NULL;
828 
829 	path->dev = dev;
830 
831 	skb_queue_head_init(&path->queue);
832 
833 	INIT_LIST_HEAD(&path->neigh_list);
834 
835 	if (rdma_cap_opa_ah(priv->ca, priv->port))
836 		path->pathrec.rec_type = SA_PATH_REC_TYPE_OPA;
837 	else
838 		path->pathrec.rec_type = SA_PATH_REC_TYPE_IB;
839 	memcpy(path->pathrec.dgid.raw, gid, sizeof (union ib_gid));
840 	path->pathrec.sgid	    = priv->local_gid;
841 	path->pathrec.pkey	    = cpu_to_be16(priv->pkey);
842 	path->pathrec.numb_path     = 1;
843 	path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;
844 
845 	return path;
846 }
847 
848 static int path_rec_start(struct net_device *dev,
849 			  struct ipoib_path *path)
850 {
851 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
852 
853 	ipoib_dbg(priv, "Start path record lookup for %pI6\n",
854 		  path->pathrec.dgid.raw);
855 
856 	init_completion(&path->done);
857 
858 	path->query_id =
859 		ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port,
860 				   &path->pathrec,
861 				   IB_SA_PATH_REC_DGID		|
862 				   IB_SA_PATH_REC_SGID		|
863 				   IB_SA_PATH_REC_NUMB_PATH	|
864 				   IB_SA_PATH_REC_TRAFFIC_CLASS |
865 				   IB_SA_PATH_REC_PKEY,
866 				   1000, GFP_ATOMIC,
867 				   path_rec_completion,
868 				   path, &path->query);
869 	if (path->query_id < 0) {
870 		ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id);
871 		path->query = NULL;
872 		complete(&path->done);
873 		return path->query_id;
874 	}
875 
876 	return 0;
877 }
878 
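/*
 * Slow path for unicast transmit when no neighbour entry exists yet:
 * allocate an ipoib_neigh, attach it to the (possibly new) path for the
 * destination GID and either transmit immediately over the path's address
 * handle or queue the skb until the path lookup (or CM connection)
 * completes.
 */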
879 static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
880 			   struct net_device *dev)
881 {
882 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
883 	struct rdma_netdev *rn = netdev_priv(dev);
884 	struct ipoib_path *path;
885 	struct ipoib_neigh *neigh;
886 	unsigned long flags;
887 
888 	spin_lock_irqsave(&priv->lock, flags);
889 	neigh = ipoib_neigh_alloc(daddr, dev);
890 	if (!neigh) {
891 		spin_unlock_irqrestore(&priv->lock, flags);
892 		++dev->stats.tx_dropped;
893 		dev_kfree_skb_any(skb);
894 		return;
895 	}
896 
897 	path = __path_find(dev, daddr + 4);
898 	if (!path) {
899 		path = path_rec_create(dev, daddr + 4);
900 		if (!path)
901 			goto err_path;
902 
903 		__path_add(dev, path);
904 	}
905 
906 	list_add_tail(&neigh->list, &path->neigh_list);
907 
908 	if (path->ah) {
909 		kref_get(&path->ah->ref);
910 		neigh->ah = path->ah;
911 
912 		if (ipoib_cm_enabled(dev, neigh->daddr)) {
913 			if (!ipoib_cm_get(neigh))
914 				ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, path, neigh));
915 			if (!ipoib_cm_get(neigh)) {
916 				ipoib_neigh_free(neigh);
917 				goto err_drop;
918 			}
919 			if (skb_queue_len(&neigh->queue) <
920 			    IPOIB_MAX_PATH_REC_QUEUE) {
921 				push_pseudo_header(skb, neigh->daddr);
922 				__skb_queue_tail(&neigh->queue, skb);
923 			} else {
924 				ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
925 					   skb_queue_len(&neigh->queue));
926 				goto err_drop;
927 			}
928 		} else {
929 			spin_unlock_irqrestore(&priv->lock, flags);
930 			path->ah->last_send = rn->send(dev, skb, path->ah->ah,
931 						       IPOIB_QPN(daddr));
932 			ipoib_neigh_put(neigh);
933 			return;
934 		}
935 	} else {
936 		neigh->ah  = NULL;
937 
938 		if (!path->query && path_rec_start(dev, path))
939 			goto err_path;
940 		if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
941 			push_pseudo_header(skb, neigh->daddr);
942 			__skb_queue_tail(&neigh->queue, skb);
943 		} else {
944 			goto err_drop;
945 		}
946 	}
947 
948 	spin_unlock_irqrestore(&priv->lock, flags);
949 	ipoib_neigh_put(neigh);
950 	return;
951 
952 err_path:
953 	ipoib_neigh_free(neigh);
954 err_drop:
955 	++dev->stats.tx_dropped;
956 	dev_kfree_skb_any(skb);
957 
958 	spin_unlock_irqrestore(&priv->lock, flags);
959 	ipoib_neigh_put(neigh);
960 }
961 
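/*
 * Transmit helper for unicast ARP/RARP: these always go through a path
 * lookup (no neighbour entry is used), queueing the skb on the path until
 * a valid address handle is available.
 */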
962 static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
963 			     struct ipoib_pseudo_header *phdr)
964 {
965 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
966 	struct rdma_netdev *rn = netdev_priv(dev);
967 	struct ipoib_path *path;
968 	unsigned long flags;
969 
970 	spin_lock_irqsave(&priv->lock, flags);
971 
972 	path = __path_find(dev, phdr->hwaddr + 4);
973 	if (!path || !path->valid) {
974 		int new_path = 0;
975 
976 		if (!path) {
977 			path = path_rec_create(dev, phdr->hwaddr + 4);
978 			new_path = 1;
979 		}
980 		if (path) {
981 			if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
982 				push_pseudo_header(skb, phdr->hwaddr);
983 				__skb_queue_tail(&path->queue, skb);
984 			} else {
985 				++dev->stats.tx_dropped;
986 				dev_kfree_skb_any(skb);
987 			}
988 
989 			if (!path->query && path_rec_start(dev, path)) {
990 				spin_unlock_irqrestore(&priv->lock, flags);
991 				if (new_path)
992 					path_free(dev, path);
993 				return;
994 			} else
995 				__path_add(dev, path);
996 		} else {
997 			++dev->stats.tx_dropped;
998 			dev_kfree_skb_any(skb);
999 		}
1000 
1001 		spin_unlock_irqrestore(&priv->lock, flags);
1002 		return;
1003 	}
1004 
1005 	if (path->ah) {
1006 		ipoib_dbg(priv, "Send unicast ARP to %08x\n",
1007 			  be32_to_cpu(sa_path_get_dlid(&path->pathrec)));
1008 
1009 		spin_unlock_irqrestore(&priv->lock, flags);
1010 		path->ah->last_send = rn->send(dev, skb, path->ah->ah,
1011 					       IPOIB_QPN(phdr->hwaddr));
1012 		return;
1013 	} else if ((path->query || !path_rec_start(dev, path)) &&
1014 		   skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
1015 		push_pseudo_header(skb, phdr->hwaddr);
1016 		__skb_queue_tail(&path->queue, skb);
1017 	} else {
1018 		++dev->stats.tx_dropped;
1019 		dev_kfree_skb_any(skb);
1020 	}
1021 
1022 	spin_unlock_irqrestore(&priv->lock, flags);
1023 }
1024 
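/*
 * Main transmit entry point.  The pseudo header pushed by
 * ipoib_hard_header() is popped here to recover the link-layer
 * destination; multicast is dispatched via the mcast machinery, while
 * unicast traffic is resolved through the neighbour table and the path
 * helpers above.
 */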
1025 static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
1026 {
1027 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1028 	struct rdma_netdev *rn = netdev_priv(dev);
1029 	struct ipoib_neigh *neigh;
1030 	struct ipoib_pseudo_header *phdr;
1031 	struct ipoib_header *header;
1032 	unsigned long flags;
1033 
1034 	phdr = (struct ipoib_pseudo_header *) skb->data;
1035 	skb_pull(skb, sizeof(*phdr));
1036 	header = (struct ipoib_header *) skb->data;
1037 
1038 	if (unlikely(phdr->hwaddr[4] == 0xff)) {
1039 		/* multicast: order the "if" checks by decreasing probability */
1040 		if ((header->proto != htons(ETH_P_IP)) &&
1041 		    (header->proto != htons(ETH_P_IPV6)) &&
1042 		    (header->proto != htons(ETH_P_ARP)) &&
1043 		    (header->proto != htons(ETH_P_RARP)) &&
1044 		    (header->proto != htons(ETH_P_TIPC))) {
1045 			/* ethertype not supported by IPoIB */
1046 			++dev->stats.tx_dropped;
1047 			dev_kfree_skb_any(skb);
1048 			return NETDEV_TX_OK;
1049 		}
1050 		/* Add in the P_Key for multicast */
1051 		phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
1052 		phdr->hwaddr[9] = priv->pkey & 0xff;
1053 
1054 		neigh = ipoib_neigh_get(dev, phdr->hwaddr);
1055 		if (likely(neigh))
1056 			goto send_using_neigh;
1057 		ipoib_mcast_send(dev, phdr->hwaddr, skb);
1058 		return NETDEV_TX_OK;
1059 	}
1060 
1061 	/* unicast: order the "switch" cases by decreasing probability */
1062 	switch (header->proto) {
1063 	case htons(ETH_P_IP):
1064 	case htons(ETH_P_IPV6):
1065 	case htons(ETH_P_TIPC):
1066 		neigh = ipoib_neigh_get(dev, phdr->hwaddr);
1067 		if (unlikely(!neigh)) {
1068 			neigh_add_path(skb, phdr->hwaddr, dev);
1069 			return NETDEV_TX_OK;
1070 		}
1071 		break;
1072 	case htons(ETH_P_ARP):
1073 	case htons(ETH_P_RARP):
1074 		/* for unicast ARP and RARP, always perform a path lookup */
1075 		unicast_arp_send(skb, dev, phdr);
1076 		return NETDEV_TX_OK;
1077 	default:
1078 		/* ethertype not supported by IPoIB */
1079 		++dev->stats.tx_dropped;
1080 		dev_kfree_skb_any(skb);
1081 		return NETDEV_TX_OK;
1082 	}
1083 
1084 send_using_neigh:
1085 	/* note we now hold a ref to neigh */
1086 	if (ipoib_cm_get(neigh)) {
1087 		if (ipoib_cm_up(neigh)) {
1088 			ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
1089 			goto unref;
1090 		}
1091 	} else if (neigh->ah) {
1092 		neigh->ah->last_send = rn->send(dev, skb, neigh->ah->ah,
1093 						IPOIB_QPN(phdr->hwaddr));
1094 		goto unref;
1095 	}
1096 
1097 	if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
1098 		push_pseudo_header(skb, phdr->hwaddr);
1099 		spin_lock_irqsave(&priv->lock, flags);
1100 		__skb_queue_tail(&neigh->queue, skb);
1101 		spin_unlock_irqrestore(&priv->lock, flags);
1102 	} else {
1103 		++dev->stats.tx_dropped;
1104 		dev_kfree_skb_any(skb);
1105 	}
1106 
1107 unref:
1108 	ipoib_neigh_put(neigh);
1109 
1110 	return NETDEV_TX_OK;
1111 }
1112 
1113 static void ipoib_timeout(struct net_device *dev)
1114 {
1115 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1116 
1117 	ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
1118 		   jiffies_to_msecs(jiffies - dev_trans_start(dev)));
1119 	ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
1120 		   netif_queue_stopped(dev),
1121 		   priv->tx_head, priv->tx_tail);
1122 	/* XXX reset QP, etc. */
1123 }
1124 
1125 static int ipoib_hard_header(struct sk_buff *skb,
1126 			     struct net_device *dev,
1127 			     unsigned short type,
1128 			     const void *daddr, const void *saddr, unsigned len)
1129 {
1130 	struct ipoib_header *header;
1131 
1132 	header = skb_push(skb, sizeof *header);
1133 
1134 	header->proto = htons(type);
1135 	header->reserved = 0;
1136 
1137 	/*
1138 	 * We don't rely on the dst_entry structure; always stuff the
1139 	 * destination address into the skb hard header so we can figure out
1140 	 * where to send the packet later.
1141 	 */
1142 	push_pseudo_header(skb, daddr);
1143 
1144 	return IPOIB_HARD_LEN;
1145 }
1146 
1147 static void ipoib_set_mcast_list(struct net_device *dev)
1148 {
1149 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1150 
1151 	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
1152 		ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set");
1153 		return;
1154 	}
1155 
1156 	queue_work(priv->wq, &priv->restart_task);
1157 }
1158 
1159 static int ipoib_get_iflink(const struct net_device *dev)
1160 {
1161 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1162 
1163 	/* parent interface */
1164 	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
1165 		return dev->ifindex;
1166 
1167 	/* child/vlan interface */
1168 	return priv->parent->ifindex;
1169 }
1170 
1171 static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
1172 {
1173 	/*
1174 	 * Use only the address parts that contribute to spreading.
1175 	 * The subnet prefix is not used, as one cannot connect to the
1176 	 * same remote port (GUID) using the same remote QPN via two
1177 	 * different subnets.
1178 	 */
1179 	 /* qpn octets[1:4) & port GUID octets[12:20) */
1180 	u32 *d32 = (u32 *) daddr;
1181 	u32 hv;
1182 
1183 	hv = jhash_3words(d32[3], d32[4], IPOIB_QPN_MASK & d32[0], 0);
1184 	return hv & htbl->mask;
1185 }
1186 
1187 struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr)
1188 {
1189 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1190 	struct ipoib_neigh_table *ntbl = &priv->ntbl;
1191 	struct ipoib_neigh_hash *htbl;
1192 	struct ipoib_neigh *neigh = NULL;
1193 	u32 hash_val;
1194 
1195 	rcu_read_lock_bh();
1196 
1197 	htbl = rcu_dereference_bh(ntbl->htbl);
1198 
1199 	if (!htbl)
1200 		goto out_unlock;
1201 
1202 	hash_val = ipoib_addr_hash(htbl, daddr);
1203 	for (neigh = rcu_dereference_bh(htbl->buckets[hash_val]);
1204 	     neigh != NULL;
1205 	     neigh = rcu_dereference_bh(neigh->hnext)) {
1206 		if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
1207 			/* found, take one ref on behalf of the caller */
1208 			if (!atomic_inc_not_zero(&neigh->refcnt)) {
1209 				/* deleted */
1210 				neigh = NULL;
1211 				goto out_unlock;
1212 			}
1213 
1214 			if (likely(skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE))
1215 				neigh->alive = jiffies;
1216 			goto out_unlock;
1217 		}
1218 	}
1219 
1220 out_unlock:
1221 	rcu_read_unlock_bh();
1222 	return neigh;
1223 }
1224 
1225 static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
1226 {
1227 	struct ipoib_neigh_table *ntbl = &priv->ntbl;
1228 	struct ipoib_neigh_hash *htbl;
1229 	unsigned long neigh_obsolete;
1230 	unsigned long dt;
1231 	unsigned long flags;
1232 	int i;
1233 	LIST_HEAD(remove_list);
1234 
1235 	if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
1236 		return;
1237 
1238 	spin_lock_irqsave(&priv->lock, flags);
1239 
1240 	htbl = rcu_dereference_protected(ntbl->htbl,
1241 					 lockdep_is_held(&priv->lock));
1242 
1243 	if (!htbl)
1244 		goto out_unlock;
1245 
1246 	/* neigh is obsolete if it was idle for two GC periods */
1247 	dt = 2 * arp_tbl.gc_interval;
1248 	neigh_obsolete = jiffies - dt;
1249 	/* re-check under the lock in case neigh GC was stopped meanwhile */
1250 	if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
1251 		goto out_unlock;
1252 
1253 	for (i = 0; i < htbl->size; i++) {
1254 		struct ipoib_neigh *neigh;
1255 		struct ipoib_neigh __rcu **np = &htbl->buckets[i];
1256 
1257 		while ((neigh = rcu_dereference_protected(*np,
1258 							  lockdep_is_held(&priv->lock))) != NULL) {
1259 			/* was the neigh idle for two GC periods */
1260 			if (time_after(neigh_obsolete, neigh->alive)) {
1261 
1262 				ipoib_check_and_add_mcast_sendonly(priv, neigh->daddr + 4, &remove_list);
1263 
1264 				rcu_assign_pointer(*np,
1265 						   rcu_dereference_protected(neigh->hnext,
1266 									     lockdep_is_held(&priv->lock)));
1267 				/* remove from path/mc list */
1268 				list_del_init(&neigh->list);
1269 				call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
1270 			} else {
1271 				np = &neigh->hnext;
1272 			}
1273 
1274 		}
1275 	}
1276 
1277 out_unlock:
1278 	spin_unlock_irqrestore(&priv->lock, flags);
1279 	ipoib_mcast_remove_list(&remove_list);
1280 }
1281 
1282 static void ipoib_reap_neigh(struct work_struct *work)
1283 {
1284 	struct ipoib_dev_priv *priv =
1285 		container_of(work, struct ipoib_dev_priv, neigh_reap_task.work);
1286 
1287 	__ipoib_reap_neigh(priv);
1288 
1289 	if (!test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
1290 		queue_delayed_work(priv->wq, &priv->neigh_reap_task,
1291 				   arp_tbl.gc_interval);
1292 }
1293 
1294 
1295 static struct ipoib_neigh *ipoib_neigh_ctor(u8 *daddr,
1296 				      struct net_device *dev)
1297 {
1298 	struct ipoib_neigh *neigh;
1299 
1300 	neigh = kzalloc(sizeof *neigh, GFP_ATOMIC);
1301 	if (!neigh)
1302 		return NULL;
1303 
1304 	neigh->dev = dev;
1305 	memcpy(&neigh->daddr, daddr, sizeof(neigh->daddr));
1306 	skb_queue_head_init(&neigh->queue);
1307 	INIT_LIST_HEAD(&neigh->list);
1308 	ipoib_cm_set(neigh, NULL);
1309 	/* one ref on behalf of the caller */
1310 	atomic_set(&neigh->refcnt, 1);
1311 
1312 	return neigh;
1313 }
1314 
1315 struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr,
1316 				      struct net_device *dev)
1317 {
1318 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1319 	struct ipoib_neigh_table *ntbl = &priv->ntbl;
1320 	struct ipoib_neigh_hash *htbl;
1321 	struct ipoib_neigh *neigh;
1322 	u32 hash_val;
1323 
1324 	htbl = rcu_dereference_protected(ntbl->htbl,
1325 					 lockdep_is_held(&priv->lock));
1326 	if (!htbl) {
1327 		neigh = NULL;
1328 		goto out_unlock;
1329 	}
1330 
1331 	/* We need to add a new neigh, but another thread may have beaten us to
1332 	 * it; recalculate the hash (a resize may have occurred) and search again
1333 	 */
1334 	hash_val = ipoib_addr_hash(htbl, daddr);
1335 	for (neigh = rcu_dereference_protected(htbl->buckets[hash_val],
1336 					       lockdep_is_held(&priv->lock));
1337 	     neigh != NULL;
1338 	     neigh = rcu_dereference_protected(neigh->hnext,
1339 					       lockdep_is_held(&priv->lock))) {
1340 		if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
1341 			/* found, take one ref on behalf of the caller */
1342 			if (!atomic_inc_not_zero(&neigh->refcnt)) {
1343 				/* deleted */
1344 				neigh = NULL;
1345 				break;
1346 			}
1347 			neigh->alive = jiffies;
1348 			goto out_unlock;
1349 		}
1350 	}
1351 
1352 	neigh = ipoib_neigh_ctor(daddr, dev);
1353 	if (!neigh)
1354 		goto out_unlock;
1355 
1356 	/* one ref on behalf of the hash table */
1357 	atomic_inc(&neigh->refcnt);
1358 	neigh->alive = jiffies;
1359 	/* put in hash */
1360 	rcu_assign_pointer(neigh->hnext,
1361 			   rcu_dereference_protected(htbl->buckets[hash_val],
1362 						     lockdep_is_held(&priv->lock)));
1363 	rcu_assign_pointer(htbl->buckets[hash_val], neigh);
1364 	atomic_inc(&ntbl->entries);
1365 
1366 out_unlock:
1367 
1368 	return neigh;
1369 }
1370 
1371 void ipoib_neigh_dtor(struct ipoib_neigh *neigh)
1372 {
1373 	/* neigh reference count was dropped to zero */
1374 	struct net_device *dev = neigh->dev;
1375 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1376 	struct sk_buff *skb;
1377 	if (neigh->ah)
1378 		ipoib_put_ah(neigh->ah);
1379 	while ((skb = __skb_dequeue(&neigh->queue))) {
1380 		++dev->stats.tx_dropped;
1381 		dev_kfree_skb_any(skb);
1382 	}
1383 	if (ipoib_cm_get(neigh))
1384 		ipoib_cm_destroy_tx(ipoib_cm_get(neigh));
1385 	ipoib_dbg(ipoib_priv(dev),
1386 		  "neigh free for %06x %pI6\n",
1387 		  IPOIB_QPN(neigh->daddr),
1388 		  neigh->daddr + 4);
1389 	kfree(neigh);
1390 	if (atomic_dec_and_test(&priv->ntbl.entries)) {
1391 		if (test_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags))
1392 			complete(&priv->ntbl.flushed);
1393 	}
1394 }
1395 
1396 static void ipoib_neigh_reclaim(struct rcu_head *rp)
1397 {
1398 	/* Called as a result of removal from hash table */
1399 	struct ipoib_neigh *neigh = container_of(rp, struct ipoib_neigh, rcu);
1400 	/* note TX context may hold another ref */
1401 	ipoib_neigh_put(neigh);
1402 }
1403 
1404 void ipoib_neigh_free(struct ipoib_neigh *neigh)
1405 {
1406 	struct net_device *dev = neigh->dev;
1407 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1408 	struct ipoib_neigh_table *ntbl = &priv->ntbl;
1409 	struct ipoib_neigh_hash *htbl;
1410 	struct ipoib_neigh __rcu **np;
1411 	struct ipoib_neigh *n;
1412 	u32 hash_val;
1413 
1414 	htbl = rcu_dereference_protected(ntbl->htbl,
1415 					lockdep_is_held(&priv->lock));
1416 	if (!htbl)
1417 		return;
1418 
1419 	hash_val = ipoib_addr_hash(htbl, neigh->daddr);
1420 	np = &htbl->buckets[hash_val];
1421 	for (n = rcu_dereference_protected(*np,
1422 					    lockdep_is_held(&priv->lock));
1423 	     n != NULL;
1424 	     n = rcu_dereference_protected(*np,
1425 					lockdep_is_held(&priv->lock))) {
1426 		if (n == neigh) {
1427 			/* found */
1428 			rcu_assign_pointer(*np,
1429 					   rcu_dereference_protected(neigh->hnext,
1430 								     lockdep_is_held(&priv->lock)));
1431 			/* remove from parent list */
1432 			list_del_init(&neigh->list);
1433 			call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
1434 			return;
1435 		} else {
1436 			np = &n->hnext;
1437 		}
1438 	}
1439 }
1440 
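/*
 * The neighbour hash table is an RCU-protected array of buckets sized from
 * arp_tbl.gc_thresh3 (rounded up to a power of two); readers use
 * rcu_read_lock_bh() while writers hold priv->lock.  A delayed work item
 * garbage-collects entries that were idle for two ARP GC intervals.
 */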
1441 static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
1442 {
1443 	struct ipoib_neigh_table *ntbl = &priv->ntbl;
1444 	struct ipoib_neigh_hash *htbl;
1445 	struct ipoib_neigh __rcu **buckets;
1446 	u32 size;
1447 
1448 	clear_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
1449 	ntbl->htbl = NULL;
1450 	htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
1451 	if (!htbl)
1452 		return -ENOMEM;
1453 	set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
1454 	size = roundup_pow_of_two(arp_tbl.gc_thresh3);
1455 	buckets = kzalloc(size * sizeof(*buckets), GFP_KERNEL);
1456 	if (!buckets) {
1457 		kfree(htbl);
1458 		return -ENOMEM;
1459 	}
1460 	htbl->size = size;
1461 	htbl->mask = (size - 1);
1462 	htbl->buckets = buckets;
1463 	RCU_INIT_POINTER(ntbl->htbl, htbl);
1464 	htbl->ntbl = ntbl;
1465 	atomic_set(&ntbl->entries, 0);
1466 
1467 	/* start garbage collection */
1468 	clear_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
1469 	queue_delayed_work(priv->wq, &priv->neigh_reap_task,
1470 			   arp_tbl.gc_interval);
1471 
1472 	return 0;
1473 }
1474 
1475 static void neigh_hash_free_rcu(struct rcu_head *head)
1476 {
1477 	struct ipoib_neigh_hash *htbl = container_of(head,
1478 						    struct ipoib_neigh_hash,
1479 						    rcu);
1480 	struct ipoib_neigh __rcu **buckets = htbl->buckets;
1481 	struct ipoib_neigh_table *ntbl = htbl->ntbl;
1482 
1483 	kfree(buckets);
1484 	kfree(htbl);
1485 	complete(&ntbl->deleted);
1486 }
1487 
1488 void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid)
1489 {
1490 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1491 	struct ipoib_neigh_table *ntbl = &priv->ntbl;
1492 	struct ipoib_neigh_hash *htbl;
1493 	unsigned long flags;
1494 	int i;
1495 
1496 	/* remove all neigh connected to a given path or mcast */
1497 	spin_lock_irqsave(&priv->lock, flags);
1498 
1499 	htbl = rcu_dereference_protected(ntbl->htbl,
1500 					 lockdep_is_held(&priv->lock));
1501 
1502 	if (!htbl)
1503 		goto out_unlock;
1504 
1505 	for (i = 0; i < htbl->size; i++) {
1506 		struct ipoib_neigh *neigh;
1507 		struct ipoib_neigh __rcu **np = &htbl->buckets[i];
1508 
1509 		while ((neigh = rcu_dereference_protected(*np,
1510 							  lockdep_is_held(&priv->lock))) != NULL) {
1511 			/* delete neighs belonging to this parent */
1512 			if (!memcmp(gid, neigh->daddr + 4, sizeof (union ib_gid))) {
1513 				rcu_assign_pointer(*np,
1514 						   rcu_dereference_protected(neigh->hnext,
1515 									     lockdep_is_held(&priv->lock)));
1516 				/* remove from parent list */
1517 				list_del_init(&neigh->list);
1518 				call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
1519 			} else {
1520 				np = &neigh->hnext;
1521 			}
1522 
1523 		}
1524 	}
1525 out_unlock:
1526 	spin_unlock_irqrestore(&priv->lock, flags);
1527 }
1528 
1529 static void ipoib_flush_neighs(struct ipoib_dev_priv *priv)
1530 {
1531 	struct ipoib_neigh_table *ntbl = &priv->ntbl;
1532 	struct ipoib_neigh_hash *htbl;
1533 	unsigned long flags;
1534 	int i, wait_flushed = 0;
1535 
1536 	init_completion(&priv->ntbl.flushed);
1537 
1538 	spin_lock_irqsave(&priv->lock, flags);
1539 
1540 	htbl = rcu_dereference_protected(ntbl->htbl,
1541 					lockdep_is_held(&priv->lock));
1542 	if (!htbl)
1543 		goto out_unlock;
1544 
1545 	wait_flushed = atomic_read(&priv->ntbl.entries);
1546 	if (!wait_flushed)
1547 		goto free_htbl;
1548 
1549 	for (i = 0; i < htbl->size; i++) {
1550 		struct ipoib_neigh *neigh;
1551 		struct ipoib_neigh __rcu **np = &htbl->buckets[i];
1552 
1553 		while ((neigh = rcu_dereference_protected(*np,
1554 				       lockdep_is_held(&priv->lock))) != NULL) {
1555 			rcu_assign_pointer(*np,
1556 					   rcu_dereference_protected(neigh->hnext,
1557 								     lockdep_is_held(&priv->lock)));
1558 			/* remove from path/mc list */
1559 			list_del_init(&neigh->list);
1560 			call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
1561 		}
1562 	}
1563 
1564 free_htbl:
1565 	rcu_assign_pointer(ntbl->htbl, NULL);
1566 	call_rcu(&htbl->rcu, neigh_hash_free_rcu);
1567 
1568 out_unlock:
1569 	spin_unlock_irqrestore(&priv->lock, flags);
1570 	if (wait_flushed)
1571 		wait_for_completion(&priv->ntbl.flushed);
1572 }
1573 
1574 static void ipoib_neigh_hash_uninit(struct net_device *dev)
1575 {
1576 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1577 	int stopped;
1578 
1579 	ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n");
1580 	init_completion(&priv->ntbl.deleted);
1581 	set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
1582 
1583 	/* Stop GC; if called after an init failure we need to cancel the work */
1584 	stopped = test_and_set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
1585 	if (!stopped)
1586 		cancel_delayed_work(&priv->neigh_reap_task);
1587 
1588 	ipoib_flush_neighs(priv);
1589 
1590 	wait_for_completion(&priv->ntbl.deleted);
1591 }
1592 
1593 static void ipoib_dev_uninit_default(struct net_device *dev)
1594 {
1595 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1596 
1597 	ipoib_transport_dev_cleanup(dev);
1598 
1599 	netif_napi_del(&priv->napi);
1600 
1601 	ipoib_cm_dev_cleanup(dev);
1602 
1603 	kfree(priv->rx_ring);
1604 	vfree(priv->tx_ring);
1605 
1606 	priv->rx_ring = NULL;
1607 	priv->tx_ring = NULL;
1608 }
1609 
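/*
 * Default per-device init for the software (non-offloaded) datapath:
 * register NAPI, allocate the RX/TX rings, create the IB resources via
 * ipoib_transport_dev_init() and derive bytes 1-3 of the hardware address
 * from the QP number.
 */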
1610 static int ipoib_dev_init_default(struct net_device *dev)
1611 {
1612 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1613 
1614 	netif_napi_add(dev, &priv->napi, ipoib_poll, NAPI_POLL_WEIGHT);
1615 
1616 	/* Allocate RX/TX "rings" to hold queued skbs */
1617 	priv->rx_ring =	kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
1618 				GFP_KERNEL);
1619 	if (!priv->rx_ring)
1620 		goto out;
1621 
1622 	priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
1623 	if (!priv->tx_ring) {
1624 		printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
1625 		       priv->ca->name, ipoib_sendq_size);
1626 		goto out_rx_ring_cleanup;
1627 	}
1628 
1629 	/* priv->tx_head, tx_tail & tx_outstanding are already 0 */
1630 
1631 	if (ipoib_transport_dev_init(dev, priv->ca)) {
1632 		pr_warn("%s: ipoib_transport_dev_init failed\n",
1633 			priv->ca->name);
1634 		goto out_tx_ring_cleanup;
1635 	}
1636 
1637 	/* after the QP is created, set the device address from the QP number */
1638 	priv->dev->dev_addr[1] = (priv->qp->qp_num >> 16) & 0xff;
1639 	priv->dev->dev_addr[2] = (priv->qp->qp_num >>  8) & 0xff;
1640 	priv->dev->dev_addr[3] = (priv->qp->qp_num) & 0xff;
1641 
1642 	setup_timer(&priv->poll_timer, ipoib_ib_tx_timer_func,
1643 		    (unsigned long)dev);
1644 
1645 	return 0;
1646 
1647 out_tx_ring_cleanup:
1648 	vfree(priv->tx_ring);
1649 
1650 out_rx_ring_cleanup:
1651 	kfree(priv->rx_ring);
1652 
1653 out:
1654 	netif_napi_del(&priv->napi);
1655 	return -ENOMEM;
1656 }
1657 
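/*
 * Common device init: allocate the ordered per-device workqueue and PD,
 * let the rdma_netdev implementation set up its hardware resources, then
 * initialize the neighbour hash table and, if the interface is already
 * administratively up, open the IB device.
 */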
1658 int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
1659 {
1660 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1661 	int ret = -ENOMEM;
1662 
1663 	priv->ca = ca;
1664 	priv->port = port;
1665 	priv->qp = NULL;
1666 
1667 	/*
1668 	 * The various IPoIB tasks assume they will never race against
1669 	 * themselves, so always use a single-threaded workqueue.
1670 	 */
1671 	priv->wq = alloc_ordered_workqueue("ipoib_wq", WQ_MEM_RECLAIM);
1672 	if (!priv->wq) {
1673 		pr_warn("%s: failed to allocate device WQ\n", dev->name);
1674 		goto out;
1675 	}
1676 
1677 	/* Create the PD, which is used for both the control path and the datapath */
1678 	priv->pd = ib_alloc_pd(priv->ca, 0);
1679 	if (IS_ERR(priv->pd)) {
1680 		pr_warn("%s: failed to allocate PD\n", ca->name);
1681 		goto clean_wq;
1682 	}
1683 
1684 	ret = priv->rn_ops->ndo_init(dev);
1685 	if (ret) {
1686 		pr_warn("%s failed to init HW resource\n", dev->name);
1687 		goto out_free_pd;
1688 	}
1689 
1690 	if (ipoib_neigh_hash_init(priv) < 0) {
1691 		pr_warn("%s failed to init neigh hash\n", dev->name);
1692 		goto out_dev_uninit;
1693 	}
1694 
1695 	if (dev->flags & IFF_UP) {
1696 		if (ipoib_ib_dev_open(dev)) {
1697 			pr_warn("%s failed to open device\n", dev->name);
1698 			ret = -ENODEV;
1699 			goto out_dev_uninit;
1700 		}
1701 	}
1702 
1703 	return 0;
1704 
1705 out_dev_uninit:
1706 	ipoib_ib_dev_cleanup(dev);
1707 
1708 out_free_pd:
1709 	if (priv->pd) {
1710 		ib_dealloc_pd(priv->pd);
1711 		priv->pd = NULL;
1712 	}
1713 
1714 clean_wq:
1715 	if (priv->wq) {
1716 		destroy_workqueue(priv->wq);
1717 		priv->wq = NULL;
1718 	}
1719 
1720 out:
1721 	return ret;
1722 }
1723 
1724 void ipoib_dev_cleanup(struct net_device *dev)
1725 {
1726 	struct ipoib_dev_priv *priv = ipoib_priv(dev), *cpriv, *tcpriv;
1727 	LIST_HEAD(head);
1728 
1729 	ASSERT_RTNL();
1730 
1731 	/* Delete any child interfaces first */
1732 	list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
1733 		/* Stop GC on child */
1734 		set_bit(IPOIB_STOP_NEIGH_GC, &cpriv->flags);
1735 		cancel_delayed_work(&cpriv->neigh_reap_task);
1736 		unregister_netdevice_queue(cpriv->dev, &head);
1737 	}
1738 	unregister_netdevice_many(&head);
1739 
1740 	ipoib_neigh_hash_uninit(dev);
1741 
1742 	ipoib_ib_dev_cleanup(dev);
1743 
1744 	/* no further work will be queued on priv->wq; flush and destroy it */
1745 	if (priv->wq) {
1746 		flush_workqueue(priv->wq);
1747 		destroy_workqueue(priv->wq);
1748 		priv->wq = NULL;
1749 	}
1750 }
1751 
1752 static int ipoib_set_vf_link_state(struct net_device *dev, int vf, int link_state)
1753 {
1754 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1755 
1756 	return ib_set_vf_link_state(priv->ca, vf, priv->port, link_state);
1757 }
1758 
1759 static int ipoib_get_vf_config(struct net_device *dev, int vf,
1760 			       struct ifla_vf_info *ivf)
1761 {
1762 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1763 	int err;
1764 
1765 	err = ib_get_vf_config(priv->ca, vf, priv->port, ivf);
1766 	if (err)
1767 		return err;
1768 
1769 	ivf->vf = vf;
1770 
1771 	return 0;
1772 }
1773 
1774 static int ipoib_set_vf_guid(struct net_device *dev, int vf, u64 guid, int type)
1775 {
1776 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1777 
1778 	if (type != IFLA_VF_IB_NODE_GUID && type != IFLA_VF_IB_PORT_GUID)
1779 		return -EINVAL;
1780 
1781 	return ib_set_vf_guid(priv->ca, vf, priv->port, guid, type);
1782 }
1783 
1784 static int ipoib_get_vf_stats(struct net_device *dev, int vf,
1785 			      struct ifla_vf_stats *vf_stats)
1786 {
1787 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1788 
1789 	return ib_get_vf_stats(priv->ca, vf, priv->port, vf_stats);
1790 }
1791 
1792 static const struct header_ops ipoib_header_ops = {
1793 	.create	= ipoib_hard_header,
1794 };
1795 
1796 static const struct net_device_ops ipoib_netdev_ops_pf = {
1797 	.ndo_uninit		 = ipoib_uninit,
1798 	.ndo_open		 = ipoib_open,
1799 	.ndo_stop		 = ipoib_stop,
1800 	.ndo_change_mtu		 = ipoib_change_mtu,
1801 	.ndo_fix_features	 = ipoib_fix_features,
1802 	.ndo_start_xmit		 = ipoib_start_xmit,
1803 	.ndo_tx_timeout		 = ipoib_timeout,
1804 	.ndo_set_rx_mode	 = ipoib_set_mcast_list,
1805 	.ndo_get_iflink		 = ipoib_get_iflink,
1806 	.ndo_set_vf_link_state	 = ipoib_set_vf_link_state,
1807 	.ndo_get_vf_config	 = ipoib_get_vf_config,
1808 	.ndo_get_vf_stats	 = ipoib_get_vf_stats,
1809 	.ndo_set_vf_guid	 = ipoib_set_vf_guid,
1810 	.ndo_set_mac_address	 = ipoib_set_mac,
1811 };
1812 
1813 static const struct net_device_ops ipoib_netdev_ops_vf = {
1814 	.ndo_uninit		 = ipoib_uninit,
1815 	.ndo_open		 = ipoib_open,
1816 	.ndo_stop		 = ipoib_stop,
1817 	.ndo_change_mtu		 = ipoib_change_mtu,
1818 	.ndo_fix_features	 = ipoib_fix_features,
1819 	.ndo_start_xmit	 	 = ipoib_start_xmit,
1820 	.ndo_tx_timeout		 = ipoib_timeout,
1821 	.ndo_set_rx_mode	 = ipoib_set_mcast_list,
1822 	.ndo_get_iflink		 = ipoib_get_iflink,
1823 };
1824 
1825 void ipoib_setup_common(struct net_device *dev)
1826 {
1827 	dev->header_ops		 = &ipoib_header_ops;
1828 
1829 	ipoib_set_ethtool_ops(dev);
1830 
1831 	dev->watchdog_timeo	 = HZ;
1832 
1833 	dev->flags		|= IFF_BROADCAST | IFF_MULTICAST;
1834 
1835 	dev->hard_header_len	 = IPOIB_HARD_LEN;
1836 	dev->addr_len		 = INFINIBAND_ALEN;
1837 	dev->type		 = ARPHRD_INFINIBAND;
1838 	dev->tx_queue_len	 = ipoib_sendq_size * 2;
1839 	dev->features		 = (NETIF_F_VLAN_CHALLENGED	|
1840 				    NETIF_F_HIGHDMA);
1841 	netif_keep_dst(dev);
1842 
1843 	memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);
1844 }
1845 
1846 static void ipoib_build_priv(struct net_device *dev)
1847 {
1848 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1849 
1850 	priv->dev = dev;
1851 	spin_lock_init(&priv->lock);
1852 	init_rwsem(&priv->vlan_rwsem);
1853 
1854 	INIT_LIST_HEAD(&priv->path_list);
1855 	INIT_LIST_HEAD(&priv->child_intfs);
1856 	INIT_LIST_HEAD(&priv->dead_ahs);
1857 	INIT_LIST_HEAD(&priv->multicast_list);
1858 
1859 	INIT_DELAYED_WORK(&priv->mcast_task,   ipoib_mcast_join_task);
1860 	INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task);
1861 	INIT_WORK(&priv->flush_light,   ipoib_ib_dev_flush_light);
1862 	INIT_WORK(&priv->flush_normal,   ipoib_ib_dev_flush_normal);
1863 	INIT_WORK(&priv->flush_heavy,   ipoib_ib_dev_flush_heavy);
1864 	INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
1865 	INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
1866 	INIT_DELAYED_WORK(&priv->neigh_reap_task, ipoib_reap_neigh);
1867 }
1868 
1869 static const struct net_device_ops ipoib_netdev_default_pf = {
1870 	.ndo_init		 = ipoib_dev_init_default,
1871 	.ndo_uninit		 = ipoib_dev_uninit_default,
1872 	.ndo_open		 = ipoib_ib_dev_open_default,
1873 	.ndo_stop		 = ipoib_ib_dev_stop_default,
1874 };
1875 
1876 static struct net_device
1877 *ipoib_create_netdev_default(struct ib_device *hca,
1878 			     const char *name,
1879 			     unsigned char name_assign_type,
1880 			     void (*setup)(struct net_device *))
1881 {
1882 	struct net_device *dev;
1883 	struct rdma_netdev *rn;
1884 
1885 	dev = alloc_netdev((int)sizeof(struct rdma_netdev),
1886 			   name,
1887 			   name_assign_type, setup);
1888 	if (!dev)
1889 		return NULL;
1890 
1891 	rn = netdev_priv(dev);
1892 
1893 	rn->send = ipoib_send;
1894 	rn->attach_mcast = ipoib_mcast_attach;
1895 	rn->detach_mcast = ipoib_mcast_detach;
1896 	rn->free_rdma_netdev = free_netdev;
1897 	rn->hca = hca;
1898 
1899 	dev->netdev_ops = &ipoib_netdev_default_pf;
1900 
1901 	return dev;
1902 }
1903 
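/*
 * Prefer a hardware-accelerated rdma_netdev from the HCA driver if it
 * provides alloc_rdma_netdev(); fall back to the software implementation
 * when the driver does not implement it or returns -EOPNOTSUPP.
 */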
1904 static struct net_device *ipoib_get_netdev(struct ib_device *hca, u8 port,
1905 					   const char *name)
1906 {
1907 	struct net_device *dev;
1908 
1909 	if (hca->alloc_rdma_netdev) {
1910 		dev = hca->alloc_rdma_netdev(hca, port,
1911 					     RDMA_NETDEV_IPOIB, name,
1912 					     NET_NAME_UNKNOWN,
1913 					     ipoib_setup_common);
1914 		if (IS_ERR_OR_NULL(dev) && PTR_ERR(dev) != -EOPNOTSUPP)
1915 			return NULL;
1916 	}
1917 
1918 	if (!hca->alloc_rdma_netdev || PTR_ERR(dev) == -EOPNOTSUPP)
1919 		dev = ipoib_create_netdev_default(hca, name, NET_NAME_UNKNOWN,
1920 						  ipoib_setup_common);
1921 
1922 	return dev;
1923 }
1924 
1925 struct ipoib_dev_priv *ipoib_intf_alloc(struct ib_device *hca, u8 port,
1926 					const char *name)
1927 {
1928 	struct net_device *dev;
1929 	struct ipoib_dev_priv *priv;
1930 	struct rdma_netdev *rn;
1931 
1932 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1933 	if (!priv)
1934 		return NULL;
1935 
1936 	dev = ipoib_get_netdev(hca, port, name);
1937 	if (!dev)
1938 		goto free_priv;
1939 
1940 	priv->rn_ops = dev->netdev_ops;
1941 
1942 	/* FIXME: hca_caps isn't filled in yet; do this after the capability query */
1943 	if (priv->hca_caps & IB_DEVICE_VIRTUAL_FUNCTION)
1944 		dev->netdev_ops	= &ipoib_netdev_ops_vf;
1945 	else
1946 		dev->netdev_ops	= &ipoib_netdev_ops_pf;
1947 
1948 	rn = netdev_priv(dev);
1949 	rn->clnt_priv = priv;
1950 	ipoib_build_priv(dev);
1951 
1952 	return priv;
1953 free_priv:
1954 	kfree(priv);
1955 	return NULL;
1956 }
1957 
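/*
 * sysfs "pkey" attribute: reports the interface P_Key in hex, e.g.
 * "cat /sys/class/net/ib0/pkey" (interface name is only an example).
 */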
1958 static ssize_t show_pkey(struct device *dev,
1959 			 struct device_attribute *attr, char *buf)
1960 {
1961 	struct net_device *ndev = to_net_dev(dev);
1962 	struct ipoib_dev_priv *priv = ipoib_priv(ndev);
1963 
1964 	return sprintf(buf, "0x%04x\n", priv->pkey);
1965 }
1966 static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
1967 
1968 static ssize_t show_umcast(struct device *dev,
1969 			   struct device_attribute *attr, char *buf)
1970 {
1971 	struct net_device *ndev = to_net_dev(dev);
1972 	struct ipoib_dev_priv *priv = ipoib_priv(ndev);
1973 
1974 	return sprintf(buf, "%d\n", test_bit(IPOIB_FLAG_UMCAST, &priv->flags));
1975 }
1976 
1977 void ipoib_set_umcast(struct net_device *ndev, int umcast_val)
1978 {
1979 	struct ipoib_dev_priv *priv = ipoib_priv(ndev);
1980 
1981 	if (umcast_val > 0) {
1982 		set_bit(IPOIB_FLAG_UMCAST, &priv->flags);
1983 		ipoib_warn(priv, "ignoring multicast groups joined directly by userspace\n");
1984 	} else {
1985 		clear_bit(IPOIB_FLAG_UMCAST, &priv->flags);
1986 	}
1987 }
1988 
1989 static ssize_t set_umcast(struct device *dev,
1990 			  struct device_attribute *attr,
1991 			  const char *buf, size_t count)
1992 {
1993 	unsigned long umcast_val = simple_strtoul(buf, NULL, 0);
1994 
1995 	ipoib_set_umcast(to_net_dev(dev), umcast_val);
1996 
1997 	return count;
1998 }
1999 static DEVICE_ATTR(umcast, S_IWUSR | S_IRUGO, show_umcast, set_umcast);
2000 
2001 int ipoib_add_umcast_attr(struct net_device *dev)
2002 {
2003 	return device_create_file(&dev->dev, &dev_attr_umcast);
2004 }
2005 
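/*
 * Replace the GUID (interface ID) portion of the local GID and of the
 * device hardware address, and propagate the new GUID to all child
 * (VLAN) interfaces of a parent device.
 */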
2006 static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid)
2007 {
2008 	struct ipoib_dev_priv *child_priv;
2009 	struct net_device *netdev = priv->dev;
2010 
2011 	netif_addr_lock_bh(netdev);
2012 
2013 	memcpy(&priv->local_gid.global.interface_id,
2014 	       &gid->global.interface_id,
2015 	       sizeof(gid->global.interface_id));
2016 	memcpy(netdev->dev_addr + 4, &priv->local_gid, sizeof(priv->local_gid));
2017 	clear_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
2018 
2019 	netif_addr_unlock_bh(netdev);
2020 
2021 	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
2022 		down_read(&priv->vlan_rwsem);
2023 		list_for_each_entry(child_priv, &priv->child_intfs, list)
2024 			set_base_guid(child_priv, gid);
2025 		up_read(&priv->vlan_rwsem);
2026 	}
2027 }
2028 
2029 static int ipoib_check_lladdr(struct net_device *dev,
2030 			      struct sockaddr_storage *ss)
2031 {
2032 	union ib_gid *gid = (union ib_gid *)(ss->__data + 4);
2033 	int ret = 0;
2034 
2035 	netif_addr_lock_bh(dev);
2036 
2037 	/* Make sure the QPN, reserved bytes and subnet prefix match the
2038 	 * current lladdr; this also ensures the new lladdr is unicast.
2039 	 */
2040 	if (memcmp(dev->dev_addr, ss->__data,
2041 		   4 + sizeof(gid->global.subnet_prefix)) ||
2042 	    gid->global.interface_id == 0)
2043 		ret = -EINVAL;
2044 
2045 	netif_addr_unlock_bh(dev);
2046 
2047 	return ret;
2048 }
2049 
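/*
 * ndo_set_mac_address handler: only the GUID portion of the IPoIB
 * address may change (QPN and subnet prefix must match the current
 * lladdr); on success a light flush is queued to apply the change.
 */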
2050 static int ipoib_set_mac(struct net_device *dev, void *addr)
2051 {
2052 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
2053 	struct sockaddr_storage *ss = addr;
2054 	int ret;
2055 
2056 	if (!(dev->priv_flags & IFF_LIVE_ADDR_CHANGE) && netif_running(dev))
2057 		return -EBUSY;
2058 
2059 	ret = ipoib_check_lladdr(dev, ss);
2060 	if (ret)
2061 		return ret;
2062 
2063 	set_base_guid(priv, (union ib_gid *)(ss->__data + 4));
2064 
2065 	queue_work(ipoib_workqueue, &priv->flush_light);
2066 
2067 	return 0;
2068 }
2069 
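/*
 * sysfs "create_child" attribute: writing a P_Key creates a child
 * interface on that partition, e.g. (assuming the parent is ib0):
 *	echo 0x8001 > /sys/class/net/ib0/create_child
 */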
2070 static ssize_t create_child(struct device *dev,
2071 			    struct device_attribute *attr,
2072 			    const char *buf, size_t count)
2073 {
2074 	int pkey;
2075 	int ret;
2076 
2077 	if (sscanf(buf, "%i", &pkey) != 1)
2078 		return -EINVAL;
2079 
2080 	if (pkey <= 0 || pkey > 0xffff || pkey == 0x8000)
2081 		return -EINVAL;
2082 
2083 	/*
2084 	 * Set the full membership bit, so that we join the right
2085 	 * broadcast group, etc.
2086 	 */
2087 	pkey |= 0x8000;
2088 
2089 	ret = ipoib_vlan_add(to_net_dev(dev), pkey);
2090 
2091 	return ret ? ret : count;
2092 }
2093 static DEVICE_ATTR(create_child, S_IWUSR, NULL, create_child);
2094 
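/*
 * sysfs "delete_child" attribute: writing a P_Key removes the matching
 * child interface, e.g. echo 0x8001 > /sys/class/net/ib0/delete_child
 * (interface name is only an example).
 */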
2095 static ssize_t delete_child(struct device *dev,
2096 			    struct device_attribute *attr,
2097 			    const char *buf, size_t count)
2098 {
2099 	int pkey;
2100 	int ret;
2101 
2102 	if (sscanf(buf, "%i", &pkey) != 1)
2103 		return -EINVAL;
2104 
2105 	if (pkey < 0 || pkey > 0xffff)
2106 		return -EINVAL;
2107 
2108 	ret = ipoib_vlan_delete(to_net_dev(dev), pkey);
2109 
2110 	return ret ? ret : count;
2111 }
2113 static DEVICE_ATTR(delete_child, S_IWUSR, NULL, delete_child);
2114 
2115 int ipoib_add_pkey_attr(struct net_device *dev)
2116 {
2117 	return device_create_file(&dev->dev, &dev_attr_pkey);
2118 }
2119 
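/*
 * Mirror the HCA's offload capabilities into netdev features: checksum
 * offload enables IP/RX checksumming, and TSO is advertised only when
 * the device supports UD TSO as well.
 */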
2120 void ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
2121 {
2122 	priv->hca_caps = hca->attrs.device_cap_flags;
2123 
2124 	if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
2125 		priv->dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
2126 
2127 		if (priv->hca_caps & IB_DEVICE_UD_TSO)
2128 			priv->dev->hw_features |= NETIF_F_TSO;
2129 
2130 		priv->dev->features |= priv->dev->hw_features;
2131 	}
2132 }
2133 
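/*
 * Create and register one IPoIB interface for a single HCA port:
 * query the port MTU, P_Key and GID, build the hardware address,
 * initialize the device and register it along with its sysfs attributes.
 */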
2134 static struct net_device *ipoib_add_port(const char *format,
2135 					 struct ib_device *hca, u8 port)
2136 {
2137 	struct ipoib_dev_priv *priv;
2138 	struct ib_port_attr attr;
2139 	int result = -ENOMEM;
2140 
2141 	priv = ipoib_intf_alloc(hca, port, format);
2142 	if (!priv)
2143 		goto alloc_mem_failed;
2144 
2145 	SET_NETDEV_DEV(priv->dev, hca->dev.parent);
2146 	priv->dev->dev_id = port - 1;
2147 
2148 	result = ib_query_port(hca, port, &attr);
2149 	if (!result) {
2150 		priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
2151 	} else {
2152 		printk(KERN_WARNING "%s: ib_query_port %d failed\n",
2153 		       hca->name, port);
2154 		goto device_init_failed;
2155 	}
2156 
2157 	/* MTU will be reset when mcast join happens */
2158 	priv->dev->mtu  = IPOIB_UD_MTU(priv->max_ib_mtu);
2159 	priv->mcast_mtu  = priv->admin_mtu = priv->dev->mtu;
2160 	priv->dev->max_mtu = IPOIB_CM_MTU;
2161 
2162 	priv->dev->neigh_priv_len = sizeof(struct ipoib_neigh);
2163 
2164 	result = ib_query_pkey(hca, port, 0, &priv->pkey);
2165 	if (result) {
2166 		printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
2167 		       hca->name, port, result);
2168 		goto device_init_failed;
2169 	}
2170 
2171 	ipoib_set_dev_features(priv, hca);
2172 
2173 	/*
2174 	 * Set the full membership bit, so that we join the right
2175 	 * broadcast group, etc.
2176 	 */
2177 	priv->pkey |= 0x8000;
2178 
2179 	priv->dev->broadcast[8] = priv->pkey >> 8;
2180 	priv->dev->broadcast[9] = priv->pkey & 0xff;
2181 
2182 	result = ib_query_gid(hca, port, 0, &priv->local_gid, NULL);
2183 	if (result) {
2184 		printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
2185 		       hca->name, port, result);
2186 		goto device_init_failed;
2187 	}
2188 	memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof(union ib_gid));
2189 	set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
2190 
2191 	result = ipoib_dev_init(priv->dev, hca, port);
2192 	if (result < 0) {
2193 		printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
2194 		       hca->name, port, result);
2195 		goto device_init_failed;
2196 	}
2197 
2198 	INIT_IB_EVENT_HANDLER(&priv->event_handler,
2199 			      priv->ca, ipoib_event);
2200 	result = ib_register_event_handler(&priv->event_handler);
2201 	if (result < 0) {
2202 		printk(KERN_WARNING "%s: ib_register_event_handler failed for port %d (ret = %d)\n",
2203 		       hca->name, port, result);
2205 		goto event_failed;
2206 	}
2207 
2208 	result = register_netdev(priv->dev);
2209 	if (result) {
2210 		printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
2211 		       hca->name, port, result);
2212 		goto register_failed;
2213 	}
2214 
2215 	if (ipoib_cm_add_mode_attr(priv->dev))
2216 		goto sysfs_failed;
2217 	if (ipoib_add_pkey_attr(priv->dev))
2218 		goto sysfs_failed;
2219 	if (ipoib_add_umcast_attr(priv->dev))
2220 		goto sysfs_failed;
2221 	if (device_create_file(&priv->dev->dev, &dev_attr_create_child))
2222 		goto sysfs_failed;
2223 	if (device_create_file(&priv->dev->dev, &dev_attr_delete_child))
2224 		goto sysfs_failed;
2225 
2226 	return priv->dev;
2227 
2228 sysfs_failed:
2229 	unregister_netdev(priv->dev);
2230 
2231 register_failed:
2232 	ib_unregister_event_handler(&priv->event_handler);
2233 	flush_workqueue(ipoib_workqueue);
2234 	/* Stop GC if started before flush */
2235 	set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
2236 	cancel_delayed_work(&priv->neigh_reap_task);
2237 	flush_workqueue(priv->wq);
2238 
2239 event_failed:
2240 	ipoib_dev_cleanup(priv->dev);
2241 
2242 device_init_failed:
2243 	free_netdev(priv->dev);
2244 	kfree(priv);
2245 
2246 alloc_mem_failed:
2247 	return ERR_PTR(result);
2248 }
2249 
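/*
 * ib_client "add" callback: create an interface for every IB port of
 * the device and remember them in a per-device list stored as client
 * data.
 */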
2250 static void ipoib_add_one(struct ib_device *device)
2251 {
2252 	struct list_head *dev_list;
2253 	struct net_device *dev;
2254 	struct ipoib_dev_priv *priv;
2255 	int p;
2256 	int count = 0;
2257 
2258 	dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
2259 	if (!dev_list)
2260 		return;
2261 
2262 	INIT_LIST_HEAD(dev_list);
2263 
2264 	for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
2265 		if (!rdma_protocol_ib(device, p))
2266 			continue;
2267 		dev = ipoib_add_port("ib%d", device, p);
2268 		if (!IS_ERR(dev)) {
2269 			priv = ipoib_priv(dev);
2270 			list_add_tail(&priv->list, dev_list);
2271 			count++;
2272 		}
2273 	}
2274 
2275 	if (!count) {
2276 		kfree(dev_list);
2277 		return;
2278 	}
2279 
2280 	ib_set_client_data(device, &ipoib_client, dev_list);
2281 }
2282 
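/*
 * ib_client "remove" callback: bring each interface down, stop its
 * deferred work, unregister the netdev and free the per-device list.
 */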
2283 static void ipoib_remove_one(struct ib_device *device, void *client_data)
2284 {
2285 	struct ipoib_dev_priv *priv, *tmp, *cpriv, *tcpriv;
2286 	struct list_head *dev_list = client_data;
2287 
2288 	if (!dev_list)
2289 		return;
2290 
2291 	list_for_each_entry_safe(priv, tmp, dev_list, list) {
2292 		struct rdma_netdev *rn = netdev_priv(priv->dev);
2293 
2294 		ib_unregister_event_handler(&priv->event_handler);
2295 		flush_workqueue(ipoib_workqueue);
2296 
2297 		/* mark interface in the middle of destruction */
2298 		set_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags);
2299 
2300 		rtnl_lock();
2301 		dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP);
2302 		rtnl_unlock();
2303 
2304 		/* Stop GC */
2305 		set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
2306 		cancel_delayed_work(&priv->neigh_reap_task);
2307 		flush_workqueue(priv->wq);
2308 
2309 		unregister_netdev(priv->dev);
2310 		rn->free_rdma_netdev(priv->dev);
2311 
2312 		list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list)
2313 			kfree(cpriv);
2314 
2315 		kfree(priv);
2316 	}
2317 
2318 	kfree(dev_list);
2319 }
2320 
2321 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
2322 static struct notifier_block ipoib_netdev_notifier = {
2323 	.notifier_call = ipoib_netdev_event,
2324 };
2325 #endif
2326 
2327 static int __init ipoib_init_module(void)
2328 {
2329 	int ret;
2330 
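	/* Clamp the ring sizes to powers of two within the supported range */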
2331 	ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
2332 	ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
2333 	ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);
2334 
2335 	ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
2336 	ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
2337 	ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE);
2338 #ifdef CONFIG_INFINIBAND_IPOIB_CM
2339 	ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
2340 #endif
2341 
2342 	/*
2343 	 * When copying small received packets, we copy only from the linear
2344 	 * part of the skb, so the copybreak limit must fit within it.
2345 	 */
2346 	BUILD_BUG_ON(IPOIB_CM_COPYBREAK > IPOIB_CM_HEAD_SIZE);
2347 
2348 	ret = ipoib_register_debugfs();
2349 	if (ret)
2350 		return ret;
2351 
2352 	/*
2353 	 * We create a global workqueue here that is used for all flush
2354 	 * operations.  However, if you attempt to flush a workqueue
2355 	 * from a task on that same workqueue, it deadlocks the system.
2356 	 * We want to be able to flush the tasks associated with a
2357 	 * specific net device, so we also create a workqueue for each
2358 	 * netdevice.  We queue up the tasks for that device only on
2359 	 * its private workqueue, and we only queue up flush events
2360 	 * on our global flush workqueue.  This avoids the deadlocks.
2361 	 */
2362 	ipoib_workqueue = alloc_ordered_workqueue("ipoib_flush",
2363 						  WQ_MEM_RECLAIM);
2364 	if (!ipoib_workqueue) {
2365 		ret = -ENOMEM;
2366 		goto err_fs;
2367 	}
2368 
2369 	ib_sa_register_client(&ipoib_sa_client);
2370 
2371 	ret = ib_register_client(&ipoib_client);
2372 	if (ret)
2373 		goto err_sa;
2374 
2375 	ret = ipoib_netlink_init();
2376 	if (ret)
2377 		goto err_client;
2378 
2379 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
2380 	register_netdevice_notifier(&ipoib_netdev_notifier);
2381 #endif
2382 	return 0;
2383 
2384 err_client:
2385 	ib_unregister_client(&ipoib_client);
2386 
2387 err_sa:
2388 	ib_sa_unregister_client(&ipoib_sa_client);
2389 	destroy_workqueue(ipoib_workqueue);
2390 
2391 err_fs:
2392 	ipoib_unregister_debugfs();
2393 
2394 	return ret;
2395 }
2396 
2397 static void __exit ipoib_cleanup_module(void)
2398 {
2399 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
2400 	unregister_netdevice_notifier(&ipoib_netdev_notifier);
2401 #endif
2402 	ipoib_netlink_fini();
2403 	ib_unregister_client(&ipoib_client);
2404 	ib_sa_unregister_client(&ipoib_sa_client);
2405 	ipoib_unregister_debugfs();
2406 	destroy_workqueue(ipoib_workqueue);
2407 }
2408 
2409 module_init(ipoib_init_module);
2410 module_exit(ipoib_cleanup_module);
2411