/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ipoib.h"

#include <linux/module.h>

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>

#include <linux/if_arp.h>	/* For ARPHRD_xxx */

#include <linux/ip.h>
#include <linux/in.h>

#include <linux/jhash.h>
#include <net/arp.h>
#include <net/addrconf.h>
#include <linux/inetdevice.h>
#include <rdma/ib_cache.h>
#include <linux/pci.h>

#define DRV_VERSION "1.0.0"

const char ipoib_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE;
int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE;

module_param_named(send_queue_size, ipoib_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");
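
/*
 * Example (hypothetical values): both ring sizes can be set at module load
 * time, e.g. "modprobe ib_ipoib send_queue_size=128 recv_queue_size=512";
 * the 0444 permissions make them visible but read-only once loaded.
 */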

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
int ipoib_debug_level;

module_param_named(debug_level, ipoib_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif

struct ipoib_path_iter {
	struct net_device *dev;
	struct ipoib_path  path;
};

static const u8 ipv4_bcast_addr[] = {
	0x00, 0xff, 0xff, 0xff,
	0xff, 0x12, 0x40, 0x1b,	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,	0xff, 0xff, 0xff, 0xff
};

struct workqueue_struct *ipoib_workqueue;

struct ib_sa_client ipoib_sa_client;

static void ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device, void *client_data);
static void ipoib_neigh_reclaim(struct rcu_head *rp);
static struct net_device *ipoib_get_net_dev_by_params(
		struct ib_device *dev, u8 port, u16 pkey,
		const union ib_gid *gid, const struct sockaddr *addr,
		void *client_data);
static int ipoib_set_mac(struct net_device *dev, void *addr);

static struct ib_client ipoib_client = {
	.name   = "ipoib",
	.add    = ipoib_add_one,
	.remove = ipoib_remove_one,
	.get_net_dev_by_params = ipoib_get_net_dev_by_params,
};

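/*
 * ndo_open handler: mark the interface administratively up, bring up the
 * underlying IB resources and, for a parent interface, propagate IFF_UP to
 * any child interfaces. Returns 0 on success (or when the P_Key is not yet
 * assigned, in which case bring-up is deferred), -EINVAL otherwise.
 */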
int ipoib_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "bringing up interface\n");

	netif_carrier_off(dev);

	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	priv->sm_fullmember_sendonly_support = false;

	if (ipoib_ib_dev_open(dev)) {
		if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
			return 0;
		goto err_disable;
	}

	if (ipoib_ib_dev_up(dev))
		goto err_stop;

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring up any child interfaces too */
		down_read(&priv->vlan_rwsem);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (flags & IFF_UP)
				continue;

			dev_change_flags(cpriv->dev, flags | IFF_UP);
		}
		up_read(&priv->vlan_rwsem);
	}

	netif_start_queue(dev);

	return 0;

err_stop:
	ipoib_ib_dev_stop(dev);

err_disable:
	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	return -EINVAL;
}

static int ipoib_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "stopping interface\n");

	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	netif_stop_queue(dev);

	ipoib_ib_dev_down(dev);
	ipoib_ib_dev_stop(dev);

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring down any child interfaces too */
		down_read(&priv->vlan_rwsem);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (!(flags & IFF_UP))
				continue;

			dev_change_flags(cpriv->dev, flags & ~IFF_UP);
		}
		up_read(&priv->vlan_rwsem);
	}

	return 0;
}

static void ipoib_uninit(struct net_device *dev)
{
	ipoib_dev_cleanup(dev);
}

static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
		features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);

	return features;
}

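/*
 * ndo_change_mtu handler: in connected mode the MTU may grow up to
 * ipoib_cm_max_mtu(); in datagram mode it is capped by the port's IB MTU,
 * and the effective dev->mtu is the smaller of the multicast MTU and the
 * administratively requested value.
 */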
static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* dev->mtu > 2K ==> connected mode */
	if (ipoib_cm_admin_enabled(dev)) {
		if (new_mtu > ipoib_cm_max_mtu(dev))
			return -EINVAL;

		if (new_mtu > priv->mcast_mtu)
			ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n",
				   priv->mcast_mtu);

		dev->mtu = new_mtu;
		return 0;
	}

	if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
		return -EINVAL;

	priv->admin_mtu = new_mtu;

	dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);

	return 0;
}

/* Called with an RCU read lock taken */
static bool ipoib_is_dev_match_addr_rcu(const struct sockaddr *addr,
					struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct in_device *in_dev;
	struct sockaddr_in *addr_in = (struct sockaddr_in *)addr;
	struct sockaddr_in6 *addr_in6 = (struct sockaddr_in6 *)addr;
	__be32 ret_addr;

	switch (addr->sa_family) {
	case AF_INET:
		in_dev = in_dev_get(dev);
		if (!in_dev)
			return false;

		ret_addr = inet_confirm_addr(net, in_dev, 0,
					     addr_in->sin_addr.s_addr,
					     RT_SCOPE_HOST);
		in_dev_put(in_dev);
		if (ret_addr)
			return true;

		break;
	case AF_INET6:
		if (IS_ENABLED(CONFIG_IPV6) &&
		    ipv6_chk_addr(net, &addr_in6->sin6_addr, dev, 1))
			return true;

		break;
	}
	return false;
}

/**
 * Find the master net_device on top of the given net_device.
 * @dev: base IPoIB net_device
 *
 * Returns the master net_device with a reference held, or the same net_device
 * if no master exists.
 */
static struct net_device *ipoib_get_master_net_dev(struct net_device *dev)
{
	struct net_device *master;

	rcu_read_lock();
	master = netdev_master_upper_dev_get_rcu(dev);
	if (master)
		dev_hold(master);
	rcu_read_unlock();

	if (master)
		return master;

	dev_hold(dev);
	return dev;
}

struct ipoib_walk_data {
	const struct sockaddr *addr;
	struct net_device *result;
};

static int ipoib_upper_walk(struct net_device *upper, void *_data)
{
	struct ipoib_walk_data *data = _data;
	int ret = 0;

	if (ipoib_is_dev_match_addr_rcu(data->addr, upper)) {
		dev_hold(upper);
		data->result = upper;
		ret = 1;
	}

	return ret;
}

/**
 * Find a net_device matching the given address, which is an upper device of
 * the given net_device.
 * @addr: IP address to look for.
 * @dev: base IPoIB net_device
 *
 * If found, returns the net_device with a reference held. Otherwise return
 * NULL.
 */
static struct net_device *ipoib_get_net_dev_match_addr(
		const struct sockaddr *addr, struct net_device *dev)
{
	struct ipoib_walk_data data = {
		.addr = addr,
	};

	rcu_read_lock();
	if (ipoib_is_dev_match_addr_rcu(addr, dev)) {
		dev_hold(dev);
		data.result = dev;
		goto out;
	}

	netdev_walk_all_upper_dev_rcu(dev, ipoib_upper_walk, &data);
out:
	rcu_read_unlock();
	return data.result;
}

/* Returns the number of IPoIB netdevs on top of a given IPoIB device that
 * match a pkey_index and address, if one exists.
 *
 * @found_net_dev: contains a matching net_device if the return value >= 1,
 * with a reference held. */
static int ipoib_match_gid_pkey_addr(struct ipoib_dev_priv *priv,
				     const union ib_gid *gid,
				     u16 pkey_index,
				     const struct sockaddr *addr,
				     int nesting,
				     struct net_device **found_net_dev)
{
	struct ipoib_dev_priv *child_priv;
	struct net_device *net_dev = NULL;
	int matches = 0;

	if (priv->pkey_index == pkey_index &&
	    (!gid || !memcmp(gid, &priv->local_gid, sizeof(*gid)))) {
		if (!addr) {
			net_dev = ipoib_get_master_net_dev(priv->dev);
		} else {
			/* Verify the net_device matches the IP address, as
			 * IPoIB child devices currently share a GID. */
			net_dev = ipoib_get_net_dev_match_addr(addr, priv->dev);
		}
		if (net_dev) {
			if (!*found_net_dev)
				*found_net_dev = net_dev;
			else
				dev_put(net_dev);
			++matches;
		}
	}

	/* Check child interfaces */
	down_read_nested(&priv->vlan_rwsem, nesting);
	list_for_each_entry(child_priv, &priv->child_intfs, list) {
		matches += ipoib_match_gid_pkey_addr(child_priv, gid,
						    pkey_index, addr,
						    nesting + 1,
						    found_net_dev);
		if (matches > 1)
			break;
	}
	up_read(&priv->vlan_rwsem);

	return matches;
}

/* Returns the number of matching net_devs found (between 0 and 2). Also
 * return the matching net_device in the @net_dev parameter, holding a
 * reference to the net_device, if the number of matches >= 1 */
static int __ipoib_get_net_dev_by_params(struct list_head *dev_list, u8 port,
					 u16 pkey_index,
					 const union ib_gid *gid,
					 const struct sockaddr *addr,
					 struct net_device **net_dev)
{
	struct ipoib_dev_priv *priv;
	int matches = 0;

	*net_dev = NULL;

	list_for_each_entry(priv, dev_list, list) {
		if (priv->port != port)
			continue;

		matches += ipoib_match_gid_pkey_addr(priv, gid, pkey_index,
						     addr, 0, net_dev);
		if (matches > 1)
			break;
	}

	return matches;
}

static struct net_device *ipoib_get_net_dev_by_params(
		struct ib_device *dev, u8 port, u16 pkey,
		const union ib_gid *gid, const struct sockaddr *addr,
		void *client_data)
{
	struct net_device *net_dev;
	struct list_head *dev_list = client_data;
	u16 pkey_index;
	int matches;
	int ret;

	if (!rdma_protocol_ib(dev, port))
		return NULL;

	ret = ib_find_cached_pkey(dev, port, pkey, &pkey_index);
	if (ret)
		return NULL;

	if (!dev_list)
		return NULL;

	/* See if we can find a unique device matching the L2 parameters */
	matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
						gid, NULL, &net_dev);

	switch (matches) {
	case 0:
		return NULL;
	case 1:
		return net_dev;
	}

	dev_put(net_dev);

	/* Couldn't find a unique device with L2 parameters only. Use L3
	 * address to uniquely match the net device */
	matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
						gid, addr, &net_dev);
	switch (matches) {
	case 0:
		return NULL;
	default:
		dev_warn_ratelimited(&dev->dev,
				     "duplicate IP address detected\n");
		/* Fall through */
	case 1:
		return net_dev;
	}
}

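/*
 * Switch between datagram and connected mode. This is normally driven from
 * the "mode" sysfs attribute (e.g. "echo connected > /sys/class/net/ib0/mode").
 * The caller holds the RTNL lock, which is dropped around ipoib_flush_paths()
 * so that in-flight path queries can complete.
 */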
int ipoib_set_mode(struct net_device *dev, const char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* flush paths if we switch modes so that connections are restarted */
	if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
		set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
		ipoib_warn(priv, "enabling connected mode "
			   "will cause multicast packet drops\n");
		netdev_update_features(dev);
		dev_set_mtu(dev, ipoib_cm_max_mtu(dev));
		rtnl_unlock();
		priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;

		ipoib_flush_paths(dev);
		rtnl_lock();
		return 0;
	}

	if (!strcmp(buf, "datagram\n")) {
		clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
		netdev_update_features(dev);
		dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
		rtnl_unlock();
		ipoib_flush_paths(dev);
		rtnl_lock();
		return 0;
	}

	return -EINVAL;
}

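/*
 * Look up a path by destination GID in the per-device red-black tree.
 * The caller must hold priv->lock.
 */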
struct ipoib_path *__path_find(struct net_device *dev, void *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node *n = priv->path_tree.rb_node;
	struct ipoib_path *path;
	int ret;

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		ret = memcmp(gid, path->pathrec.dgid.raw,
			     sizeof (union ib_gid));

		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			return path;
	}

	return NULL;
}

static int __path_add(struct net_device *dev, struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node **n = &priv->path_tree.rb_node;
	struct rb_node *pn = NULL;
	struct ipoib_path *tpath;
	int ret;

	while (*n) {
		pn = *n;
		tpath = rb_entry(pn, struct ipoib_path, rb_node);

		ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
			     sizeof (union ib_gid));
		if (ret < 0)
			n = &pn->rb_left;
		else if (ret > 0)
			n = &pn->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&path->rb_node, pn, n);
	rb_insert_color(&path->rb_node, &priv->path_tree);

	list_add_tail(&path->list, &priv->path_list);

	return 0;
}

static void path_free(struct net_device *dev, struct ipoib_path *path)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&path->queue)))
		dev_kfree_skb_irq(skb);

	ipoib_dbg(netdev_priv(dev), "path_free\n");

	/* remove all neigh connected to this path */
	ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);

	if (path->ah)
		ipoib_put_ah(path->ah);

	kfree(path);
}

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev)
{
	struct ipoib_path_iter *iter;

	iter = kmalloc(sizeof *iter, GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	memset(iter->path.pathrec.dgid.raw, 0, 16);

	if (ipoib_path_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int ipoib_path_iter_next(struct ipoib_path_iter *iter)
{
	struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
	struct rb_node *n;
	struct ipoib_path *path;
	int ret = 1;

	spin_lock_irq(&priv->lock);

	n = rb_first(&priv->path_tree);

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
			   sizeof (union ib_gid)) < 0) {
			iter->path = *path;
			ret = 0;
			break;
		}

		n = rb_next(n);
	}

	spin_unlock_irq(&priv->lock);

	return ret;
}

void ipoib_path_iter_read(struct ipoib_path_iter *iter,
			  struct ipoib_path *path)
{
	*path = iter->path;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */

void ipoib_mark_paths_invalid(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path, *tp;

	spin_lock_irq(&priv->lock);

	list_for_each_entry_safe(path, tp, &priv->path_list, list) {
		ipoib_dbg(priv, "mark path LID 0x%04x GID %pI6 invalid\n",
			be16_to_cpu(path->pathrec.dlid),
			path->pathrec.dgid.raw);
		path->valid = 0;
	}

	spin_unlock_irq(&priv->lock);
}

struct classport_info_context {
	struct ipoib_dev_priv	*priv;
	struct completion	done;
	struct ib_sa_query	*sa_query;
};

static void classport_info_query_cb(int status, struct ib_class_port_info *rec,
				    void *context)
{
	struct classport_info_context *cb_ctx = context;
	struct ipoib_dev_priv *priv;

	WARN_ON(!context);

	priv = cb_ctx->priv;

	if (status || !rec) {
		pr_debug("device: %s failed query classport_info status: %d\n",
			 priv->dev->name, status);
		/* keeps the default, will try next mcast_restart */
		priv->sm_fullmember_sendonly_support = false;
		goto out;
	}

	if (ib_get_cpi_capmask2(rec) &
	    IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT) {
		pr_debug("device: %s enabled fullmember-sendonly for sendonly MCG\n",
			 priv->dev->name);
		priv->sm_fullmember_sendonly_support = true;
	} else {
		pr_debug("device: %s disabled fullmember-sendonly for sendonly MCG\n",
			 priv->dev->name);
		priv->sm_fullmember_sendonly_support = false;
	}

out:
	complete(&cb_ctx->done);
}

int ipoib_check_sm_sendonly_fullmember_support(struct ipoib_dev_priv *priv)
{
	struct classport_info_context *callback_context;
	int ret;

	callback_context = kmalloc(sizeof(*callback_context), GFP_KERNEL);
	if (!callback_context)
		return -ENOMEM;

	callback_context->priv = priv;
	init_completion(&callback_context->done);

	ret = ib_sa_classport_info_rec_query(&ipoib_sa_client,
					     priv->ca, priv->port, 3000,
					     GFP_KERNEL,
					     classport_info_query_cb,
					     callback_context,
					     &callback_context->sa_query);
	if (ret < 0) {
		pr_info("%s failed to send ib_sa_classport_info query, ret: %d\n",
			priv->dev->name, ret);
		kfree(callback_context);
		return ret;
	}

	/* wait for the callback to finish before returning */
	wait_for_completion(&callback_context->done);
	kfree(callback_context);

	return ret;
}

void ipoib_flush_paths(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path, *tp;
	LIST_HEAD(remove_list);
	unsigned long flags;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	list_splice_init(&priv->path_list, &remove_list);

	list_for_each_entry(path, &remove_list, list)
		rb_erase(&path->rb_node, &priv->path_tree);

	list_for_each_entry_safe(path, tp, &remove_list, list) {
		if (path->query)
			ib_sa_cancel_query(path->query_id, path->query);
		spin_unlock_irqrestore(&priv->lock, flags);
		netif_tx_unlock_bh(dev);
		wait_for_completion(&path->done);
		path_free(dev, path);
		netif_tx_lock_bh(dev);
		spin_lock_irqsave(&priv->lock, flags);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
}

static void path_rec_completion(int status,
				struct ib_sa_path_rec *pathrec,
				void *path_ptr)
{
	struct ipoib_path *path = path_ptr;
	struct net_device *dev = path->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_ah *ah = NULL;
	struct ipoib_ah *old_ah = NULL;
	struct ipoib_neigh *neigh, *tn;
	struct sk_buff_head skqueue;
	struct sk_buff *skb;
	unsigned long flags;

	if (!status)
		ipoib_dbg(priv, "PathRec LID 0x%04x for GID %pI6\n",
			  be16_to_cpu(pathrec->dlid), pathrec->dgid.raw);
	else
		ipoib_dbg(priv, "PathRec status %d for GID %pI6\n",
			  status, path->pathrec.dgid.raw);

	skb_queue_head_init(&skqueue);

	if (!status) {
		struct ib_ah_attr av;

		if (!ib_init_ah_from_path(priv->ca, priv->port, pathrec, &av))
			ah = ipoib_create_ah(dev, priv->pd, &av);
	}

	spin_lock_irqsave(&priv->lock, flags);

	if (!IS_ERR_OR_NULL(ah)) {
		path->pathrec = *pathrec;

		old_ah   = path->ah;
		path->ah = ah;

		ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
			  ah, be16_to_cpu(pathrec->dlid), pathrec->sl);

		while ((skb = __skb_dequeue(&path->queue)))
			__skb_queue_tail(&skqueue, skb);

		list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
			if (neigh->ah) {
				WARN_ON(neigh->ah != old_ah);
				/*
				 * Dropping the ah reference inside
				 * priv->lock is safe here, because we
				 * will hold one more reference from
				 * the original value of path->ah (ie
				 * old_ah).
				 */
				ipoib_put_ah(neigh->ah);
			}
			kref_get(&path->ah->ref);
			neigh->ah = path->ah;

			if (ipoib_cm_enabled(dev, neigh->daddr)) {
				if (!ipoib_cm_get(neigh))
					ipoib_cm_set(neigh, ipoib_cm_create_tx(dev,
									       path,
									       neigh));
				if (!ipoib_cm_get(neigh)) {
					ipoib_neigh_free(neigh);
					continue;
				}
			}

			while ((skb = __skb_dequeue(&neigh->queue)))
				__skb_queue_tail(&skqueue, skb);
		}
		path->valid = 1;
	}

	path->query = NULL;
	complete(&path->done);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (IS_ERR_OR_NULL(ah))
		ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);

	if (old_ah)
		ipoib_put_ah(old_ah);

	while ((skb = __skb_dequeue(&skqueue))) {
		skb->dev = dev;
		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed "
				   "to requeue packet\n");
	}
}

static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;

	if (!priv->broadcast)
		return NULL;

	path = kzalloc(sizeof *path, GFP_ATOMIC);
	if (!path)
		return NULL;

	path->dev = dev;

	skb_queue_head_init(&path->queue);

	INIT_LIST_HEAD(&path->neigh_list);

	memcpy(path->pathrec.dgid.raw, gid, sizeof (union ib_gid));
	path->pathrec.sgid	    = priv->local_gid;
	path->pathrec.pkey	    = cpu_to_be16(priv->pkey);
	path->pathrec.numb_path     = 1;
	path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;

	return path;
}

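/*
 * Kick off an asynchronous SA path record query for @path. The completion
 * handler (path_rec_completion) creates the address handle and requeues any
 * packets that were buffered while the lookup was in flight.
 */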
static int path_rec_start(struct net_device *dev,
			  struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "Start path record lookup for %pI6\n",
		  path->pathrec.dgid.raw);

	init_completion(&path->done);

	path->query_id =
		ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port,
				   &path->pathrec,
				   IB_SA_PATH_REC_DGID		|
				   IB_SA_PATH_REC_SGID		|
				   IB_SA_PATH_REC_NUMB_PATH	|
				   IB_SA_PATH_REC_TRAFFIC_CLASS |
				   IB_SA_PATH_REC_PKEY,
				   1000, GFP_ATOMIC,
				   path_rec_completion,
				   path, &path->query);
	if (path->query_id < 0) {
		ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id);
		path->query = NULL;
		complete(&path->done);
		return path->query_id;
	}

	return 0;
}

static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
			   struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;
	struct ipoib_neigh *neigh;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	neigh = ipoib_neigh_alloc(daddr, dev);
	if (!neigh) {
		spin_unlock_irqrestore(&priv->lock, flags);
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		return;
	}

	path = __path_find(dev, daddr + 4);
	if (!path) {
		path = path_rec_create(dev, daddr + 4);
		if (!path)
			goto err_path;

		__path_add(dev, path);
	}

	list_add_tail(&neigh->list, &path->neigh_list);

	if (path->ah) {
		kref_get(&path->ah->ref);
		neigh->ah = path->ah;

		if (ipoib_cm_enabled(dev, neigh->daddr)) {
			if (!ipoib_cm_get(neigh))
				ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, path, neigh));
			if (!ipoib_cm_get(neigh)) {
				ipoib_neigh_free(neigh);
				goto err_drop;
			}
			if (skb_queue_len(&neigh->queue) <
			    IPOIB_MAX_PATH_REC_QUEUE) {
				/* put pseudoheader back on for next time */
				skb_push(skb, IPOIB_PSEUDO_LEN);
				__skb_queue_tail(&neigh->queue, skb);
			} else {
				ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
					   skb_queue_len(&neigh->queue));
				goto err_drop;
			}
		} else {
			spin_unlock_irqrestore(&priv->lock, flags);
			ipoib_send(dev, skb, path->ah, IPOIB_QPN(daddr));
			ipoib_neigh_put(neigh);
			return;
		}
	} else {
		neigh->ah  = NULL;

		if (!path->query && path_rec_start(dev, path))
			goto err_path;
		if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)
			__skb_queue_tail(&neigh->queue, skb);
		else
			goto err_drop;
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	ipoib_neigh_put(neigh);
	return;

err_path:
	ipoib_neigh_free(neigh);
err_drop:
	++dev->stats.tx_dropped;
	dev_kfree_skb_any(skb);

	spin_unlock_irqrestore(&priv->lock, flags);
	ipoib_neigh_put(neigh);
}

static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
			     struct ipoib_pseudo_header *phdr)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	path = __path_find(dev, phdr->hwaddr + 4);
	if (!path || !path->valid) {
		int new_path = 0;

		if (!path) {
			path = path_rec_create(dev, phdr->hwaddr + 4);
			new_path = 1;
		}
		if (path) {
			if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
				/* put pseudoheader back on for next time */
				skb_push(skb, IPOIB_PSEUDO_LEN);
				__skb_queue_tail(&path->queue, skb);
			} else {
				++dev->stats.tx_dropped;
				dev_kfree_skb_any(skb);
			}

			if (!path->query && path_rec_start(dev, path)) {
				spin_unlock_irqrestore(&priv->lock, flags);
				if (new_path)
					path_free(dev, path);
				return;
			} else
				__path_add(dev, path);
		} else {
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}

		spin_unlock_irqrestore(&priv->lock, flags);
		return;
	}

	if (path->ah) {
		ipoib_dbg(priv, "Send unicast ARP to %04x\n",
			  be16_to_cpu(path->pathrec.dlid));

		spin_unlock_irqrestore(&priv->lock, flags);
		ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr));
		return;
	} else if ((path->query || !path_rec_start(dev, path)) &&
		   skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
		/* put pseudoheader back on for next time */
		skb_push(skb, IPOIB_PSEUDO_LEN);
		__skb_queue_tail(&path->queue, skb);
	} else {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}

static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh;
	struct ipoib_pseudo_header *phdr;
	struct ipoib_header *header;
	unsigned long flags;

	phdr = (struct ipoib_pseudo_header *) skb->data;
	skb_pull(skb, sizeof(*phdr));
	header = (struct ipoib_header *) skb->data;

	if (unlikely(phdr->hwaddr[4] == 0xff)) {
		/* multicast, arrange "if" according to probability */
		if ((header->proto != htons(ETH_P_IP)) &&
		    (header->proto != htons(ETH_P_IPV6)) &&
		    (header->proto != htons(ETH_P_ARP)) &&
		    (header->proto != htons(ETH_P_RARP)) &&
		    (header->proto != htons(ETH_P_TIPC))) {
			/* ethertype not supported by IPoIB */
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
		/* Add in the P_Key for multicast */
		phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
		phdr->hwaddr[9] = priv->pkey & 0xff;

		neigh = ipoib_neigh_get(dev, phdr->hwaddr);
		if (likely(neigh))
			goto send_using_neigh;
		ipoib_mcast_send(dev, phdr->hwaddr, skb);
		return NETDEV_TX_OK;
	}

	/* unicast, arrange "switch" according to probability */
	switch (header->proto) {
	case htons(ETH_P_IP):
	case htons(ETH_P_IPV6):
	case htons(ETH_P_TIPC):
		neigh = ipoib_neigh_get(dev, phdr->hwaddr);
		if (unlikely(!neigh)) {
			neigh_add_path(skb, phdr->hwaddr, dev);
			return NETDEV_TX_OK;
		}
		break;
	case htons(ETH_P_ARP):
	case htons(ETH_P_RARP):
		/* unicast ARP and RARP should always perform a path find */
		unicast_arp_send(skb, dev, phdr);
		return NETDEV_TX_OK;
	default:
		/* ethertype not supported by IPoIB */
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

send_using_neigh:
	/* note we now hold a ref to neigh */
	if (ipoib_cm_get(neigh)) {
		if (ipoib_cm_up(neigh)) {
			ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
			goto unref;
		}
	} else if (neigh->ah) {
		ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(phdr->hwaddr));
		goto unref;
	}

	if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
		/* put pseudoheader back on for next time */
		skb_push(skb, sizeof(*phdr));
		spin_lock_irqsave(&priv->lock, flags);
		__skb_queue_tail(&neigh->queue, skb);
		spin_unlock_irqrestore(&priv->lock, flags);
	} else {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}

unref:
	ipoib_neigh_put(neigh);

	return NETDEV_TX_OK;
}

static void ipoib_timeout(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
		   jiffies_to_msecs(jiffies - dev_trans_start(dev)));
	ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
		   netif_queue_stopped(dev),
		   priv->tx_head, priv->tx_tail);
	/* XXX reset QP, etc. */
}

static int ipoib_hard_header(struct sk_buff *skb,
			     struct net_device *dev,
			     unsigned short type,
			     const void *daddr, const void *saddr, unsigned len)
{
	struct ipoib_pseudo_header *phdr;
	struct ipoib_header *header;

	header = (struct ipoib_header *) skb_push(skb, sizeof *header);

	header->proto = htons(type);
	header->reserved = 0;

	/*
	 * We don't rely on the dst_entry structure; always stuff the
	 * destination address into the skb hard header so we can figure out
	 * where to send the packet later.
	 */
	phdr = (struct ipoib_pseudo_header *) skb_push(skb, sizeof(*phdr));
	memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);

	return IPOIB_HARD_LEN;
}

static void ipoib_set_mcast_list(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
		ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set");
		return;
	}

	queue_work(priv->wq, &priv->restart_task);
}

static int ipoib_get_iflink(const struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* parent interface */
	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
		return dev->ifindex;

	/* child/vlan interface */
	return priv->parent->ifindex;
}

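/*
 * The 20-byte IPoIB hardware address consists of a 4-byte QPN field (top
 * octet reserved for flags) followed by the 16-byte GID; the hash below
 * mixes the QPN with the interface-ID half of the GID.
 */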
static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
{
	/*
	 * Use only the address parts that contribute to spreading.
	 * The subnet prefix is not used, as one cannot connect to the
	 * same remote port (GUID) using the same remote QPN via two
	 * different subnets.
	 */
	 /* qpn octets[1:4) & port GUID octets[12:20) */
	u32 *d32 = (u32 *) daddr;
	u32 hv;

	hv = jhash_3words(d32[3], d32[4], IPOIB_QPN_MASK & d32[0], 0);
	return hv & htbl->mask;
}

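/*
 * Look up a neighbour by hardware address under the BH-safe RCU read lock.
 * On success a reference is taken on behalf of the caller, who must drop
 * it with ipoib_neigh_put().
 */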
struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh *neigh = NULL;
	u32 hash_val;

	rcu_read_lock_bh();

	htbl = rcu_dereference_bh(ntbl->htbl);

	if (!htbl)
		goto out_unlock;

	hash_val = ipoib_addr_hash(htbl, daddr);
	for (neigh = rcu_dereference_bh(htbl->buckets[hash_val]);
	     neigh != NULL;
	     neigh = rcu_dereference_bh(neigh->hnext)) {
		if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
			/* found, take one ref on behalf of the caller */
			if (!atomic_inc_not_zero(&neigh->refcnt)) {
				/* deleted */
				neigh = NULL;
				goto out_unlock;
			}

			if (likely(skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE))
				neigh->alive = jiffies;
			goto out_unlock;
		}
	}

out_unlock:
	rcu_read_unlock_bh();
	return neigh;
}

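/*
 * Garbage-collect neighbours that have been idle for two ARP GC intervals;
 * runs from the periodic neigh_reap_task, which requeues itself unless
 * IPOIB_STOP_NEIGH_GC is set.
 */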
static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
{
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	unsigned long neigh_obsolete;
	unsigned long dt;
	unsigned long flags;
	int i;
	LIST_HEAD(remove_list);

	if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
		return;

	spin_lock_irqsave(&priv->lock, flags);

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));

	if (!htbl)
		goto out_unlock;

	/* neigh is obsolete if it was idle for two GC periods */
	dt = 2 * arp_tbl.gc_interval;
	neigh_obsolete = jiffies - dt;
	/* handle possible race condition */
	if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
		goto out_unlock;

	for (i = 0; i < htbl->size; i++) {
		struct ipoib_neigh *neigh;
		struct ipoib_neigh __rcu **np = &htbl->buckets[i];

		while ((neigh = rcu_dereference_protected(*np,
							  lockdep_is_held(&priv->lock))) != NULL) {
			/* was the neigh idle for two GC periods */
			if (time_after(neigh_obsolete, neigh->alive)) {

				ipoib_check_and_add_mcast_sendonly(priv, neigh->daddr + 4, &remove_list);

				rcu_assign_pointer(*np,
						   rcu_dereference_protected(neigh->hnext,
									     lockdep_is_held(&priv->lock)));
				/* remove from path/mc list */
				list_del(&neigh->list);
				call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
			} else {
				np = &neigh->hnext;
			}

		}
	}

out_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
	ipoib_mcast_remove_list(&remove_list);
}

static void ipoib_reap_neigh(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, neigh_reap_task.work);

	__ipoib_reap_neigh(priv);

	if (!test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
		queue_delayed_work(priv->wq, &priv->neigh_reap_task,
				   arp_tbl.gc_interval);
}


static struct ipoib_neigh *ipoib_neigh_ctor(u8 *daddr,
				      struct net_device *dev)
{
	struct ipoib_neigh *neigh;

	neigh = kzalloc(sizeof *neigh, GFP_ATOMIC);
	if (!neigh)
		return NULL;

	neigh->dev = dev;
	memcpy(&neigh->daddr, daddr, sizeof(neigh->daddr));
	skb_queue_head_init(&neigh->queue);
	INIT_LIST_HEAD(&neigh->list);
	ipoib_cm_set(neigh, NULL);
	/* one ref on behalf of the caller */
	atomic_set(&neigh->refcnt, 1);

	return neigh;
}

struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr,
				      struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh *neigh;
	u32 hash_val;

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));
	if (!htbl) {
		neigh = NULL;
		goto out_unlock;
	}

	/* We need to add a new neigh, but maybe some other thread beat us
	 * to it? Recompute the hash (a resize may have taken place) and
	 * search again before allocating.
	 */
	hash_val = ipoib_addr_hash(htbl, daddr);
	for (neigh = rcu_dereference_protected(htbl->buckets[hash_val],
					       lockdep_is_held(&priv->lock));
	     neigh != NULL;
	     neigh = rcu_dereference_protected(neigh->hnext,
					       lockdep_is_held(&priv->lock))) {
		if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
			/* found, take one ref on behalf of the caller */
			if (!atomic_inc_not_zero(&neigh->refcnt)) {
				/* deleted */
				neigh = NULL;
				break;
			}
			neigh->alive = jiffies;
			goto out_unlock;
		}
	}

	neigh = ipoib_neigh_ctor(daddr, dev);
	if (!neigh)
		goto out_unlock;

	/* one ref on behalf of the hash table */
	atomic_inc(&neigh->refcnt);
	neigh->alive = jiffies;
	/* put in hash */
	rcu_assign_pointer(neigh->hnext,
			   rcu_dereference_protected(htbl->buckets[hash_val],
						     lockdep_is_held(&priv->lock)));
	rcu_assign_pointer(htbl->buckets[hash_val], neigh);
	atomic_inc(&ntbl->entries);

out_unlock:

	return neigh;
}

void ipoib_neigh_dtor(struct ipoib_neigh *neigh)
{
	/* neigh reference count was dropped to zero */
	struct net_device *dev = neigh->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	if (neigh->ah)
		ipoib_put_ah(neigh->ah);
	while ((skb = __skb_dequeue(&neigh->queue))) {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}
	if (ipoib_cm_get(neigh))
		ipoib_cm_destroy_tx(ipoib_cm_get(neigh));
	ipoib_dbg(netdev_priv(dev),
		  "neigh free for %06x %pI6\n",
		  IPOIB_QPN(neigh->daddr),
		  neigh->daddr + 4);
	kfree(neigh);
	if (atomic_dec_and_test(&priv->ntbl.entries)) {
		if (test_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags))
			complete(&priv->ntbl.flushed);
	}
}

static void ipoib_neigh_reclaim(struct rcu_head *rp)
{
	/* Called as a result of removal from hash table */
	struct ipoib_neigh *neigh = container_of(rp, struct ipoib_neigh, rcu);
	/* note TX context may hold another ref */
	ipoib_neigh_put(neigh);
}

void ipoib_neigh_free(struct ipoib_neigh *neigh)
{
	struct net_device *dev = neigh->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh __rcu **np;
	struct ipoib_neigh *n;
	u32 hash_val;

	htbl = rcu_dereference_protected(ntbl->htbl,
					lockdep_is_held(&priv->lock));
	if (!htbl)
		return;

	hash_val = ipoib_addr_hash(htbl, neigh->daddr);
	np = &htbl->buckets[hash_val];
	for (n = rcu_dereference_protected(*np,
					    lockdep_is_held(&priv->lock));
	     n != NULL;
	     n = rcu_dereference_protected(*np,
					lockdep_is_held(&priv->lock))) {
		if (n == neigh) {
			/* found */
			rcu_assign_pointer(*np,
					   rcu_dereference_protected(neigh->hnext,
								     lockdep_is_held(&priv->lock)));
			/* remove from parent list */
			list_del(&neigh->list);
			call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
			return;
		} else {
			np = &n->hnext;
		}
	}
}

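/*
 * Allocate the neighbour hash table, sized to the next power of two above
 * arp_tbl.gc_thresh3, and start the periodic garbage-collection work.
 */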
static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
{
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh __rcu **buckets;
	u32 size;

	clear_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
	ntbl->htbl = NULL;
	htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
	if (!htbl)
		return -ENOMEM;
	set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
	size = roundup_pow_of_two(arp_tbl.gc_thresh3);
	buckets = kzalloc(size * sizeof(*buckets), GFP_KERNEL);
	if (!buckets) {
		kfree(htbl);
		return -ENOMEM;
	}
	htbl->size = size;
	htbl->mask = (size - 1);
	htbl->buckets = buckets;
	RCU_INIT_POINTER(ntbl->htbl, htbl);
	htbl->ntbl = ntbl;
	atomic_set(&ntbl->entries, 0);

	/* start garbage collection */
	clear_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
	queue_delayed_work(priv->wq, &priv->neigh_reap_task,
			   arp_tbl.gc_interval);

	return 0;
}

static void neigh_hash_free_rcu(struct rcu_head *head)
{
	struct ipoib_neigh_hash *htbl = container_of(head,
						    struct ipoib_neigh_hash,
						    rcu);
	struct ipoib_neigh __rcu **buckets = htbl->buckets;
	struct ipoib_neigh_table *ntbl = htbl->ntbl;

	kfree(buckets);
	kfree(htbl);
	complete(&ntbl->deleted);
}

void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	unsigned long flags;
	int i;

	/* remove all neigh connected to a given path or mcast */
	spin_lock_irqsave(&priv->lock, flags);

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));

	if (!htbl)
		goto out_unlock;

	for (i = 0; i < htbl->size; i++) {
		struct ipoib_neigh *neigh;
		struct ipoib_neigh __rcu **np = &htbl->buckets[i];

		while ((neigh = rcu_dereference_protected(*np,
							  lockdep_is_held(&priv->lock))) != NULL) {
			/* delete neighs belonging to this parent */
			if (!memcmp(gid, neigh->daddr + 4, sizeof (union ib_gid))) {
				rcu_assign_pointer(*np,
						   rcu_dereference_protected(neigh->hnext,
									     lockdep_is_held(&priv->lock)));
				/* remove from parent list */
				list_del(&neigh->list);
				call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
			} else {
				np = &neigh->hnext;
			}

		}
	}
out_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
}

static void ipoib_flush_neighs(struct ipoib_dev_priv *priv)
{
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	unsigned long flags;
	int i, wait_flushed = 0;

	init_completion(&priv->ntbl.flushed);

	spin_lock_irqsave(&priv->lock, flags);

	htbl = rcu_dereference_protected(ntbl->htbl,
					lockdep_is_held(&priv->lock));
	if (!htbl)
		goto out_unlock;

	wait_flushed = atomic_read(&priv->ntbl.entries);
	if (!wait_flushed)
		goto free_htbl;

	for (i = 0; i < htbl->size; i++) {
		struct ipoib_neigh *neigh;
		struct ipoib_neigh __rcu **np = &htbl->buckets[i];

		while ((neigh = rcu_dereference_protected(*np,
				       lockdep_is_held(&priv->lock))) != NULL) {
			rcu_assign_pointer(*np,
					   rcu_dereference_protected(neigh->hnext,
								     lockdep_is_held(&priv->lock)));
			/* remove from path/mc list */
			list_del(&neigh->list);
			call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
		}
	}

free_htbl:
	rcu_assign_pointer(ntbl->htbl, NULL);
	call_rcu(&htbl->rcu, neigh_hash_free_rcu);

out_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
	if (wait_flushed)
		wait_for_completion(&priv->ntbl.flushed);
}

static void ipoib_neigh_hash_uninit(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int stopped;

	ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n");
	init_completion(&priv->ntbl.deleted);
	set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);

	/* Stop GC; if called after an init failure, the work must be canceled */
	stopped = test_and_set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
	if (!stopped)
		cancel_delayed_work(&priv->neigh_reap_task);

	ipoib_flush_neighs(priv);

	wait_for_completion(&priv->ntbl.deleted);
}


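/*
 * Per-device initialization: allocate the RX/TX rings, bring up the IB
 * resources (which also creates the per-device workqueue) and set up the
 * neighbour hash table. ipoib_dev_cleanup() tears this down in reverse.
 */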
int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* Allocate RX/TX "rings" to hold queued skbs */
	priv->rx_ring =	kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
				GFP_KERNEL);
	if (!priv->rx_ring)
		goto out;

	priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
	if (!priv->tx_ring) {
		printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
		       ca->name, ipoib_sendq_size);
		goto out_rx_ring_cleanup;
	}

	/* priv->tx_head, tx_tail & tx_outstanding are already 0 */

	if (ipoib_ib_dev_init(dev, ca, port))
		goto out_tx_ring_cleanup;

	/*
	 * Must be after ipoib_ib_dev_init so we can allocate a per
	 * device wq there and use it here
	 */
	if (ipoib_neigh_hash_init(priv) < 0)
		goto out_dev_uninit;

	return 0;

out_dev_uninit:
	ipoib_ib_dev_cleanup(dev);

out_tx_ring_cleanup:
	vfree(priv->tx_ring);

out_rx_ring_cleanup:
	kfree(priv->rx_ring);

out:
	return -ENOMEM;
}

void ipoib_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv;
	LIST_HEAD(head);

	ASSERT_RTNL();

	ipoib_delete_debug_files(dev);

	/* Delete any child interfaces first */
	list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
		/* Stop GC on child */
		set_bit(IPOIB_STOP_NEIGH_GC, &cpriv->flags);
		cancel_delayed_work(&cpriv->neigh_reap_task);
		unregister_netdevice_queue(cpriv->dev, &head);
	}
	unregister_netdevice_many(&head);

	/*
	 * Must be before ipoib_ib_dev_cleanup or we delete an in use
	 * work queue
	 */
	ipoib_neigh_hash_uninit(dev);

	ipoib_ib_dev_cleanup(dev);

	kfree(priv->rx_ring);
	vfree(priv->tx_ring);

	priv->rx_ring = NULL;
	priv->tx_ring = NULL;
}

static int ipoib_set_vf_link_state(struct net_device *dev, int vf, int link_state)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	return ib_set_vf_link_state(priv->ca, vf, priv->port, link_state);
}

static int ipoib_get_vf_config(struct net_device *dev, int vf,
			       struct ifla_vf_info *ivf)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int err;

	err = ib_get_vf_config(priv->ca, vf, priv->port, ivf);
	if (err)
		return err;

	ivf->vf = vf;

	return 0;
}

static int ipoib_set_vf_guid(struct net_device *dev, int vf, u64 guid, int type)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (type != IFLA_VF_IB_NODE_GUID && type != IFLA_VF_IB_PORT_GUID)
		return -EINVAL;

	return ib_set_vf_guid(priv->ca, vf, priv->port, guid, type);
}

static int ipoib_get_vf_stats(struct net_device *dev, int vf,
			      struct ifla_vf_stats *vf_stats)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	return ib_get_vf_stats(priv->ca, vf, priv->port, vf_stats);
}

static const struct header_ops ipoib_header_ops = {
	.create	= ipoib_hard_header,
};

static const struct net_device_ops ipoib_netdev_ops_pf = {
	.ndo_uninit		 = ipoib_uninit,
	.ndo_open		 = ipoib_open,
	.ndo_stop		 = ipoib_stop,
	.ndo_change_mtu		 = ipoib_change_mtu,
	.ndo_fix_features	 = ipoib_fix_features,
	.ndo_start_xmit		 = ipoib_start_xmit,
	.ndo_tx_timeout		 = ipoib_timeout,
	.ndo_set_rx_mode	 = ipoib_set_mcast_list,
	.ndo_get_iflink		 = ipoib_get_iflink,
	.ndo_set_vf_link_state	 = ipoib_set_vf_link_state,
	.ndo_get_vf_config	 = ipoib_get_vf_config,
	.ndo_get_vf_stats	 = ipoib_get_vf_stats,
	.ndo_set_vf_guid	 = ipoib_set_vf_guid,
	.ndo_set_mac_address	 = ipoib_set_mac,
};

static const struct net_device_ops ipoib_netdev_ops_vf = {
	.ndo_uninit		 = ipoib_uninit,
	.ndo_open		 = ipoib_open,
	.ndo_stop		 = ipoib_stop,
	.ndo_change_mtu		 = ipoib_change_mtu,
	.ndo_fix_features	 = ipoib_fix_features,
	.ndo_start_xmit		 = ipoib_start_xmit,
	.ndo_tx_timeout		 = ipoib_timeout,
	.ndo_set_rx_mode	 = ipoib_set_mcast_list,
	.ndo_get_iflink		 = ipoib_get_iflink,
};

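/*
 * netdev setup callback: select the PF or VF ndo table, install the IPoIB
 * header_ops, NAPI handler and link-layer parameters, and initialize the
 * private lock, lists and deferred work items.
 */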
void ipoib_setup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (priv->hca_caps & IB_DEVICE_VIRTUAL_FUNCTION)
		dev->netdev_ops	= &ipoib_netdev_ops_vf;
	else
		dev->netdev_ops	= &ipoib_netdev_ops_pf;

	dev->header_ops		 = &ipoib_header_ops;

	ipoib_set_ethtool_ops(dev);

	netif_napi_add(dev, &priv->napi, ipoib_poll, NAPI_POLL_WEIGHT);

	dev->watchdog_timeo	 = HZ;

	dev->flags		|= IFF_BROADCAST | IFF_MULTICAST;

	dev->hard_header_len	 = IPOIB_HARD_LEN;
	dev->addr_len		 = INFINIBAND_ALEN;
	dev->type		 = ARPHRD_INFINIBAND;
	dev->tx_queue_len	 = ipoib_sendq_size * 2;
	dev->features		 = (NETIF_F_VLAN_CHALLENGED	|
				    NETIF_F_HIGHDMA);
	netif_keep_dst(dev);

	memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);

	priv->dev = dev;

	spin_lock_init(&priv->lock);

	init_rwsem(&priv->vlan_rwsem);

	INIT_LIST_HEAD(&priv->path_list);
	INIT_LIST_HEAD(&priv->child_intfs);
	INIT_LIST_HEAD(&priv->dead_ahs);
	INIT_LIST_HEAD(&priv->multicast_list);

	INIT_DELAYED_WORK(&priv->mcast_task,   ipoib_mcast_join_task);
	INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task);
	INIT_WORK(&priv->flush_light,   ipoib_ib_dev_flush_light);
	INIT_WORK(&priv->flush_normal,   ipoib_ib_dev_flush_normal);
	INIT_WORK(&priv->flush_heavy,   ipoib_ib_dev_flush_heavy);
	INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
	INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
	INIT_DELAYED_WORK(&priv->neigh_reap_task, ipoib_reap_neigh);
}

struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
{
	struct net_device *dev;

	dev = alloc_netdev((int)sizeof(struct ipoib_dev_priv), name,
			   NET_NAME_UNKNOWN, ipoib_setup);
	if (!dev)
		return NULL;

	return netdev_priv(dev);
}

static ssize_t show_pkey(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));

	return sprintf(buf, "0x%04x\n", priv->pkey);
}
static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);

static ssize_t show_umcast(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));

	return sprintf(buf, "%d\n", test_bit(IPOIB_FLAG_UMCAST, &priv->flags));
}

void ipoib_set_umcast(struct net_device *ndev, int umcast_val)
{
	struct ipoib_dev_priv *priv = netdev_priv(ndev);

	if (umcast_val > 0) {
		set_bit(IPOIB_FLAG_UMCAST, &priv->flags);
		ipoib_warn(priv, "ignoring multicast groups joined directly "
				"by userspace\n");
	} else
		clear_bit(IPOIB_FLAG_UMCAST, &priv->flags);
}

static ssize_t set_umcast(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t count)
{
	unsigned long umcast_val = simple_strtoul(buf, NULL, 0);

	ipoib_set_umcast(to_net_dev(dev), umcast_val);

	return count;
}
static DEVICE_ATTR(umcast, S_IWUSR | S_IRUGO, show_umcast, set_umcast);

int ipoib_add_umcast_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_umcast);
}

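/*
 * Overwrite the interface-ID half of the local GID (and hence the last
 * eight bytes of dev_addr) with the new GUID, then propagate the change
 * recursively to all child interfaces.
 */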
static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid)
{
	struct ipoib_dev_priv *child_priv;
	struct net_device *netdev = priv->dev;

	netif_addr_lock_bh(netdev);

	memcpy(&priv->local_gid.global.interface_id,
	       &gid->global.interface_id,
	       sizeof(gid->global.interface_id));
	memcpy(netdev->dev_addr + 4, &priv->local_gid, sizeof(priv->local_gid));
	clear_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);

	netif_addr_unlock_bh(netdev);

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		down_read(&priv->vlan_rwsem);
		list_for_each_entry(child_priv, &priv->child_intfs, list)
			set_base_guid(child_priv, gid);
		up_read(&priv->vlan_rwsem);
	}
}

static int ipoib_check_lladdr(struct net_device *dev,
			      struct sockaddr_storage *ss)
{
	union ib_gid *gid = (union ib_gid *)(ss->__data + 4);
	int ret = 0;

	netif_addr_lock_bh(dev);

	/* Make sure the QPN, reserved bits and subnet prefix match the
	 * current lladdr; this also ensures the lladdr is unicast.
	 */
1907 	if (memcmp(dev->dev_addr, ss->__data,
1908 		   4 + sizeof(gid->global.subnet_prefix)) ||
1909 	    gid->global.interface_id == 0)
1910 		ret = -EINVAL;
1911 
1912 	netif_addr_unlock_bh(dev);
1913 
1914 	return ret;
1915 }
1916 
1917 static int ipoib_set_mac(struct net_device *dev, void *addr)
1918 {
1919 	struct ipoib_dev_priv *priv = netdev_priv(dev);
1920 	struct sockaddr_storage *ss = addr;
1921 	int ret;
1922 
1923 	if (!(dev->priv_flags & IFF_LIVE_ADDR_CHANGE) && netif_running(dev))
1924 		return -EBUSY;
1925 
1926 	ret = ipoib_check_lladdr(dev, ss);
1927 	if (ret)
1928 		return ret;
1929 
1930 	set_base_guid(priv, (union ib_gid *)(ss->__data + 4));
1931 
1932 	queue_work(ipoib_workqueue, &priv->flush_light);
1933 
1934 	return 0;
1935 }
1936 
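/*
 * create_child and delete_child manage P_Key child interfaces from
 * sysfs.  Illustrative use (per the kernel's IPoIB documentation):
 *
 *   $ echo 0x8001 > /sys/class/net/ib0/create_child
 *   $ echo 0x8001 > /sys/class/net/ib0/delete_child
 */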
static ssize_t create_child(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey <= 0 || pkey > 0xffff || pkey == 0x8000)
		return -EINVAL;

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	pkey |= 0x8000;

	ret = ipoib_vlan_add(to_net_dev(dev), pkey);

	return ret ? ret : count;
}
static DEVICE_ATTR(create_child, S_IWUSR, NULL, create_child);

static ssize_t delete_child(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey < 0 || pkey > 0xffff)
		return -EINVAL;

	ret = ipoib_vlan_delete(to_net_dev(dev), pkey);

	return ret ? ret : count;
}
static DEVICE_ATTR(delete_child, S_IWUSR, NULL, delete_child);

int ipoib_add_pkey_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_pkey);
}

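/*
 * Mirror the HCA's offload capabilities into the netdev feature flags.
 * Checksum offload on UD QPs is the baseline; TSO is only advertised
 * on top of it.
 */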
int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
{
	priv->hca_caps = hca->attrs.device_cap_flags;

	if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
		priv->dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (priv->hca_caps & IB_DEVICE_UD_TSO)
			priv->dev->hw_features |= NETIF_F_TSO;

		priv->dev->features |= priv->dev->hw_features;
	}

	return 0;
}

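/*
 * Create and register one IPoIB interface for an IB port: query the
 * port attributes, P_Key and GID, derive the link-layer address, then
 * register the netdev along with its event handler and sysfs files.
 */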
static struct net_device *ipoib_add_port(const char *format,
					 struct ib_device *hca, u8 port)
{
	struct ipoib_dev_priv *priv;
	struct ib_port_attr attr;
	int result = -ENOMEM;

	priv = ipoib_intf_alloc(format);
	if (!priv)
		goto alloc_mem_failed;

	SET_NETDEV_DEV(priv->dev, hca->dma_device);
	priv->dev->dev_id = port - 1;

	result = ib_query_port(hca, port, &attr);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}
	priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);

	/* MTU will be reset when mcast join happens */
	priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
	priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
	priv->dev->max_mtu = IPOIB_CM_MTU;

	priv->dev->neigh_priv_len = sizeof(struct ipoib_neigh);

	result = ib_query_pkey(hca, port, 0, &priv->pkey);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}

	result = ipoib_set_dev_features(priv, hca);
	if (result)
		goto device_init_failed;

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	priv->pkey |= 0x8000;

	priv->dev->broadcast[8] = priv->pkey >> 8;
	priv->dev->broadcast[9] = priv->pkey & 0xff;

	result = ib_query_gid(hca, port, 0, &priv->local_gid, NULL);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}
	memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw,
	       sizeof(union ib_gid));
	set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);

	result = ipoib_dev_init(priv->dev, hca, port);
	if (result < 0) {
		printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}

	INIT_IB_EVENT_HANDLER(&priv->event_handler,
			      priv->ca, ipoib_event);
	result = ib_register_event_handler(&priv->event_handler);
	if (result < 0) {
		printk(KERN_WARNING "%s: ib_register_event_handler failed for port %d (ret = %d)\n",
		       hca->name, port, result);
		goto event_failed;
	}

	result = register_netdev(priv->dev);
	if (result) {
		printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
		       hca->name, port, result);
		goto register_failed;
	}

	ipoib_create_debug_files(priv->dev);

	if (ipoib_cm_add_mode_attr(priv->dev))
		goto sysfs_failed;
	if (ipoib_add_pkey_attr(priv->dev))
		goto sysfs_failed;
	if (ipoib_add_umcast_attr(priv->dev))
		goto sysfs_failed;
	if (device_create_file(&priv->dev->dev, &dev_attr_create_child))
		goto sysfs_failed;
	if (device_create_file(&priv->dev->dev, &dev_attr_delete_child))
		goto sysfs_failed;

	return priv->dev;

sysfs_failed:
	ipoib_delete_debug_files(priv->dev);
	unregister_netdev(priv->dev);

register_failed:
	ib_unregister_event_handler(&priv->event_handler);
	flush_workqueue(ipoib_workqueue);
	/* Stop GC if started before flush */
	set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
	cancel_delayed_work(&priv->neigh_reap_task);
	flush_workqueue(priv->wq);

event_failed:
	ipoib_dev_cleanup(priv->dev);

device_init_failed:
	free_netdev(priv->dev);

alloc_mem_failed:
	return ERR_PTR(result);
}

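/*
 * IB client "add" callback: create an "ib%d" interface for every port
 * that runs the IB link-layer protocol, and stash the list of
 * interfaces as client data for ipoib_remove_one().
 */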
static void ipoib_add_one(struct ib_device *device)
{
	struct list_head *dev_list;
	struct net_device *dev;
	struct ipoib_dev_priv *priv;
	int p;
	int count = 0;

	dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
	if (!dev_list)
		return;

	INIT_LIST_HEAD(dev_list);

	for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
		if (!rdma_protocol_ib(device, p))
			continue;
		dev = ipoib_add_port("ib%d", device, p);
		if (!IS_ERR(dev)) {
			priv = netdev_priv(dev);
			list_add_tail(&priv->list, dev_list);
			count++;
		}
	}

	if (!count) {
		kfree(dev_list);
		return;
	}

	ib_set_client_data(device, &ipoib_client, dev_list);
}

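/*
 * IB client "remove" callback: undo ipoib_add_port() for each interface
 * on the departing device.  Unregister the event handler, bring the
 * interface down, stop the neighbour GC, then unregister and free the
 * netdev.
 */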
static void ipoib_remove_one(struct ib_device *device, void *client_data)
{
	struct ipoib_dev_priv *priv, *tmp;
	struct list_head *dev_list = client_data;

	if (!dev_list)
		return;

	list_for_each_entry_safe(priv, tmp, dev_list, list) {
		ib_unregister_event_handler(&priv->event_handler);
		flush_workqueue(ipoib_workqueue);

		/* mark interface in the middle of destruction */
		set_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags);

		rtnl_lock();
		dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP);
		rtnl_unlock();

		/* Stop GC */
		set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
		cancel_delayed_work(&priv->neigh_reap_task);
		flush_workqueue(priv->wq);

		unregister_netdev(priv->dev);
		free_netdev(priv->dev);
	}

	kfree(dev_list);
}

static int __init ipoib_init_module(void)
{
	int ret;

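	/*
	 * Normalize the ring-size module parameters to powers of two
	 * within the driver's supported range.  Illustrative example:
	 * loading with "modprobe ib_ipoib send_queue_size=100" rounds
	 * the send ring up to 128 entries below.
	 */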
	ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
	ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);

	ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
	ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE);
#ifdef CONFIG_INFINIBAND_IPOIB_CM
	ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
#endif

	/*
	 * When copying small received packets, we only copy from the
	 * linear data part of the SKB, so we rely on this condition.
	 */
	BUILD_BUG_ON(IPOIB_CM_COPYBREAK > IPOIB_CM_HEAD_SIZE);

	ret = ipoib_register_debugfs();
	if (ret)
		return ret;

	/*
	 * We create a global workqueue here that is used for all flush
	 * operations.  However, flushing a workqueue from a work item
	 * running on that same workqueue deadlocks.  Since we want to be
	 * able to flush the tasks associated with a specific net device,
	 * we also create a workqueue for each netdevice.  We queue up
	 * the tasks for that device only on its private workqueue, and
	 * we only queue up flush events on our global flush workqueue.
	 * This avoids the deadlocks.
	 */
	ipoib_workqueue = alloc_ordered_workqueue("ipoib_flush",
						  WQ_MEM_RECLAIM);
	if (!ipoib_workqueue) {
		ret = -ENOMEM;
		goto err_fs;
	}

	ib_sa_register_client(&ipoib_sa_client);

	ret = ib_register_client(&ipoib_client);
	if (ret)
		goto err_sa;

	ret = ipoib_netlink_init();
	if (ret)
		goto err_client;

	return 0;

err_client:
	ib_unregister_client(&ipoib_client);

err_sa:
	ib_sa_unregister_client(&ipoib_sa_client);
	destroy_workqueue(ipoib_workqueue);

err_fs:
	ipoib_unregister_debugfs();

	return ret;
}

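/* Module unload: undo ipoib_init_module() in reverse order. */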
static void __exit ipoib_cleanup_module(void)
{
	ipoib_netlink_fini();
	ib_unregister_client(&ipoib_client);
	ib_sa_unregister_client(&ipoib_sa_client);
	ipoib_unregister_debugfs();
	destroy_workqueue(ipoib_workqueue);
}

module_init(ipoib_init_module);
module_exit(ipoib_cleanup_module);