/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ipoib.h"

#include <linux/module.h>

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>

#include <linux/if_arp.h>	/* For ARPHRD_xxx */

#include <linux/ip.h>
#include <linux/in.h>

#include <linux/jhash.h>
#include <net/arp.h>

#define DRV_VERSION "1.0.0"

const char ipoib_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE;
int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE;

module_param_named(send_queue_size, ipoib_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");
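
/*
 * Both ring sizes are read-only module parameters (mode 0444), so they
 * can only be set at load time, e.g. (illustrative invocation):
 *
 *	modprobe ib_ipoib send_queue_size=128 recv_queue_size=512
 *
 * ipoib_init_module() below rounds the requested sizes to powers of two
 * and clamps them to [IPOIB_MIN_QUEUE_SIZE, IPOIB_MAX_QUEUE_SIZE].
 */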

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
int ipoib_debug_level;

module_param_named(debug_level, ipoib_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif

struct ipoib_path_iter {
	struct net_device *dev;
	struct ipoib_path  path;
};

static const u8 ipv4_bcast_addr[] = {
	0x00, 0xff, 0xff, 0xff,
	0xff, 0x12, 0x40, 0x1b,	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,	0xff, 0xff, 0xff, 0xff
};

struct workqueue_struct *ipoib_workqueue;

struct ib_sa_client ipoib_sa_client;

static void ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device);
static void ipoib_neigh_reclaim(struct rcu_head *rp);

static struct ib_client ipoib_client = {
	.name   = "ipoib",
	.add    = ipoib_add_one,
	.remove = ipoib_remove_one
};

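/*
 * ndo_open: mark the interface administratively up, bring up the IB
 * resources (ipoib_ib_dev_open/ipoib_ib_dev_up), and propagate IFF_UP
 * to any child (P_Key/VLAN) interfaces hanging off this parent.
 */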
int ipoib_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "bringing up interface\n");

	netif_carrier_off(dev);

	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	if (ipoib_ib_dev_open(dev, 1)) {
		if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
			return 0;
		goto err_disable;
	}

	if (ipoib_ib_dev_up(dev))
		goto err_stop;

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring up any child interfaces too */
		down_read(&priv->vlan_rwsem);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (flags & IFF_UP)
				continue;

			dev_change_flags(cpriv->dev, flags | IFF_UP);
		}
		up_read(&priv->vlan_rwsem);
	}

	netif_start_queue(dev);

	return 0;

err_stop:
	ipoib_ib_dev_stop(dev, 1);

err_disable:
	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	return -EINVAL;
}

static int ipoib_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "stopping interface\n");

	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	netif_stop_queue(dev);

	ipoib_ib_dev_down(dev, 1);
	ipoib_ib_dev_stop(dev, 0);

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring down any child interfaces too */
		down_read(&priv->vlan_rwsem);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (!(flags & IFF_UP))
				continue;

			dev_change_flags(cpriv->dev, flags & ~IFF_UP);
		}
		up_read(&priv->vlan_rwsem);
	}

	return 0;
}

static void ipoib_uninit(struct net_device *dev)
{
	ipoib_dev_cleanup(dev);
}

static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
		features &= ~(NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);

	return features;
}

static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* dev->mtu > 2K ==> connected mode */
	if (ipoib_cm_admin_enabled(dev)) {
		if (new_mtu > ipoib_cm_max_mtu(dev))
			return -EINVAL;

		if (new_mtu > priv->mcast_mtu)
			ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n",
				   priv->mcast_mtu);

		dev->mtu = new_mtu;
		return 0;
	}

	if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
		return -EINVAL;

	priv->admin_mtu = new_mtu;

	dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);

	return 0;
}

int ipoib_set_mode(struct net_device *dev, const char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* flush paths if we switch modes so that connections are restarted */
	if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
		set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
		ipoib_warn(priv, "enabling connected mode "
			   "will cause multicast packet drops\n");
		netdev_update_features(dev);
		rtnl_unlock();
		priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;

		ipoib_flush_paths(dev);
		rtnl_lock();
		return 0;
	}

	if (!strcmp(buf, "datagram\n")) {
		clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
		netdev_update_features(dev);
		dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
		rtnl_unlock();
		ipoib_flush_paths(dev);
		rtnl_lock();
		return 0;
	}

	return -EINVAL;
}

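/*
 * Look up a cached path record entry by destination GID.  The path
 * cache is a red-black tree keyed on the raw 16-byte GID; callers must
 * hold priv->lock.
 */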
static struct ipoib_path *__path_find(struct net_device *dev, void *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node *n = priv->path_tree.rb_node;
	struct ipoib_path *path;
	int ret;

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		ret = memcmp(gid, path->pathrec.dgid.raw,
			     sizeof (union ib_gid));

		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			return path;
	}

	return NULL;
}

static int __path_add(struct net_device *dev, struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node **n = &priv->path_tree.rb_node;
	struct rb_node *pn = NULL;
	struct ipoib_path *tpath;
	int ret;

	while (*n) {
		pn = *n;
		tpath = rb_entry(pn, struct ipoib_path, rb_node);

		ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
			     sizeof (union ib_gid));
		if (ret < 0)
			n = &pn->rb_left;
		else if (ret > 0)
			n = &pn->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&path->rb_node, pn, n);
	rb_insert_color(&path->rb_node, &priv->path_tree);

	list_add_tail(&path->list, &priv->path_list);

	return 0;
}

static void path_free(struct net_device *dev, struct ipoib_path *path)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&path->queue)))
		dev_kfree_skb_irq(skb);

	ipoib_dbg(netdev_priv(dev), "path_free\n");

	/* remove all neighs connected to this path */
	ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);

	if (path->ah)
		ipoib_put_ah(path->ah);

	kfree(path);
}

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev)
{
	struct ipoib_path_iter *iter;

	iter = kmalloc(sizeof *iter, GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	memset(iter->path.pathrec.dgid.raw, 0, 16);

	if (ipoib_path_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int ipoib_path_iter_next(struct ipoib_path_iter *iter)
{
	struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
	struct rb_node *n;
	struct ipoib_path *path;
	int ret = 1;

	spin_lock_irq(&priv->lock);

	n = rb_first(&priv->path_tree);

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
			   sizeof (union ib_gid)) < 0) {
			iter->path = *path;
			ret = 0;
			break;
		}

		n = rb_next(n);
	}

	spin_unlock_irq(&priv->lock);

	return ret;
}

void ipoib_path_iter_read(struct ipoib_path_iter *iter,
			  struct ipoib_path *path)
{
	*path = iter->path;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */

void ipoib_mark_paths_invalid(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path, *tp;

	spin_lock_irq(&priv->lock);

	list_for_each_entry_safe(path, tp, &priv->path_list, list) {
		ipoib_dbg(priv, "mark path LID 0x%04x GID %pI6 invalid\n",
			be16_to_cpu(path->pathrec.dlid),
			path->pathrec.dgid.raw);
		path->valid = 0;
	}

	spin_unlock_irq(&priv->lock);
}

void ipoib_flush_paths(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path, *tp;
	LIST_HEAD(remove_list);
	unsigned long flags;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	list_splice_init(&priv->path_list, &remove_list);

	list_for_each_entry(path, &remove_list, list)
		rb_erase(&path->rb_node, &priv->path_tree);

	list_for_each_entry_safe(path, tp, &remove_list, list) {
		if (path->query)
			ib_sa_cancel_query(path->query_id, path->query);
		spin_unlock_irqrestore(&priv->lock, flags);
		netif_tx_unlock_bh(dev);
		wait_for_completion(&path->done);
		path_free(dev, path);
		netif_tx_lock_bh(dev);
		spin_lock_irqsave(&priv->lock, flags);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
}

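/*
 * Completion callback for the SA path record query.  On success, build
 * an address handle from the returned path, hand it to every neighbour
 * waiting on this path, and retransmit the skbs that were queued while
 * the lookup was in flight; on failure, drop those neighbours.
 */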
static void path_rec_completion(int status,
				struct ib_sa_path_rec *pathrec,
				void *path_ptr)
{
	struct ipoib_path *path = path_ptr;
	struct net_device *dev = path->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_ah *ah = NULL;
	struct ipoib_ah *old_ah = NULL;
	struct ipoib_neigh *neigh, *tn;
	struct sk_buff_head skqueue;
	struct sk_buff *skb;
	unsigned long flags;

	if (!status)
		ipoib_dbg(priv, "PathRec LID 0x%04x for GID %pI6\n",
			  be16_to_cpu(pathrec->dlid), pathrec->dgid.raw);
	else
		ipoib_dbg(priv, "PathRec status %d for GID %pI6\n",
			  status, path->pathrec.dgid.raw);

	skb_queue_head_init(&skqueue);

	if (!status) {
		struct ib_ah_attr av;

		if (!ib_init_ah_from_path(priv->ca, priv->port, pathrec, &av))
			ah = ipoib_create_ah(dev, priv->pd, &av);
	}

	spin_lock_irqsave(&priv->lock, flags);

	if (!IS_ERR_OR_NULL(ah)) {
		path->pathrec = *pathrec;

		old_ah   = path->ah;
		path->ah = ah;

		ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
			  ah, be16_to_cpu(pathrec->dlid), pathrec->sl);

		while ((skb = __skb_dequeue(&path->queue)))
			__skb_queue_tail(&skqueue, skb);

		list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
			if (neigh->ah) {
				WARN_ON(neigh->ah != old_ah);
				/*
				 * Dropping the ah reference inside
				 * priv->lock is safe here, because we
				 * will hold one more reference from
				 * the original value of path->ah (ie
				 * old_ah).
				 */
				ipoib_put_ah(neigh->ah);
			}
			kref_get(&path->ah->ref);
			neigh->ah = path->ah;

			if (ipoib_cm_enabled(dev, neigh->daddr)) {
				if (!ipoib_cm_get(neigh))
					ipoib_cm_set(neigh, ipoib_cm_create_tx(dev,
									       path,
									       neigh));
				if (!ipoib_cm_get(neigh)) {
					ipoib_neigh_free(neigh);
					continue;
				}
			}

			while ((skb = __skb_dequeue(&neigh->queue)))
				__skb_queue_tail(&skqueue, skb);
		}
		path->valid = 1;
	}

	path->query = NULL;
	complete(&path->done);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (IS_ERR_OR_NULL(ah))
		ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);

	if (old_ah)
		ipoib_put_ah(old_ah);

	while ((skb = __skb_dequeue(&skqueue))) {
		skb->dev = dev;
		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed "
				   "to requeue packet\n");
	}
}

static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;

	if (!priv->broadcast)
		return NULL;

	path = kzalloc(sizeof *path, GFP_ATOMIC);
	if (!path)
		return NULL;

	path->dev = dev;

	skb_queue_head_init(&path->queue);

	INIT_LIST_HEAD(&path->neigh_list);

	memcpy(path->pathrec.dgid.raw, gid, sizeof (union ib_gid));
	path->pathrec.sgid	    = priv->local_gid;
	path->pathrec.pkey	    = cpu_to_be16(priv->pkey);
	path->pathrec.numb_path     = 1;
	path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;

	return path;
}

static int path_rec_start(struct net_device *dev,
			  struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "Start path record lookup for %pI6\n",
		  path->pathrec.dgid.raw);

	init_completion(&path->done);

	path->query_id =
		ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port,
				   &path->pathrec,
				   IB_SA_PATH_REC_DGID		|
				   IB_SA_PATH_REC_SGID		|
				   IB_SA_PATH_REC_NUMB_PATH	|
				   IB_SA_PATH_REC_TRAFFIC_CLASS |
				   IB_SA_PATH_REC_PKEY,
				   1000, GFP_ATOMIC,
				   path_rec_completion,
				   path, &path->query);
	if (path->query_id < 0) {
		ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id);
		path->query = NULL;
		complete(&path->done);
		return path->query_id;
	}

	return 0;
}

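/*
 * Slow path for unicast transmit: no neighbour entry existed for this
 * destination.  Allocate one, attach it to the (possibly freshly
 * created) path entry, then either send immediately if an address
 * handle is ready or queue the skb until path resolution completes.
 */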
static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
			   struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;
	struct ipoib_neigh *neigh;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	neigh = ipoib_neigh_alloc(daddr, dev);
	if (!neigh) {
		spin_unlock_irqrestore(&priv->lock, flags);
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		return;
	}

	path = __path_find(dev, daddr + 4);
	if (!path) {
		path = path_rec_create(dev, daddr + 4);
		if (!path)
			goto err_path;

		__path_add(dev, path);
	}

	list_add_tail(&neigh->list, &path->neigh_list);

	if (path->ah) {
		kref_get(&path->ah->ref);
		neigh->ah = path->ah;

		if (ipoib_cm_enabled(dev, neigh->daddr)) {
			if (!ipoib_cm_get(neigh))
				ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, path, neigh));
			if (!ipoib_cm_get(neigh)) {
				ipoib_neigh_free(neigh);
				goto err_drop;
			}
			if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)
				__skb_queue_tail(&neigh->queue, skb);
			else {
				ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
					   skb_queue_len(&neigh->queue));
				goto err_drop;
			}
		} else {
			spin_unlock_irqrestore(&priv->lock, flags);
			ipoib_send(dev, skb, path->ah, IPOIB_QPN(daddr));
			ipoib_neigh_put(neigh);
			return;
		}
	} else {
		neigh->ah  = NULL;

		if (!path->query && path_rec_start(dev, path))
			goto err_path;

		__skb_queue_tail(&neigh->queue, skb);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	ipoib_neigh_put(neigh);
	return;

err_path:
	ipoib_neigh_free(neigh);
err_drop:
	++dev->stats.tx_dropped;
	dev_kfree_skb_any(skb);

	spin_unlock_irqrestore(&priv->lock, flags);
	ipoib_neigh_put(neigh);
}

static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
			     struct ipoib_cb *cb)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	path = __path_find(dev, cb->hwaddr + 4);
	if (!path || !path->valid) {
		int new_path = 0;

		if (!path) {
			path = path_rec_create(dev, cb->hwaddr + 4);
			new_path = 1;
		}
		if (path) {
			__skb_queue_tail(&path->queue, skb);

			if (!path->query && path_rec_start(dev, path)) {
				spin_unlock_irqrestore(&priv->lock, flags);
				if (new_path)
					path_free(dev, path);
				return;
			} else
				__path_add(dev, path);
		} else {
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}

		spin_unlock_irqrestore(&priv->lock, flags);
		return;
	}

	if (path->ah) {
		ipoib_dbg(priv, "Send unicast ARP to %04x\n",
			  be16_to_cpu(path->pathrec.dlid));

		spin_unlock_irqrestore(&priv->lock, flags);
		ipoib_send(dev, skb, path->ah, IPOIB_QPN(cb->hwaddr));
		return;
	} else if ((path->query || !path_rec_start(dev, path)) &&
		   skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
		__skb_queue_tail(&path->queue, skb);
	} else {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}

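/*
 * ndo_start_xmit: dispatch on the destination pseudo hardware address
 * stashed in skb->cb by ipoib_hard_header().  Multicast destinations
 * (first GID octet, hwaddr[4], is 0xff) go through the multicast
 * machinery; unicast IP/IPv6/TIPC frames use the neighbour cache; and
 * unicast ARP/RARP always trigger a path lookup.
 */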
static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh;
	struct ipoib_cb *cb = ipoib_skb_cb(skb);
	struct ipoib_header *header;
	unsigned long flags;

	header = (struct ipoib_header *) skb->data;

	if (unlikely(cb->hwaddr[4] == 0xff)) {
		/* multicast, arrange "if" according to probability */
		if ((header->proto != htons(ETH_P_IP)) &&
		    (header->proto != htons(ETH_P_IPV6)) &&
		    (header->proto != htons(ETH_P_ARP)) &&
		    (header->proto != htons(ETH_P_RARP)) &&
		    (header->proto != htons(ETH_P_TIPC))) {
			/* ethertype not supported by IPoIB */
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
		/* Add in the P_Key for multicast */
		cb->hwaddr[8] = (priv->pkey >> 8) & 0xff;
		cb->hwaddr[9] = priv->pkey & 0xff;

		neigh = ipoib_neigh_get(dev, cb->hwaddr);
		if (likely(neigh))
			goto send_using_neigh;
		ipoib_mcast_send(dev, cb->hwaddr, skb);
		return NETDEV_TX_OK;
	}

	/* unicast, arrange "switch" according to probability */
	switch (header->proto) {
	case htons(ETH_P_IP):
	case htons(ETH_P_IPV6):
	case htons(ETH_P_TIPC):
		neigh = ipoib_neigh_get(dev, cb->hwaddr);
		if (unlikely(!neigh)) {
			neigh_add_path(skb, cb->hwaddr, dev);
			return NETDEV_TX_OK;
		}
		break;
	case htons(ETH_P_ARP):
	case htons(ETH_P_RARP):
		/* unicast ARP and RARP should always perform a path lookup */
		unicast_arp_send(skb, dev, cb);
		return NETDEV_TX_OK;
	default:
		/* ethertype not supported by IPoIB */
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

send_using_neigh:
	/* note we now hold a ref to neigh */
	if (ipoib_cm_get(neigh)) {
		if (ipoib_cm_up(neigh)) {
			ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
			goto unref;
		}
	} else if (neigh->ah) {
		ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(cb->hwaddr));
		goto unref;
	}

	if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
		spin_lock_irqsave(&priv->lock, flags);
		__skb_queue_tail(&neigh->queue, skb);
		spin_unlock_irqrestore(&priv->lock, flags);
	} else {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}

unref:
	ipoib_neigh_put(neigh);

	return NETDEV_TX_OK;
}

static void ipoib_timeout(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
		   jiffies_to_msecs(jiffies - dev->trans_start));
	ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
		   netif_queue_stopped(dev),
		   priv->tx_head, priv->tx_tail);
	/* XXX reset QP, etc. */
}

static int ipoib_hard_header(struct sk_buff *skb,
			     struct net_device *dev,
			     unsigned short type,
			     const void *daddr, const void *saddr, unsigned len)
{
	struct ipoib_header *header;
	struct ipoib_cb *cb = ipoib_skb_cb(skb);

	header = (struct ipoib_header *) skb_push(skb, sizeof *header);

	header->proto = htons(type);
	header->reserved = 0;

	/*
	 * We don't rely on the dst_entry structure; we always stuff the
	 * destination address into skb->cb so we can figure out where
	 * to send the packet later.
	 */
	memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN);

	return sizeof *header;
}

static void ipoib_set_mcast_list(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
		ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set");
		return;
	}

	queue_work(ipoib_workqueue, &priv->restart_task);
}

static int ipoib_get_iflink(const struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* parent interface */
	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
		return dev->ifindex;

	/* child/vlan interface */
	return priv->parent->ifindex;
}

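/*
 * An IPoIB hardware address is INFINIBAND_ALEN (20) bytes: octets
 * [0:4) carry the flags and 24-bit destination QPN, and octets [4:20)
 * carry the 16-byte destination GID.
 */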
static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
{
	/*
	 * Use only the address parts that contribute to spreading.
	 * The subnet prefix is not used, as one cannot connect to the
	 * same remote port (GUID) using the same remote QPN via two
	 * different subnets.
	 */
	 /* qpn octets[1:4) & port GUID octets[12:20) */
	u32 *d32 = (u32 *) daddr;
	u32 hv;

	hv = jhash_3words(d32[3], d32[4], IPOIB_QPN_MASK & d32[0], 0);
	return hv & htbl->mask;
}

struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh *neigh = NULL;
	u32 hash_val;

	rcu_read_lock_bh();

	htbl = rcu_dereference_bh(ntbl->htbl);

	if (!htbl)
		goto out_unlock;

	hash_val = ipoib_addr_hash(htbl, daddr);
	for (neigh = rcu_dereference_bh(htbl->buckets[hash_val]);
	     neigh != NULL;
	     neigh = rcu_dereference_bh(neigh->hnext)) {
		if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
			/* found, take one ref on behalf of the caller */
			if (!atomic_inc_not_zero(&neigh->refcnt)) {
				/* deleted */
				neigh = NULL;
				goto out_unlock;
			}
			neigh->alive = jiffies;
			goto out_unlock;
		}
	}

out_unlock:
	rcu_read_unlock_bh();
	return neigh;
}

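/*
 * Neighbour garbage collection: walk every hash bucket under
 * priv->lock and unlink entries that have been idle for two ARP GC
 * intervals; the actual free happens via call_rcu() once readers on
 * the RCU-protected lookup path are done.
 */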
static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
{
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	unsigned long neigh_obsolete;
	unsigned long dt;
	unsigned long flags;
	int i;

	if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
		return;

	spin_lock_irqsave(&priv->lock, flags);

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));

	if (!htbl)
		goto out_unlock;

	/* neigh is obsolete if it was idle for two GC periods */
	dt = 2 * arp_tbl.gc_interval;
	neigh_obsolete = jiffies - dt;
	/* handle possible race condition */
	if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
		goto out_unlock;

	for (i = 0; i < htbl->size; i++) {
		struct ipoib_neigh *neigh;
		struct ipoib_neigh __rcu **np = &htbl->buckets[i];

		while ((neigh = rcu_dereference_protected(*np,
							  lockdep_is_held(&priv->lock))) != NULL) {
			/* was the neigh idle for two GC periods? */
			if (time_after(neigh_obsolete, neigh->alive)) {
				rcu_assign_pointer(*np,
						   rcu_dereference_protected(neigh->hnext,
									     lockdep_is_held(&priv->lock)));
				/* remove from path/mc list */
				list_del(&neigh->list);
				call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
			} else {
				np = &neigh->hnext;
			}
		}
	}

out_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
}

static void ipoib_reap_neigh(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, neigh_reap_task.work);

	__ipoib_reap_neigh(priv);

	if (!test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
		queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task,
				   arp_tbl.gc_interval);
}

static struct ipoib_neigh *ipoib_neigh_ctor(u8 *daddr,
				      struct net_device *dev)
{
	struct ipoib_neigh *neigh;

	neigh = kzalloc(sizeof *neigh, GFP_ATOMIC);
	if (!neigh)
		return NULL;

	neigh->dev = dev;
	memcpy(&neigh->daddr, daddr, sizeof(neigh->daddr));
	skb_queue_head_init(&neigh->queue);
	INIT_LIST_HEAD(&neigh->list);
	ipoib_cm_set(neigh, NULL);
	/* one ref on behalf of the caller */
	atomic_set(&neigh->refcnt, 1);

	return neigh;
}

struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr,
				      struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh *neigh;
	u32 hash_val;

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));
	if (!htbl) {
		neigh = NULL;
		goto out_unlock;
	}

	/* We need to add a new neigh, but maybe another thread already
	 * succeeded? Recalculate the hash in case a resize took place,
	 * and search again.
	 */
	hash_val = ipoib_addr_hash(htbl, daddr);
	for (neigh = rcu_dereference_protected(htbl->buckets[hash_val],
					       lockdep_is_held(&priv->lock));
	     neigh != NULL;
	     neigh = rcu_dereference_protected(neigh->hnext,
					       lockdep_is_held(&priv->lock))) {
		if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
			/* found, take one ref on behalf of the caller */
			if (!atomic_inc_not_zero(&neigh->refcnt)) {
				/* deleted */
				neigh = NULL;
				break;
			}
			neigh->alive = jiffies;
			goto out_unlock;
		}
	}

	neigh = ipoib_neigh_ctor(daddr, dev);
	if (!neigh)
		goto out_unlock;

	/* one ref on behalf of the hash table */
	atomic_inc(&neigh->refcnt);
	neigh->alive = jiffies;
	/* put in hash */
	rcu_assign_pointer(neigh->hnext,
			   rcu_dereference_protected(htbl->buckets[hash_val],
						     lockdep_is_held(&priv->lock)));
	rcu_assign_pointer(htbl->buckets[hash_val], neigh);
	atomic_inc(&ntbl->entries);

out_unlock:

	return neigh;
}

void ipoib_neigh_dtor(struct ipoib_neigh *neigh)
{
	/* neigh reference count was dropped to zero */
	struct net_device *dev = neigh->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	if (neigh->ah)
		ipoib_put_ah(neigh->ah);
	while ((skb = __skb_dequeue(&neigh->queue))) {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}
	if (ipoib_cm_get(neigh))
		ipoib_cm_destroy_tx(ipoib_cm_get(neigh));
	ipoib_dbg(netdev_priv(dev),
		  "neigh free for %06x %pI6\n",
		  IPOIB_QPN(neigh->daddr),
		  neigh->daddr + 4);
	kfree(neigh);
	if (atomic_dec_and_test(&priv->ntbl.entries)) {
		if (test_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags))
			complete(&priv->ntbl.flushed);
	}
}

static void ipoib_neigh_reclaim(struct rcu_head *rp)
{
	/* Called as a result of removal from hash table */
	struct ipoib_neigh *neigh = container_of(rp, struct ipoib_neigh, rcu);
	/* note TX context may hold another ref */
	ipoib_neigh_put(neigh);
}

void ipoib_neigh_free(struct ipoib_neigh *neigh)
{
	struct net_device *dev = neigh->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh __rcu **np;
	struct ipoib_neigh *n;
	u32 hash_val;

	htbl = rcu_dereference_protected(ntbl->htbl,
					lockdep_is_held(&priv->lock));
	if (!htbl)
		return;

	hash_val = ipoib_addr_hash(htbl, neigh->daddr);
	np = &htbl->buckets[hash_val];
	for (n = rcu_dereference_protected(*np,
					    lockdep_is_held(&priv->lock));
	     n != NULL;
	     n = rcu_dereference_protected(*np,
					lockdep_is_held(&priv->lock))) {
		if (n == neigh) {
			/* found */
			rcu_assign_pointer(*np,
					   rcu_dereference_protected(neigh->hnext,
								     lockdep_is_held(&priv->lock)));
			/* remove from parent list */
			list_del(&neigh->list);
			call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
			return;
		} else {
			np = &n->hnext;
		}
	}
}

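/*
 * Allocate the neighbour hash table sized to the system ARP table
 * high-water mark (gc_thresh3, rounded up to a power of two) and kick
 * off the periodic garbage-collection work.
 */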
static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
{
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh **buckets;
	u32 size;

	clear_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
	ntbl->htbl = NULL;
	htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
	if (!htbl)
		return -ENOMEM;
	set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
	size = roundup_pow_of_two(arp_tbl.gc_thresh3);
	buckets = kzalloc(size * sizeof(*buckets), GFP_KERNEL);
	if (!buckets) {
		kfree(htbl);
		return -ENOMEM;
	}
	htbl->size = size;
	htbl->mask = (size - 1);
	htbl->buckets = buckets;
	ntbl->htbl = htbl;
	htbl->ntbl = ntbl;
	atomic_set(&ntbl->entries, 0);

	/* start garbage collection */
	clear_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
	queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task,
			   arp_tbl.gc_interval);

	return 0;
}

static void neigh_hash_free_rcu(struct rcu_head *head)
{
	struct ipoib_neigh_hash *htbl = container_of(head,
						    struct ipoib_neigh_hash,
						    rcu);
	struct ipoib_neigh __rcu **buckets = htbl->buckets;
	struct ipoib_neigh_table *ntbl = htbl->ntbl;

	kfree(buckets);
	kfree(htbl);
	complete(&ntbl->deleted);
}

void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	unsigned long flags;
	int i;

	/* remove all neighs connected to a given path or mcast */
	spin_lock_irqsave(&priv->lock, flags);

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));

	if (!htbl)
		goto out_unlock;

	for (i = 0; i < htbl->size; i++) {
		struct ipoib_neigh *neigh;
		struct ipoib_neigh __rcu **np = &htbl->buckets[i];

		while ((neigh = rcu_dereference_protected(*np,
							  lockdep_is_held(&priv->lock))) != NULL) {
			/* delete neighs belonging to this parent */
			if (!memcmp(gid, neigh->daddr + 4, sizeof (union ib_gid))) {
				rcu_assign_pointer(*np,
						   rcu_dereference_protected(neigh->hnext,
									     lockdep_is_held(&priv->lock)));
				/* remove from parent list */
				list_del(&neigh->list);
				call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
			} else {
				np = &neigh->hnext;
			}
		}
	}
out_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
}

static void ipoib_flush_neighs(struct ipoib_dev_priv *priv)
{
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	unsigned long flags;
	int i, wait_flushed = 0;

	init_completion(&priv->ntbl.flushed);

	spin_lock_irqsave(&priv->lock, flags);

	htbl = rcu_dereference_protected(ntbl->htbl,
					lockdep_is_held(&priv->lock));
	if (!htbl)
		goto out_unlock;

	wait_flushed = atomic_read(&priv->ntbl.entries);
	if (!wait_flushed)
		goto free_htbl;

	for (i = 0; i < htbl->size; i++) {
		struct ipoib_neigh *neigh;
		struct ipoib_neigh __rcu **np = &htbl->buckets[i];

		while ((neigh = rcu_dereference_protected(*np,
				       lockdep_is_held(&priv->lock))) != NULL) {
			rcu_assign_pointer(*np,
					   rcu_dereference_protected(neigh->hnext,
								     lockdep_is_held(&priv->lock)));
			/* remove from path/mc list */
			list_del(&neigh->list);
			call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
		}
	}

free_htbl:
	rcu_assign_pointer(ntbl->htbl, NULL);
	call_rcu(&htbl->rcu, neigh_hash_free_rcu);

out_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
	if (wait_flushed)
		wait_for_completion(&priv->ntbl.flushed);
}

static void ipoib_neigh_hash_uninit(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int stopped;

	ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n");
	init_completion(&priv->ntbl.deleted);
	set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);

	/* Stop GC; if called from an init failure path, the work must be cancelled */
	stopped = test_and_set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
	if (!stopped)
		cancel_delayed_work(&priv->neigh_reap_task);

	ipoib_flush_neighs(priv);

	wait_for_completion(&priv->ntbl.deleted);
}

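/*
 * Per-port initialization: set up the neighbour hash, allocate the RX
 * ring (kzalloc) and the larger TX ring (vzalloc), then initialize the
 * IB resources.  Each failure unwinds everything allocated before it.
 */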
int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (ipoib_neigh_hash_init(priv) < 0)
		goto out;
	/* Allocate RX/TX "rings" to hold queued skbs */
	priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
				GFP_KERNEL);
	if (!priv->rx_ring) {
		printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
		       ca->name, ipoib_recvq_size);
		goto out_neigh_hash_cleanup;
	}

	priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
	if (!priv->tx_ring) {
		printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
		       ca->name, ipoib_sendq_size);
		goto out_rx_ring_cleanup;
	}

	/* priv->tx_head, tx_tail & tx_outstanding are already 0 */

	if (ipoib_ib_dev_init(dev, ca, port))
		goto out_tx_ring_cleanup;

	return 0;

out_tx_ring_cleanup:
	vfree(priv->tx_ring);

out_rx_ring_cleanup:
	kfree(priv->rx_ring);

out_neigh_hash_cleanup:
	ipoib_neigh_hash_uninit(dev);
out:
	return -ENOMEM;
}

void ipoib_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv;
	LIST_HEAD(head);

	ASSERT_RTNL();

	ipoib_delete_debug_files(dev);

	/* Delete any child interfaces first */
	list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
		/* Stop GC on child */
		set_bit(IPOIB_STOP_NEIGH_GC, &cpriv->flags);
		cancel_delayed_work(&cpriv->neigh_reap_task);
		unregister_netdevice_queue(cpriv->dev, &head);
	}
	unregister_netdevice_many(&head);

	ipoib_ib_dev_cleanup(dev);

	kfree(priv->rx_ring);
	vfree(priv->tx_ring);

	priv->rx_ring = NULL;
	priv->tx_ring = NULL;

	ipoib_neigh_hash_uninit(dev);
}

static const struct header_ops ipoib_header_ops = {
	.create	= ipoib_hard_header,
};

static const struct net_device_ops ipoib_netdev_ops = {
	.ndo_uninit		 = ipoib_uninit,
	.ndo_open		 = ipoib_open,
	.ndo_stop		 = ipoib_stop,
	.ndo_change_mtu		 = ipoib_change_mtu,
	.ndo_fix_features	 = ipoib_fix_features,
	.ndo_start_xmit		 = ipoib_start_xmit,
	.ndo_tx_timeout		 = ipoib_timeout,
	.ndo_set_rx_mode	 = ipoib_set_mcast_list,
	.ndo_get_iflink		 = ipoib_get_iflink,
};

void ipoib_setup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	dev->netdev_ops		 = &ipoib_netdev_ops;
	dev->header_ops		 = &ipoib_header_ops;

	ipoib_set_ethtool_ops(dev);

	netif_napi_add(dev, &priv->napi, ipoib_poll, NAPI_POLL_WEIGHT);

	dev->watchdog_timeo	 = HZ;

	dev->flags		|= IFF_BROADCAST | IFF_MULTICAST;

	dev->hard_header_len	 = IPOIB_ENCAP_LEN;
	dev->addr_len		 = INFINIBAND_ALEN;
	dev->type		 = ARPHRD_INFINIBAND;
	dev->tx_queue_len	 = ipoib_sendq_size * 2;
	dev->features		 = (NETIF_F_VLAN_CHALLENGED	|
				    NETIF_F_HIGHDMA);
	netif_keep_dst(dev);

	memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);

	priv->dev = dev;

	spin_lock_init(&priv->lock);

	init_rwsem(&priv->vlan_rwsem);

	INIT_LIST_HEAD(&priv->path_list);
	INIT_LIST_HEAD(&priv->child_intfs);
	INIT_LIST_HEAD(&priv->dead_ahs);
	INIT_LIST_HEAD(&priv->multicast_list);

	INIT_DELAYED_WORK(&priv->mcast_task,   ipoib_mcast_join_task);
	INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task);
	INIT_WORK(&priv->flush_light,   ipoib_ib_dev_flush_light);
	INIT_WORK(&priv->flush_normal,   ipoib_ib_dev_flush_normal);
	INIT_WORK(&priv->flush_heavy,   ipoib_ib_dev_flush_heavy);
	INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
	INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
	INIT_DELAYED_WORK(&priv->neigh_reap_task, ipoib_reap_neigh);
}

struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
{
	struct net_device *dev;

	dev = alloc_netdev((int)sizeof(struct ipoib_dev_priv), name,
			   NET_NAME_UNKNOWN, ipoib_setup);
	if (!dev)
		return NULL;

	return netdev_priv(dev);
}

static ssize_t show_pkey(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));

	return sprintf(buf, "0x%04x\n", priv->pkey);
}
static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);

static ssize_t show_umcast(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));

	return sprintf(buf, "%d\n", test_bit(IPOIB_FLAG_UMCAST, &priv->flags));
}

void ipoib_set_umcast(struct net_device *ndev, int umcast_val)
{
	struct ipoib_dev_priv *priv = netdev_priv(ndev);

	if (umcast_val > 0) {
		set_bit(IPOIB_FLAG_UMCAST, &priv->flags);
		ipoib_warn(priv, "ignoring multicast groups joined directly "
				"by userspace\n");
	} else
		clear_bit(IPOIB_FLAG_UMCAST, &priv->flags);
}

static ssize_t set_umcast(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t count)
{
	unsigned long umcast_val = simple_strtoul(buf, NULL, 0);

	ipoib_set_umcast(to_net_dev(dev), umcast_val);

	return count;
}
static DEVICE_ATTR(umcast, S_IWUSR | S_IRUGO, show_umcast, set_umcast);

int ipoib_add_umcast_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_umcast);
}

static ssize_t create_child(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey <= 0 || pkey > 0xffff || pkey == 0x8000)
		return -EINVAL;

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	pkey |= 0x8000;

	ret = ipoib_vlan_add(to_net_dev(dev), pkey);

	return ret ? ret : count;
}
static DEVICE_ATTR(create_child, S_IWUSR, NULL, create_child);

static ssize_t delete_child(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey < 0 || pkey > 0xffff)
		return -EINVAL;

	ret = ipoib_vlan_delete(to_net_dev(dev), pkey);

	return ret ? ret : count;
}
static DEVICE_ATTR(delete_child, S_IWUSR, NULL, delete_child);

int ipoib_add_pkey_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_pkey);
}

int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
{
	struct ib_device_attr *device_attr;
	int result = -ENOMEM;

	device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
	if (!device_attr) {
		printk(KERN_WARNING "%s: allocation of %zu bytes failed\n",
		       hca->name, sizeof *device_attr);
		return result;
	}

	result = ib_query_device(hca, device_attr);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_device failed (ret = %d)\n",
		       hca->name, result);
		kfree(device_attr);
		return result;
	}
	priv->hca_caps = device_attr->device_cap_flags;

	kfree(device_attr);

	if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
		priv->dev->hw_features = NETIF_F_SG |
			NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (priv->hca_caps & IB_DEVICE_UD_TSO)
			priv->dev->hw_features |= NETIF_F_TSO;

		priv->dev->features |= priv->dev->hw_features;
	}

	return 0;
}

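/*
 * Create and register one IPoIB netdevice for a single HCA port:
 * query the port and P_Key attributes, derive the hardware address
 * from the port GID, register the netdev, and create the sysfs
 * control files.
 */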
static struct net_device *ipoib_add_port(const char *format,
					 struct ib_device *hca, u8 port)
{
	struct ipoib_dev_priv *priv;
	struct ib_port_attr attr;
	int result = -ENOMEM;

	priv = ipoib_intf_alloc(format);
	if (!priv)
		goto alloc_mem_failed;

	SET_NETDEV_DEV(priv->dev, hca->dma_device);
	priv->dev->dev_id = port - 1;

	if (!ib_query_port(hca, port, &attr))
		priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
	else {
		printk(KERN_WARNING "%s: ib_query_port %d failed\n",
		       hca->name, port);
		goto device_init_failed;
	}

	/* MTU will be reset when mcast join happens */
	priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
	priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;

	priv->dev->neigh_priv_len = sizeof(struct ipoib_neigh);

	result = ib_query_pkey(hca, port, 0, &priv->pkey);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}

	if (ipoib_set_dev_features(priv, hca))
		goto device_init_failed;

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	priv->pkey |= 0x8000;

	priv->dev->broadcast[8] = priv->pkey >> 8;
	priv->dev->broadcast[9] = priv->pkey & 0xff;

	result = ib_query_gid(hca, port, 0, &priv->local_gid);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	} else
		memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));

	result = ipoib_dev_init(priv->dev, hca, port);
	if (result < 0) {
		printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}

	INIT_IB_EVENT_HANDLER(&priv->event_handler,
			      priv->ca, ipoib_event);
	result = ib_register_event_handler(&priv->event_handler);
	if (result < 0) {
		printk(KERN_WARNING "%s: ib_register_event_handler failed for "
		       "port %d (ret = %d)\n",
		       hca->name, port, result);
		goto event_failed;
	}

	result = register_netdev(priv->dev);
	if (result) {
		printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
		       hca->name, port, result);
		goto register_failed;
	}

	ipoib_create_debug_files(priv->dev);

	if (ipoib_cm_add_mode_attr(priv->dev))
		goto sysfs_failed;
	if (ipoib_add_pkey_attr(priv->dev))
		goto sysfs_failed;
	if (ipoib_add_umcast_attr(priv->dev))
		goto sysfs_failed;
	if (device_create_file(&priv->dev->dev, &dev_attr_create_child))
		goto sysfs_failed;
	if (device_create_file(&priv->dev->dev, &dev_attr_delete_child))
		goto sysfs_failed;

	return priv->dev;

sysfs_failed:
	ipoib_delete_debug_files(priv->dev);
	unregister_netdev(priv->dev);

register_failed:
	ib_unregister_event_handler(&priv->event_handler);
	/* Stop GC if started before flush */
	set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
	cancel_delayed_work(&priv->neigh_reap_task);
	flush_workqueue(ipoib_workqueue);

event_failed:
	ipoib_dev_cleanup(priv->dev);

device_init_failed:
	free_netdev(priv->dev);

alloc_mem_failed:
	return ERR_PTR(result);
}

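/*
 * ib_client add callback: create an "ib%d" interface for every
 * InfiniBand link-layer port of the device.  Switches expose only
 * port 0 (the management port), while CAs number their ports
 * 1..phys_port_cnt.
 */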
static void ipoib_add_one(struct ib_device *device)
{
	struct list_head *dev_list;
	struct net_device *dev;
	struct ipoib_dev_priv *priv;
	int s, e, p;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
	if (!dev_list)
		return;

	INIT_LIST_HEAD(dev_list);

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		if (rdma_port_get_link_layer(device, p) != IB_LINK_LAYER_INFINIBAND)
			continue;
		dev = ipoib_add_port("ib%d", device, p);
		if (!IS_ERR(dev)) {
			priv = netdev_priv(dev);
			list_add_tail(&priv->list, dev_list);
		}
	}

	ib_set_client_data(device, &ipoib_client, dev_list);
}

static void ipoib_remove_one(struct ib_device *device)
{
	struct ipoib_dev_priv *priv, *tmp;
	struct list_head *dev_list;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	dev_list = ib_get_client_data(device, &ipoib_client);
	if (!dev_list)
		return;

	list_for_each_entry_safe(priv, tmp, dev_list, list) {
		ib_unregister_event_handler(&priv->event_handler);

		rtnl_lock();
		dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP);
		rtnl_unlock();

		/* Stop GC */
		set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
		cancel_delayed_work(&priv->neigh_reap_task);
		flush_workqueue(ipoib_workqueue);

		unregister_netdev(priv->dev);
		free_netdev(priv->dev);
	}

	kfree(dev_list);
}

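/*
 * Module init: clamp the ring size parameters to sane powers of two,
 * set up debugfs and the private single-threaded workqueue, then
 * register with the SA and as an IB client, after which ipoib_add_one()
 * runs for every existing and future IB device.
 */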
static int __init ipoib_init_module(void)
{
	int ret;

	ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
	ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);

	ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
	ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE);
#ifdef CONFIG_INFINIBAND_IPOIB_CM
	ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
#endif

	/*
	 * When copying small received packets, we only copy from the
	 * linear data part of the SKB, so we rely on this condition.
	 */
	BUILD_BUG_ON(IPOIB_CM_COPYBREAK > IPOIB_CM_HEAD_SIZE);

	ret = ipoib_register_debugfs();
	if (ret)
		return ret;

	/*
	 * We create our own workqueue mainly because we want to be
	 * able to flush it when devices are being removed.  We can't
	 * use schedule_work()/flush_scheduled_work() because both
	 * unregister_netdev() and linkwatch_event take the rtnl lock,
	 * so flush_scheduled_work() can deadlock during device
	 * removal.
	 */
	ipoib_workqueue = create_singlethread_workqueue("ipoib");
	if (!ipoib_workqueue) {
		ret = -ENOMEM;
		goto err_fs;
	}

	ib_sa_register_client(&ipoib_sa_client);

	ret = ib_register_client(&ipoib_client);
	if (ret)
		goto err_sa;

	ret = ipoib_netlink_init();
	if (ret)
		goto err_client;

	return 0;

err_client:
	ib_unregister_client(&ipoib_client);

err_sa:
	ib_sa_unregister_client(&ipoib_sa_client);
	destroy_workqueue(ipoib_workqueue);

err_fs:
	ipoib_unregister_debugfs();

	return ret;
}

static void __exit ipoib_cleanup_module(void)
{
	ipoib_netlink_fini();
	ib_unregister_client(&ipoib_client);
	ib_sa_unregister_client(&ipoib_sa_client);
	ipoib_unregister_debugfs();
	destroy_workqueue(ipoib_workqueue);
}

module_init(ipoib_init_module);
module_exit(ipoib_cleanup_module);