/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/moduleparam.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/slab.h>

#include <net/dst.h>

#include "ipoib.h"

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
static int mcast_debug_level;

module_param(mcast_debug_level, int, 0644);
MODULE_PARM_DESC(mcast_debug_level,
		 "Enable multicast debug tracing if > 0");
#endif

static DEFINE_MUTEX(mcast_mutex);

struct ipoib_mcast_iter {
	struct net_device *dev;
	union ib_gid       mgid;
	unsigned long      created;
	unsigned int       queuelen;
	unsigned int       complete;
	unsigned int       send_only;
};

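/*
 * Free a multicast group entry: drop the address handle references
 * held by any neighbours that used this group, drop the group's own
 * AH, and count any packets still sitting on its queue as dropped.
 */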
static void ipoib_mcast_free(struct ipoib_mcast *mcast)
{
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh, *tmp;
	int tx_dropped = 0;

	ipoib_dbg_mcast(priv, "deleting multicast group %pI6\n",
			mcast->mcmember.mgid.raw);

	spin_lock_irq(&priv->lock);

	list_for_each_entry_safe(neigh, tmp, &mcast->neigh_list, list) {
		/*
		 * It's safe to call ipoib_put_ah() inside priv->lock
		 * here, because we know that mcast->ah will always
		 * hold one more reference, so ipoib_put_ah() will
		 * never do more than decrement the ref count.
		 */
		if (neigh->ah)
			ipoib_put_ah(neigh->ah);
		ipoib_neigh_free(dev, neigh);
	}

	spin_unlock_irq(&priv->lock);

	if (mcast->ah)
		ipoib_put_ah(mcast->ah);

	while (!skb_queue_empty(&mcast->pkt_queue)) {
		++tx_dropped;
		dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
	}

	netif_tx_lock_bh(dev);
	dev->stats.tx_dropped += tx_dropped;
	netif_tx_unlock_bh(dev);

	kfree(mcast);
}

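/*
 * Allocate and initialize a multicast group entry.  Called both from
 * process context (can_sleep != 0, GFP_KERNEL) and from the TX path
 * under priv->lock (can_sleep == 0, GFP_ATOMIC).
 */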
static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev,
					     int can_sleep)
{
	struct ipoib_mcast *mcast;

	mcast = kzalloc(sizeof *mcast, can_sleep ? GFP_KERNEL : GFP_ATOMIC);
	if (!mcast)
		return NULL;

	mcast->dev = dev;
	mcast->created = jiffies;
	mcast->backoff = 1;

	INIT_LIST_HEAD(&mcast->list);
	INIT_LIST_HEAD(&mcast->neigh_list);
	skb_queue_head_init(&mcast->pkt_queue);

	return mcast;
}

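/*
 * Look up a multicast group by MGID in the device's red-black tree.
 * The caller must hold priv->lock.
 */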
static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node *n = priv->multicast_tree.rb_node;

	while (n) {
		struct ipoib_mcast *mcast;
		int ret;

		mcast = rb_entry(n, struct ipoib_mcast, rb_node);

		ret = memcmp(mgid, mcast->mcmember.mgid.raw,
			     sizeof (union ib_gid));
		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			return mcast;
	}

	return NULL;
}

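/*
 * Insert a multicast group into the red-black tree, keyed by MGID.
 * Returns -EEXIST if a group with the same MGID is already present.
 * The caller must hold priv->lock.
 */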
static int __ipoib_mcast_add(struct net_device *dev, struct ipoib_mcast *mcast)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node **n = &priv->multicast_tree.rb_node, *pn = NULL;

	while (*n) {
		struct ipoib_mcast *tmcast;
		int ret;

		pn = *n;
		tmcast = rb_entry(pn, struct ipoib_mcast, rb_node);

		ret = memcmp(mcast->mcmember.mgid.raw, tmcast->mcmember.mgid.raw,
			     sizeof (union ib_gid));
		if (ret < 0)
			n = &pn->rb_left;
		else if (ret > 0)
			n = &pn->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&mcast->rb_node, pn, n);
	rb_insert_color(&mcast->rb_node, &priv->multicast_tree);

	return 0;
}

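/*
 * Complete a join: record the mcmember reply from the SA, attach the
 * QP to the group (unless it is send-only), create an address handle
 * for the group, and transmit any packets that were queued while the
 * join was in progress.
 */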
static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
				   struct ib_sa_mcmember_rec *mcmember)
{
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_ah *ah;
	int ret;
	int set_qkey = 0;

	mcast->mcmember = *mcmember;

	/* Set the cached Q_Key before we attach if it's the broadcast group */
	if (!memcmp(mcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
		    sizeof (union ib_gid))) {
		spin_lock_irq(&priv->lock);
		if (!priv->broadcast) {
			spin_unlock_irq(&priv->lock);
			return -EAGAIN;
		}
		priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey);
		spin_unlock_irq(&priv->lock);
		priv->tx_wr.wr.ud.remote_qkey = priv->qkey;
		set_qkey = 1;
	}

	if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
		if (test_and_set_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
			ipoib_warn(priv, "multicast group %pI6 already attached\n",
				   mcast->mcmember.mgid.raw);

			return 0;
		}

		ret = ipoib_mcast_attach(dev, be16_to_cpu(mcast->mcmember.mlid),
					 &mcast->mcmember.mgid, set_qkey);
		if (ret < 0) {
			ipoib_warn(priv, "couldn't attach QP to multicast group %pI6\n",
				   mcast->mcmember.mgid.raw);

			clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags);
			return ret;
		}
	}

	{
		struct ib_ah_attr av = {
			.dlid	       = be16_to_cpu(mcast->mcmember.mlid),
			.port_num      = priv->port,
			.sl	       = mcast->mcmember.sl,
			.ah_flags      = IB_AH_GRH,
			.static_rate   = mcast->mcmember.rate,
			.grh	       = {
				.flow_label    = be32_to_cpu(mcast->mcmember.flow_label),
				.hop_limit     = mcast->mcmember.hop_limit,
				.sgid_index    = 0,
				.traffic_class = mcast->mcmember.traffic_class
			}
		};
		av.grh.dgid = mcast->mcmember.mgid;

		ah = ipoib_create_ah(dev, priv->pd, &av);
		if (IS_ERR(ah)) {
			ipoib_warn(priv, "ipoib_create_ah failed %ld\n",
				-PTR_ERR(ah));
			/* use original error */
			return PTR_ERR(ah);
		} else {
			spin_lock_irq(&priv->lock);
			mcast->ah = ah;
			spin_unlock_irq(&priv->lock);

			ipoib_dbg_mcast(priv, "MGID %pI6 AV %p, LID 0x%04x, SL %d\n",
					mcast->mcmember.mgid.raw,
					mcast->ah->ah,
					be16_to_cpu(mcast->mcmember.mlid),
					mcast->mcmember.sl);
		}
	}

	/* actually send any queued packets */
	netif_tx_lock_bh(dev);
	while (!skb_queue_empty(&mcast->pkt_queue)) {
		struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);

		netif_tx_unlock_bh(dev);

		skb->dev = dev;
		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed to requeue packet\n");

		netif_tx_lock_bh(dev);
	}
	netif_tx_unlock_bh(dev);

	return 0;
}

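/*
 * Completion callback for send-only joins.  On success, finish the
 * join; on failure, drop the queued packets and clear the busy flag
 * so that a later ipoib_mcast_send() can retry the join.
 */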
static int
ipoib_mcast_sendonly_join_complete(int status,
				   struct ib_sa_multicast *multicast)
{
	struct ipoib_mcast *mcast = multicast->context;
	struct net_device *dev = mcast->dev;

	/* We trap for port events ourselves. */
	if (status == -ENETRESET)
		return 0;

	if (!status)
		status = ipoib_mcast_join_finish(mcast, &multicast->rec);

	if (status) {
		if (mcast->logcount++ < 20)
			ipoib_dbg_mcast(netdev_priv(dev), "multicast join failed for %pI6, status %d\n",
					mcast->mcmember.mgid.raw, status);

		/* Flush out any queued packets */
		netif_tx_lock_bh(dev);
		while (!skb_queue_empty(&mcast->pkt_queue)) {
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
		}
		netif_tx_unlock_bh(dev);

		/* Clear the busy flag so we try again */
		status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY,
					    &mcast->flags);
	}
	return status;
}

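/*
 * Start a send-only join for a group we transmit to but never receive
 * from.  Called from the TX path, so the SA request is issued with
 * GFP_ATOMIC.
 */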
static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
{
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_sa_mcmember_rec rec = {
#if 0				/* Some SMs don't support send-only yet */
		.join_state = 4
#else
		.join_state = 1
#endif
	};
	int ret = 0;

	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
		ipoib_dbg_mcast(priv, "device shutting down, no multicast joins\n");
		return -ENODEV;
	}

	if (test_and_set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
		ipoib_dbg_mcast(priv, "multicast entry busy, skipping\n");
		return -EBUSY;
	}

	rec.mgid     = mcast->mcmember.mgid;
	rec.port_gid = priv->local_gid;
	rec.pkey     = cpu_to_be16(priv->pkey);

	mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca,
					 priv->port, &rec,
					 IB_SA_MCMEMBER_REC_MGID	|
					 IB_SA_MCMEMBER_REC_PORT_GID	|
					 IB_SA_MCMEMBER_REC_PKEY	|
					 IB_SA_MCMEMBER_REC_JOIN_STATE,
					 GFP_ATOMIC,
					 ipoib_mcast_sendonly_join_complete,
					 mcast);
	if (IS_ERR(mcast->mc)) {
		ret = PTR_ERR(mcast->mc);
		clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
		ipoib_warn(priv, "ib_sa_join_multicast failed (ret = %d)\n",
			   ret);
	} else {
		ipoib_dbg_mcast(priv, "no multicast record for %pI6, starting join\n",
				mcast->mcmember.mgid.raw);
	}

	return ret;
}

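/*
 * Deferred work to turn the carrier on once the broadcast group has
 * been joined; taking rtnl_lock directly from the join completion
 * handler could deadlock, so this runs from ipoib_workqueue instead.
 */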
void ipoib_mcast_carrier_on_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   carrier_on_task);
	struct ib_port_attr attr;

	if (ib_query_port(priv->ca, priv->port, &attr) ||
	    attr.state != IB_PORT_ACTIVE) {
		ipoib_dbg(priv, "Keeping carrier off until IB port is active\n");
		return;
	}

	/*
	 * Take rtnl_lock to avoid racing with ipoib_stop() and
	 * turning the carrier back on while a device is being
	 * removed.
	 */
	rtnl_lock();
	netif_carrier_on(priv->dev);
	rtnl_unlock();
}

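/*
 * Completion callback for regular (non-send-only) joins.  On success,
 * reset the backoff and kick the join task to handle the next group;
 * on failure, back off exponentially and reschedule the join task.
 */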
static int ipoib_mcast_join_complete(int status,
				     struct ib_sa_multicast *multicast)
{
	struct ipoib_mcast *mcast = multicast->context;
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg_mcast(priv, "join completion for %pI6 (status %d)\n",
			mcast->mcmember.mgid.raw, status);

	/* We trap for port events ourselves. */
	if (status == -ENETRESET)
		return 0;

	if (!status)
		status = ipoib_mcast_join_finish(mcast, &multicast->rec);

	if (!status) {
		mcast->backoff = 1;
		mutex_lock(&mcast_mutex);
		if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
			queue_delayed_work(ipoib_workqueue,
					   &priv->mcast_task, 0);
		mutex_unlock(&mcast_mutex);

		/*
		 * Defer carrier on work to ipoib_workqueue to avoid a
		 * deadlock on rtnl_lock here.
		 */
		if (mcast == priv->broadcast)
			queue_work(ipoib_workqueue, &priv->carrier_on_task);

		return 0;
	}

	if (mcast->logcount++ < 20) {
		if (status == -ETIMEDOUT || status == -EAGAIN) {
			ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n",
					mcast->mcmember.mgid.raw, status);
		} else {
			ipoib_warn(priv, "multicast join failed for %pI6, status %d\n",
				   mcast->mcmember.mgid.raw, status);
		}
	}

	mcast->backoff *= 2;
	if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
		mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;

	/* Clear the busy flag so we try again */
	status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);

	mutex_lock(&mcast_mutex);
	spin_lock_irq(&priv->lock);
	if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
		queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
				   mcast->backoff * HZ);
	spin_unlock_irq(&priv->lock);
	mutex_unlock(&mcast_mutex);

	return status;
}

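/*
 * Issue an SA join request for a group.  If @create is set, supply
 * the full set of parameters, taken from the broadcast group, so the
 * SM can create the group if it does not exist yet.
 */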
static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
			     int create)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_sa_mcmember_rec rec = {
		.join_state = 1
	};
	ib_sa_comp_mask comp_mask;
	int ret = 0;

	ipoib_dbg_mcast(priv, "joining MGID %pI6\n", mcast->mcmember.mgid.raw);

	rec.mgid     = mcast->mcmember.mgid;
	rec.port_gid = priv->local_gid;
	rec.pkey     = cpu_to_be16(priv->pkey);

	comp_mask =
		IB_SA_MCMEMBER_REC_MGID		|
		IB_SA_MCMEMBER_REC_PORT_GID	|
		IB_SA_MCMEMBER_REC_PKEY		|
		IB_SA_MCMEMBER_REC_JOIN_STATE;

	if (create) {
		comp_mask |=
			IB_SA_MCMEMBER_REC_QKEY			|
			IB_SA_MCMEMBER_REC_MTU_SELECTOR		|
			IB_SA_MCMEMBER_REC_MTU			|
			IB_SA_MCMEMBER_REC_TRAFFIC_CLASS	|
			IB_SA_MCMEMBER_REC_RATE_SELECTOR	|
			IB_SA_MCMEMBER_REC_RATE			|
			IB_SA_MCMEMBER_REC_SL			|
			IB_SA_MCMEMBER_REC_FLOW_LABEL		|
			IB_SA_MCMEMBER_REC_HOP_LIMIT;

		rec.qkey	  = priv->broadcast->mcmember.qkey;
		rec.mtu_selector  = IB_SA_EQ;
		rec.mtu		  = priv->broadcast->mcmember.mtu;
		rec.traffic_class = priv->broadcast->mcmember.traffic_class;
		rec.rate_selector = IB_SA_EQ;
		rec.rate	  = priv->broadcast->mcmember.rate;
		rec.sl		  = priv->broadcast->mcmember.sl;
		rec.flow_label	  = priv->broadcast->mcmember.flow_label;
		rec.hop_limit	  = priv->broadcast->mcmember.hop_limit;
	}

	set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
	mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
					 &rec, comp_mask, GFP_KERNEL,
					 ipoib_mcast_join_complete, mcast);
	if (IS_ERR(mcast->mc)) {
		clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
		ret = PTR_ERR(mcast->mc);
		ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret);

		mcast->backoff *= 2;
		if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
			mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;

		mutex_lock(&mcast_mutex);
		if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
			queue_delayed_work(ipoib_workqueue,
					   &priv->mcast_task,
					   mcast->backoff * HZ);
		mutex_unlock(&mcast_mutex);
	}
}

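/*
 * Work queue task that (re)joins multicast groups: refresh our local
 * GID and LID, make sure the broadcast group is joined first, then
 * walk the multicast list joining one unattached group per invocation
 * (the completion handler requeues us), and finally set the multicast
 * MTU once everything is joined.
 */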
void ipoib_mcast_join_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, mcast_task.work);
	struct net_device *dev = priv->dev;

	if (!test_bit(IPOIB_MCAST_RUN, &priv->flags))
		return;

	if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid))
		ipoib_warn(priv, "ib_query_gid() failed\n");
	else
		memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));

	{
		struct ib_port_attr attr;

		if (!ib_query_port(priv->ca, priv->port, &attr))
			priv->local_lid = attr.lid;
		else
			ipoib_warn(priv, "ib_query_port failed\n");
	}

	if (!priv->broadcast) {
		struct ipoib_mcast *broadcast;

		if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
			return;

		broadcast = ipoib_mcast_alloc(dev, 1);
		if (!broadcast) {
			ipoib_warn(priv, "failed to allocate broadcast group\n");
			mutex_lock(&mcast_mutex);
			if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
				queue_delayed_work(ipoib_workqueue,
						   &priv->mcast_task, HZ);
			mutex_unlock(&mcast_mutex);
			return;
		}

		spin_lock_irq(&priv->lock);
		memcpy(broadcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
		       sizeof (union ib_gid));
		priv->broadcast = broadcast;

		__ipoib_mcast_add(dev, priv->broadcast);
		spin_unlock_irq(&priv->lock);
	}

	if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
		if (!test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags))
			ipoib_mcast_join(dev, priv->broadcast, 0);
		return;
	}

	while (1) {
		struct ipoib_mcast *mcast = NULL;

		spin_lock_irq(&priv->lock);
		list_for_each_entry(mcast, &priv->multicast_list, list) {
			if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)
			    && !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)
			    && !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
				/* Found the next unjoined group */
				break;
			}
		}
		spin_unlock_irq(&priv->lock);

		if (&mcast->list == &priv->multicast_list) {
			/* All done */
			break;
		}

		ipoib_mcast_join(dev, mcast, 1);
		return;
	}

	priv->mcast_mtu = IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));

	if (!ipoib_cm_admin_enabled(dev)) {
		rtnl_lock();
		dev_set_mtu(dev, min(priv->mcast_mtu, priv->admin_mtu));
		rtnl_unlock();
	}

	ipoib_dbg_mcast(priv, "successfully joined all multicast groups\n");

	clear_bit(IPOIB_MCAST_RUN, &priv->flags);
}

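/*
 * Start/stop the multicast join task.  mcast_mutex serializes these
 * against the completion handlers that requeue the task.
 */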
int ipoib_mcast_start_thread(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg_mcast(priv, "starting multicast thread\n");

	mutex_lock(&mcast_mutex);
	if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
		queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);
	mutex_unlock(&mcast_mutex);

	return 0;
}

int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg_mcast(priv, "stopping multicast thread\n");

	mutex_lock(&mcast_mutex);
	clear_bit(IPOIB_MCAST_RUN, &priv->flags);
	cancel_delayed_work(&priv->mcast_task);
	mutex_unlock(&mcast_mutex);

	if (flush)
		flush_workqueue(ipoib_workqueue);

	return 0;
}

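/*
 * Leave a multicast group: free the SA multicast handle if a join was
 * started, and detach the QP if it was attached to the group.
 */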
static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret = 0;

	if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
		ib_sa_free_multicast(mcast->mc);

	if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
		ipoib_dbg_mcast(priv, "leaving MGID %pI6\n",
				mcast->mcmember.mgid.raw);

		/* Remove ourselves from the multicast group */
		ret = ib_detach_mcast(priv->qp, &mcast->mcmember.mgid,
				      be16_to_cpu(mcast->mcmember.mlid));
		if (ret)
			ipoib_warn(priv, "ib_detach_mcast failed (result = %d)\n", ret);
	}

	return 0;
}

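/*
 * Transmit one multicast packet.  If the group's address handle is
 * not ready yet, queue the packet (up to IPOIB_MAX_MCAST_QUEUE) and,
 * for send-only groups, kick off the join; otherwise send it
 * immediately on the multicast QPN, caching the AH in the skb's
 * neighbour entry when one is available.
 */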
void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_mcast *mcast;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)		||
	    !priv->broadcast					||
	    !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		goto unlock;
	}

	mcast = __ipoib_mcast_find(dev, mgid);
	if (!mcast) {
		/* Let's create a new send only group now */
		ipoib_dbg_mcast(priv, "setting up send only multicast group for %pI6\n",
				mgid);

		mcast = ipoib_mcast_alloc(dev, 0);
		if (!mcast) {
			ipoib_warn(priv, "unable to allocate memory for multicast structure\n");
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
			goto out;
		}

		set_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags);
		memcpy(mcast->mcmember.mgid.raw, mgid, sizeof (union ib_gid));
		__ipoib_mcast_add(dev, mcast);
		list_add_tail(&mcast->list, &priv->multicast_list);
	}

	if (!mcast->ah) {
		if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
			skb_queue_tail(&mcast->pkt_queue, skb);
		else {
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}

		if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
			ipoib_dbg_mcast(priv, "no address vector, but multicast join already started\n");
		else if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
			ipoib_mcast_sendonly_join(mcast);

		/*
		 * If lookup completes between here and out:, don't
		 * want to send packet twice.
		 */
		mcast = NULL;
	}

out:
	if (mcast && mcast->ah) {
		struct dst_entry *dst = skb_dst(skb);
		struct neighbour *n = NULL;

		rcu_read_lock();
		if (dst)
			n = dst_get_neighbour_noref(dst);
		if (n && !*to_ipoib_neigh(n)) {
			struct ipoib_neigh *neigh = ipoib_neigh_alloc(n,
								      skb->dev);

			if (neigh) {
				kref_get(&mcast->ah->ref);
				neigh->ah	= mcast->ah;
				list_add_tail(&neigh->list, &mcast->neigh_list);
			}
		}
		rcu_read_unlock();
		spin_unlock_irqrestore(&priv->lock, flags);
		ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
		return;
	}

unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
}

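/*
 * Flush all multicast state for a device: move every group, including
 * broadcast, off the list and tree under the lock, then leave and
 * free the groups outside it.
 */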
void ipoib_mcast_dev_flush(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	LIST_HEAD(remove_list);
	struct ipoib_mcast *mcast, *tmcast;
	unsigned long flags;

	ipoib_dbg_mcast(priv, "flushing multicast list\n");

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
		list_del(&mcast->list);
		rb_erase(&mcast->rb_node, &priv->multicast_tree);
		list_add_tail(&mcast->list, &remove_list);
	}

	if (priv->broadcast) {
		rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);
		list_add_tail(&priv->broadcast->list, &remove_list);
		priv->broadcast = NULL;
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
		ipoib_mcast_leave(dev, mcast);
		ipoib_mcast_free(mcast);
	}
}

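/*
 * An IPoIB hardware address is 20 bytes: a 4-byte QPN followed by the
 * 16-byte GID.  Require that an address match the device broadcast
 * address in the reserved QPN, GID prefix and scope (bytes 0-5) and
 * in the lower signature byte and P_Key (bytes 7-9).  Byte 6 is
 * skipped: it differs between the IPv4 (0x40) and IPv6 (0x60)
 * multicast GID signatures.
 */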
static int ipoib_mcast_addr_is_valid(const u8 *addr, const u8 *broadcast)
{
	/* reserved QPN, prefix, scope */
	if (memcmp(addr, broadcast, 6))
		return 0;
	/* signature lower, pkey */
	if (memcmp(addr + 7, broadcast + 7, 3))
		return 0;
	return 1;
}

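/*
 * Work queue task run when the kernel's multicast address list for
 * the device changes: diff the hardware address list against our
 * group list, add entries for new addresses, queue stale entries for
 * removal, and restart the join task.
 */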
void ipoib_mcast_restart_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, restart_task);
	struct net_device *dev = priv->dev;
	struct netdev_hw_addr *ha;
	struct ipoib_mcast *mcast, *tmcast;
	LIST_HEAD(remove_list);
	unsigned long flags;
	struct ib_sa_mcmember_rec rec;

	ipoib_dbg_mcast(priv, "restarting multicast task\n");

	ipoib_mcast_stop_thread(dev, 0);

	local_irq_save(flags);
	netif_addr_lock(dev);
	spin_lock(&priv->lock);

	/*
	 * Unfortunately, the networking core only gives us a list of all of
	 * the multicast hardware addresses.  We need to figure out which ones
	 * are new and which ones have been removed.
	 */

	/* Clear out the found flag */
	list_for_each_entry(mcast, &priv->multicast_list, list)
		clear_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);

	/* Mark all of the entries that are found or don't exist */
	netdev_for_each_mc_addr(ha, dev) {
		union ib_gid mgid;

		if (!ipoib_mcast_addr_is_valid(ha->addr, dev->broadcast))
			continue;

		memcpy(mgid.raw, ha->addr + 4, sizeof mgid);

		mcast = __ipoib_mcast_find(dev, &mgid);
		if (!mcast || test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
			struct ipoib_mcast *nmcast;

			/* ignore groups that are directly joined by userspace */
			if (test_bit(IPOIB_FLAG_UMCAST, &priv->flags) &&
			    !ib_sa_get_mcmember_rec(priv->ca, priv->port, &mgid, &rec)) {
				ipoib_dbg_mcast(priv, "ignoring multicast entry for mgid %pI6\n",
						mgid.raw);
				continue;
			}

			/* Not found or send-only group, let's add a new entry */
			ipoib_dbg_mcast(priv, "adding multicast entry for mgid %pI6\n",
					mgid.raw);

			nmcast = ipoib_mcast_alloc(dev, 0);
			if (!nmcast) {
				ipoib_warn(priv, "unable to allocate memory for multicast structure\n");
				continue;
			}

			set_bit(IPOIB_MCAST_FLAG_FOUND, &nmcast->flags);

			nmcast->mcmember.mgid = mgid;

			if (mcast) {
				/* Destroy the send only entry */
				list_move_tail(&mcast->list, &remove_list);

				rb_replace_node(&mcast->rb_node,
						&nmcast->rb_node,
						&priv->multicast_tree);
			} else
				__ipoib_mcast_add(dev, nmcast);

			list_add_tail(&nmcast->list, &priv->multicast_list);
		}

		if (mcast)
			set_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);
	}

	/* Remove all of the entries that don't exist anymore */
	list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
		if (!test_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags) &&
		    !test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
			ipoib_dbg_mcast(priv, "deleting multicast group %pI6\n",
					mcast->mcmember.mgid.raw);

			rb_erase(&mcast->rb_node, &priv->multicast_tree);

			/* Move to the remove list */
			list_move_tail(&mcast->list, &remove_list);
		}
	}

	spin_unlock(&priv->lock);
	netif_addr_unlock(dev);
	local_irq_restore(flags);

	/* We have to cancel outside of the spinlock */
	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
		ipoib_mcast_leave(mcast->dev, mcast);
		ipoib_mcast_free(mcast);
	}

	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
		ipoib_mcast_start_thread(dev);
}

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

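/*
 * Iterator over the multicast group tree, used by the debugfs "mcg"
 * file.  Walks the groups in MGID order, advancing one entry per call
 * to ipoib_mcast_iter_next().
 */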
struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev)
{
	struct ipoib_mcast_iter *iter;

	iter = kmalloc(sizeof *iter, GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	memset(iter->mgid.raw, 0, 16);

	if (ipoib_mcast_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter)
{
	struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
	struct rb_node *n;
	struct ipoib_mcast *mcast;
	int ret = 1;

	spin_lock_irq(&priv->lock);

	n = rb_first(&priv->multicast_tree);

	while (n) {
		mcast = rb_entry(n, struct ipoib_mcast, rb_node);

		if (memcmp(iter->mgid.raw, mcast->mcmember.mgid.raw,
			   sizeof (union ib_gid)) < 0) {
			iter->mgid      = mcast->mcmember.mgid;
			iter->created   = mcast->created;
			iter->queuelen  = skb_queue_len(&mcast->pkt_queue);
			iter->complete  = !!mcast->ah;
			iter->send_only = !!(mcast->flags & (1 << IPOIB_MCAST_FLAG_SENDONLY));

			ret = 0;

			break;
		}

		n = rb_next(n);
	}

	spin_unlock_irq(&priv->lock);

	return ret;
}

void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter,
			   union ib_gid *mgid,
			   unsigned long *created,
			   unsigned int *queuelen,
			   unsigned int *complete,
			   unsigned int *send_only)
{
	*mgid      = iter->mgid;
	*created   = iter->created;
	*queuelen  = iter->queuelen;
	*complete  = iter->complete;
	*send_only = iter->send_only;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */