/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/moduleparam.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/slab.h>

#include <net/dst.h>

#include "ipoib.h"

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
static int mcast_debug_level;

module_param(mcast_debug_level, int, 0644);
MODULE_PARM_DESC(mcast_debug_level,
		 "Enable multicast debug tracing if > 0");
#endif

struct ipoib_mcast_iter {
	struct net_device *dev;
	union ib_gid mgid;
	unsigned long created;
	unsigned int queuelen;
	unsigned int complete;
	unsigned int send_only;
};

/* Join state that allows creating an MCG with a send-only member request */
#define SENDONLY_FULLMEMBER_JOIN	8

/*
 * This should be called with the priv->lock held
 */
static void __ipoib_mcast_schedule_join_thread(struct ipoib_dev_priv *priv,
					       struct ipoib_mcast *mcast,
					       bool delay)
{
	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
		return;

	/*
	 * We will be scheduling *something*, so cancel whatever is
	 * currently scheduled first
	 */
	cancel_delayed_work(&priv->mcast_task);
	if (mcast && delay) {
		/*
		 * We had a failure and want to schedule a retry later
		 */
		mcast->backoff *= 2;
		if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
			mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
		mcast->delay_until = jiffies + (mcast->backoff * HZ);
		/*
		 * Mark this mcast for its delay, but restart the
		 * task immediately. The join task will make sure to
		 * clear out all entries without delays, and then
		 * schedule itself to run again when the earliest
		 * delay expires
		 */
		queue_delayed_work(priv->wq, &priv->mcast_task, 0);
	} else if (delay) {
		/*
		 * Special case of retrying after a failure to
		 * allocate the broadcast multicast group, wait
		 * 1 second and try again
		 */
		queue_delayed_work(priv->wq, &priv->mcast_task, HZ);
	} else
		queue_delayed_work(priv->wq, &priv->mcast_task, 0);
}

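/*
 * Tear down a multicast group entry: drop the neighbours that resolved
 * to this MGID, release the address handle if one was created, and free
 * any packets still waiting on the group's queue (accounted as
 * tx_dropped under the tx lock).
 */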
static void ipoib_mcast_free(struct ipoib_mcast *mcast)
{
	struct net_device *dev = mcast->dev;
	int tx_dropped = 0;

	ipoib_dbg_mcast(ipoib_priv(dev), "deleting multicast group %pI6\n",
			mcast->mcmember.mgid.raw);

	/* remove all neigh connected to this mcast */
	ipoib_del_neighs_by_gid(dev, mcast->mcmember.mgid.raw);

	if (mcast->ah)
		ipoib_put_ah(mcast->ah);

	while (!skb_queue_empty(&mcast->pkt_queue)) {
		++tx_dropped;
		dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
	}

	netif_tx_lock_bh(dev);
	dev->stats.tx_dropped += tx_dropped;
	netif_tx_unlock_bh(dev);

	kfree(mcast);
}

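/*
 * Allocate and minimally initialize a group entry. GFP_ATOMIC is used
 * because callers run under priv->lock or from the xmit path.
 */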
static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev)
{
	struct ipoib_mcast *mcast;

	mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
	if (!mcast)
		return NULL;

	mcast->dev = dev;
	mcast->created = jiffies;
	mcast->delay_until = jiffies;
	mcast->backoff = 1;

	INIT_LIST_HEAD(&mcast->list);
	INIT_LIST_HEAD(&mcast->neigh_list);
	skb_queue_head_init(&mcast->pkt_queue);

	return mcast;
}

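/*
 * Look up a group by MGID in the device's RB-tree, which is ordered by
 * memcmp() over the raw 16-byte GID. Caller must hold priv->lock.
 */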
static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct rb_node *n = priv->multicast_tree.rb_node;

	while (n) {
		struct ipoib_mcast *mcast;
		int ret;

		mcast = rb_entry(n, struct ipoib_mcast, rb_node);

		ret = memcmp(mgid, mcast->mcmember.mgid.raw,
			     sizeof (union ib_gid));
		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			return mcast;
	}

	return NULL;
}

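/*
 * Insert a group into the RB-tree. Returns -EEXIST if an entry with the
 * same MGID is already present. Caller must hold priv->lock.
 */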
static int __ipoib_mcast_add(struct net_device *dev, struct ipoib_mcast *mcast)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct rb_node **n = &priv->multicast_tree.rb_node, *pn = NULL;

	while (*n) {
		struct ipoib_mcast *tmcast;
		int ret;

		pn = *n;
		tmcast = rb_entry(pn, struct ipoib_mcast, rb_node);

		ret = memcmp(mcast->mcmember.mgid.raw, tmcast->mcmember.mgid.raw,
			     sizeof (union ib_gid));
		if (ret < 0)
			n = &pn->rb_left;
		else if (ret > 0)
			n = &pn->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&mcast->rb_node, pn, n);
	rb_insert_color(&mcast->rb_node, &priv->multicast_tree);

	return 0;
}

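/*
 * Second half of a successful SA join: record the member record, update
 * the device MTU and cached Q_Key if this is the broadcast group, attach
 * the QP for full-member groups, build the address handle, and flush any
 * packets that were queued while the join was in flight.
 */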
static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
				   struct ib_sa_mcmember_rec *mcmember)
{
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct rdma_netdev *rn = netdev_priv(dev);
	struct ipoib_ah *ah;
	struct rdma_ah_attr av;
	int ret;
	int set_qkey = 0;
	int mtu;

	mcast->mcmember = *mcmember;

	/* Set the multicast MTU and cached Q_Key before we attach if it's
	 * the broadcast group.
	 */
	if (!memcmp(mcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
		    sizeof (union ib_gid))) {
		spin_lock_irq(&priv->lock);
		if (!priv->broadcast) {
			spin_unlock_irq(&priv->lock);
			return -EAGAIN;
		}
		/* Update the priv members according to the new mcast record */
		priv->broadcast->mcmember.qkey = mcmember->qkey;
		priv->broadcast->mcmember.mtu = mcmember->mtu;
		priv->broadcast->mcmember.traffic_class = mcmember->traffic_class;
		priv->broadcast->mcmember.rate = mcmember->rate;
		priv->broadcast->mcmember.sl = mcmember->sl;
		priv->broadcast->mcmember.flow_label = mcmember->flow_label;
		priv->broadcast->mcmember.hop_limit = mcmember->hop_limit;
		/* If admin_mtu still matches mcast_mtu, assume both may be updated */
		mtu = rdma_mtu_enum_to_int(priv->ca, priv->port,
					   priv->broadcast->mcmember.mtu);
		if (priv->mcast_mtu == priv->admin_mtu)
			priv->admin_mtu = IPOIB_UD_MTU(mtu);
		priv->mcast_mtu = IPOIB_UD_MTU(mtu);
		rn->mtu = priv->mcast_mtu;

		priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey);
		spin_unlock_irq(&priv->lock);
		priv->tx_wr.remote_qkey = priv->qkey;
		set_qkey = 1;
	}

	if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
		if (test_and_set_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
			ipoib_warn(priv, "multicast group %pI6 already attached\n",
				   mcast->mcmember.mgid.raw);

			return 0;
		}

		ret = rn->attach_mcast(dev, priv->ca, &mcast->mcmember.mgid,
				       be16_to_cpu(mcast->mcmember.mlid),
				       set_qkey, priv->qkey);
		if (ret < 0) {
			ipoib_warn(priv, "couldn't attach QP to multicast group %pI6\n",
				   mcast->mcmember.mgid.raw);

			clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags);
			return ret;
		}
	}

	memset(&av, 0, sizeof(av));
	av.type = rdma_ah_find_type(priv->ca, priv->port);
	rdma_ah_set_dlid(&av, be16_to_cpu(mcast->mcmember.mlid));
	rdma_ah_set_port_num(&av, priv->port);
	rdma_ah_set_sl(&av, mcast->mcmember.sl);
	rdma_ah_set_static_rate(&av, mcast->mcmember.rate);

	rdma_ah_set_grh(&av, &mcast->mcmember.mgid,
			be32_to_cpu(mcast->mcmember.flow_label),
			0, mcast->mcmember.hop_limit,
			mcast->mcmember.traffic_class);

	ah = ipoib_create_ah(dev, priv->pd, &av);
	if (IS_ERR(ah)) {
		ipoib_warn(priv, "ipoib_create_ah failed %ld\n",
			   -PTR_ERR(ah));
		/* use original error */
		return PTR_ERR(ah);
	}
	spin_lock_irq(&priv->lock);
	mcast->ah = ah;
	spin_unlock_irq(&priv->lock);

	ipoib_dbg_mcast(priv, "MGID %pI6 AV %p, LID 0x%04x, SL %d\n",
			mcast->mcmember.mgid.raw,
			mcast->ah->ah,
			be16_to_cpu(mcast->mcmember.mlid),
			mcast->mcmember.sl);

	/* actually send any queued packets */
	netif_tx_lock_bh(dev);
	while (!skb_queue_empty(&mcast->pkt_queue)) {
		struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);

		netif_tx_unlock_bh(dev);

		skb->dev = dev;

		ret = dev_queue_xmit(skb);
		if (ret)
			ipoib_warn(priv, "%s:dev_queue_xmit failed to re-queue packet, ret:%d\n",
				   __func__, ret);
		netif_tx_lock_bh(dev);
	}
	netif_tx_unlock_bh(dev);

	return 0;
}

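/*
 * Bring the net_device carrier up once the broadcast group has been
 * joined, but only while the IB port is ACTIVE. Runs on priv->wq.
 */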
void ipoib_mcast_carrier_on_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   carrier_on_task);
	struct ib_port_attr attr;

	if (ib_query_port(priv->ca, priv->port, &attr) ||
	    attr.state != IB_PORT_ACTIVE) {
		ipoib_dbg(priv, "Keeping carrier off until IB port is active\n");
		return;
	}
	/*
	 * Take rtnl_lock to avoid racing with ipoib_stop() and
	 * turning the carrier back on while a device is being
	 * removed. However, ipoib_stop() will attempt to flush
	 * the workqueue while holding the rtnl lock, so loop
	 * on trylock until either we get the lock or we see
	 * FLAG_OPER_UP go away as that signals that we are bailing
	 * and can safely ignore the carrier on work.
	 */
	while (!rtnl_trylock()) {
		if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
			return;
		else
			msleep(20);
	}
	if (!ipoib_cm_admin_enabled(priv->dev))
		dev_set_mtu(priv->dev, min(priv->mcast_mtu, priv->admin_mtu));
	netif_carrier_on(priv->dev);
	rtnl_unlock();
}

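/*
 * Completion callback passed to ib_sa_join_multicast(). On success,
 * finish the join and reschedule the join task; on failure, either give
 * up on the group (send-only, second failure) or back off and retry.
 * The final status is returned to the ib_sa layer.
 */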
static int ipoib_mcast_join_complete(int status,
				     struct ib_sa_multicast *multicast)
{
	struct ipoib_mcast *mcast = multicast->context;
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_dbg_mcast(priv, "%sjoin completion for %pI6 (status %d)\n",
			test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ?
			"sendonly " : "",
			mcast->mcmember.mgid.raw, status);

	/* We trap for port events ourselves. */
	if (status == -ENETRESET) {
		status = 0;
		goto out;
	}

	if (!status)
		status = ipoib_mcast_join_finish(mcast, &multicast->rec);

	if (!status) {
		mcast->backoff = 1;
		mcast->delay_until = jiffies;

		/*
		 * Defer carrier on work to priv->wq to avoid a
		 * deadlock on rtnl_lock here. Requeue our multicast
		 * work too, which will end up happening right after
		 * our carrier on task work and will allow us to
		 * send out all of the non-broadcast joins
		 */
		if (mcast == priv->broadcast) {
			spin_lock_irq(&priv->lock);
			queue_work(priv->wq, &priv->carrier_on_task);
			__ipoib_mcast_schedule_join_thread(priv, NULL, 0);
			goto out_locked;
		}
	} else {
		bool silent_fail =
			test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) &&
			status == -EINVAL;

		if (mcast->logcount < 20) {
			if (status == -ETIMEDOUT || status == -EAGAIN ||
			    silent_fail) {
				ipoib_dbg_mcast(priv, "%smulticast join failed for %pI6, status %d\n",
						test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ? "sendonly " : "",
						mcast->mcmember.mgid.raw, status);
			} else {
				ipoib_warn(priv, "%smulticast join failed for %pI6, status %d\n",
					   test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ? "sendonly " : "",
					   mcast->mcmember.mgid.raw, status);
			}

			if (!silent_fail)
				mcast->logcount++;
		}

		if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) &&
		    mcast->backoff >= 2) {
			/*
			 * We only retry sendonly joins once before we drop
			 * the packet and quit trying to deal with the
			 * group. However, we leave the group in the
			 * mcast list as an unjoined group. If we want to
			 * try joining again, we simply queue up a packet
			 * and restart the join thread. The empty queue
			 * is why the join thread ignores this group.
			 */
			mcast->backoff = 1;
			netif_tx_lock_bh(dev);
			while (!skb_queue_empty(&mcast->pkt_queue)) {
				++dev->stats.tx_dropped;
				dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
			}
			netif_tx_unlock_bh(dev);
		} else {
			spin_lock_irq(&priv->lock);
			/* Requeue this join task with a backoff delay */
			__ipoib_mcast_schedule_join_thread(priv, mcast, 1);
			goto out_locked;
		}
	}
out:
	spin_lock_irq(&priv->lock);
out_locked:
	/*
	 * Make sure to set mcast->mc before we clear the busy flag to avoid
	 * racing with code that checks for BUSY before checking mcast->mc
	 */
	if (status)
		mcast->mc = NULL;
	else
		mcast->mc = multicast;
	clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
	spin_unlock_irq(&priv->lock);
	complete(&mcast->done);

	return status;
}

/*
 * Caller must hold 'priv->lock'
 */
static int ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ib_sa_multicast *multicast;
	struct ib_sa_mcmember_rec rec = {
		.join_state = 1
	};
	ib_sa_comp_mask comp_mask;
	int ret = 0;

	if (!priv->broadcast ||
	    !test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
		return -EINVAL;

	init_completion(&mcast->done);
	set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);

	ipoib_dbg_mcast(priv, "joining MGID %pI6\n", mcast->mcmember.mgid.raw);

	rec.mgid = mcast->mcmember.mgid;
	rec.port_gid = priv->local_gid;
	rec.pkey = cpu_to_be16(priv->pkey);

	comp_mask =
		IB_SA_MCMEMBER_REC_MGID |
		IB_SA_MCMEMBER_REC_PORT_GID |
		IB_SA_MCMEMBER_REC_PKEY |
		IB_SA_MCMEMBER_REC_JOIN_STATE;

	if (mcast != priv->broadcast) {
		/*
		 * RFC 4391:
		 *  The MGID MUST use the same P_Key, Q_Key, SL, MTU,
		 *  and HopLimit as those used in the broadcast-GID. The rest
		 *  of attributes SHOULD follow the values used in the
		 *  broadcast-GID as well.
		 */
		comp_mask |=
			IB_SA_MCMEMBER_REC_QKEY |
			IB_SA_MCMEMBER_REC_MTU_SELECTOR |
			IB_SA_MCMEMBER_REC_MTU |
			IB_SA_MCMEMBER_REC_TRAFFIC_CLASS |
			IB_SA_MCMEMBER_REC_RATE_SELECTOR |
			IB_SA_MCMEMBER_REC_RATE |
			IB_SA_MCMEMBER_REC_SL |
			IB_SA_MCMEMBER_REC_FLOW_LABEL |
			IB_SA_MCMEMBER_REC_HOP_LIMIT;

		rec.qkey = priv->broadcast->mcmember.qkey;
		rec.mtu_selector = IB_SA_EQ;
		rec.mtu = priv->broadcast->mcmember.mtu;
		rec.traffic_class = priv->broadcast->mcmember.traffic_class;
		rec.rate_selector = IB_SA_EQ;
		rec.rate = priv->broadcast->mcmember.rate;
		rec.sl = priv->broadcast->mcmember.sl;
		rec.flow_label = priv->broadcast->mcmember.flow_label;
		rec.hop_limit = priv->broadcast->mcmember.hop_limit;

		/*
		 * Send-only IB multicast joins work at the core IB layer but
		 * require specific SM support; we can only use them here if
		 * the current SM supports the feature. Otherwise we emulate
		 * an Ethernet multicast send, which needs no multicast
		 * subscription and still transmits correctly. Creating the
		 * group if it doesn't exist most closely matches, from a
		 * userspace application's perspective, the behaviour of
		 * Ethernet multicast.
		 */
		if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
			rec.join_state = SENDONLY_FULLMEMBER_JOIN;
	}

	multicast = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
					 &rec, comp_mask, GFP_ATOMIC,
					 ipoib_mcast_join_complete, mcast);
	if (IS_ERR(multicast)) {
		ret = PTR_ERR(multicast);
		ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret);
		/* Requeue this join task with a backoff delay */
		__ipoib_mcast_schedule_join_thread(priv, mcast, 1);
		clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
		complete(&mcast->done);
		return ret;
	}
	return 0;
}

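/*
 * Delayed work that drives all multicast joins: it creates and joins the
 * broadcast group first, then walks multicast_list starting joins for
 * every group that is neither busy nor waiting out a backoff delay, and
 * requeues itself for the earliest pending delay. Runs on priv->wq.
 */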
void ipoib_mcast_join_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, mcast_task.work);
	struct net_device *dev = priv->dev;
	struct ib_port_attr port_attr;
	unsigned long delay_until = 0;
	struct ipoib_mcast *mcast = NULL;

	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
		return;

	if (ib_query_port(priv->ca, priv->port, &port_attr)) {
		ipoib_dbg(priv, "ib_query_port() failed\n");
		return;
	}
	if (port_attr.state != IB_PORT_ACTIVE) {
		ipoib_dbg(priv, "port state is not ACTIVE (state = %d) suspending join task\n",
			  port_attr.state);
		return;
	}
	priv->local_lid = port_attr.lid;
	netif_addr_lock_bh(dev);

	if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
		netif_addr_unlock_bh(dev);
		return;
	}
	netif_addr_unlock_bh(dev);

	spin_lock_irq(&priv->lock);
	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
		goto out;

	if (!priv->broadcast) {
		struct ipoib_mcast *broadcast;

		broadcast = ipoib_mcast_alloc(dev);
		if (!broadcast) {
			ipoib_warn(priv, "failed to allocate broadcast group\n");
			/*
			 * Restart us after a 1 second delay to retry
			 * creating our broadcast group and attaching to
			 * it. Until this succeeds, this ipoib dev is
			 * completely stalled (multicast-wise).
			 */
			__ipoib_mcast_schedule_join_thread(priv, NULL, 1);
			goto out;
		}

		memcpy(broadcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
		       sizeof (union ib_gid));
		priv->broadcast = broadcast;

		__ipoib_mcast_add(dev, priv->broadcast);
	}

	if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
		if (IS_ERR_OR_NULL(priv->broadcast->mc) &&
		    !test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags)) {
			mcast = priv->broadcast;
			if (mcast->backoff > 1 &&
			    time_before(jiffies, mcast->delay_until)) {
				delay_until = mcast->delay_until;
				mcast = NULL;
			}
		}
		goto out;
	}

	/*
	 * We'll never get here until the broadcast group is both allocated
	 * and attached
	 */
	list_for_each_entry(mcast, &priv->multicast_list, list) {
		if (IS_ERR_OR_NULL(mcast->mc) &&
		    !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags) &&
		    (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ||
		     !skb_queue_empty(&mcast->pkt_queue))) {
			if (mcast->backoff == 1 ||
			    time_after_eq(jiffies, mcast->delay_until)) {
				/* Found the next unjoined group */
				if (ipoib_mcast_join(dev, mcast)) {
					spin_unlock_irq(&priv->lock);
					return;
				}
			} else if (!delay_until ||
				   time_before(mcast->delay_until, delay_until))
				delay_until = mcast->delay_until;
		}
	}

	mcast = NULL;
	ipoib_dbg_mcast(priv, "successfully started all multicast joins\n");

out:
	if (delay_until) {
		cancel_delayed_work(&priv->mcast_task);
		queue_delayed_work(priv->wq, &priv->mcast_task,
				   delay_until - jiffies);
	}
	if (mcast)
		ipoib_mcast_join(dev, mcast);

	spin_unlock_irq(&priv->lock);
}

void ipoib_mcast_start_thread(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	unsigned long flags;

	ipoib_dbg_mcast(priv, "starting multicast thread\n");

	spin_lock_irqsave(&priv->lock, flags);
	__ipoib_mcast_schedule_join_thread(priv, NULL, 0);
	spin_unlock_irqrestore(&priv->lock, flags);
}

void ipoib_mcast_stop_thread(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_dbg_mcast(priv, "stopping multicast thread\n");

	cancel_delayed_work_sync(&priv->mcast_task);
}

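/*
 * Release our SA membership and, for attached groups, detach the QP.
 * Any in-flight join should have been waited on before this is called
 * (see ipoib_mcast_remove_list()). Detach errors are only logged; the
 * function always returns 0.
 */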
static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct rdma_netdev *rn = netdev_priv(dev);
	int ret = 0;

	if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
		ipoib_warn(priv, "ipoib_mcast_leave on an in-flight join\n");

	if (!IS_ERR_OR_NULL(mcast->mc))
		ib_sa_free_multicast(mcast->mc);

	if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
		ipoib_dbg_mcast(priv, "leaving MGID %pI6\n",
				mcast->mcmember.mgid.raw);

		/* Remove ourselves from the multicast group */
		ret = rn->detach_mcast(dev, priv->ca, &mcast->mcmember.mgid,
				       be16_to_cpu(mcast->mcmember.mlid));
		if (ret)
			ipoib_warn(priv, "ib_detach_mcast failed (result = %d)\n", ret);
	} else if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
		ipoib_dbg(priv, "leaving with no mcmember but not a SENDONLY join\n");

	return 0;
}

/*
 * Check if the multicast group is send-only. If so, remove it from the
 * maps and add it to the remove list.
 */
void ipoib_check_and_add_mcast_sendonly(struct ipoib_dev_priv *priv, u8 *mgid,
					struct list_head *remove_list)
{
	/* Is this a multicast address? */
	if (*mgid == 0xff) {
		struct ipoib_mcast *mcast = __ipoib_mcast_find(priv->dev, mgid);

		if (mcast && test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
			list_del(&mcast->list);
			rb_erase(&mcast->rb_node, &priv->multicast_tree);
			list_add_tail(&mcast->list, remove_list);
		}
	}
}

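/*
 * Leave and free every group on @remove_list. Any group with an
 * in-flight join is waited on first so that ipoib_mcast_leave() never
 * runs concurrently with its join completion.
 */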
void ipoib_mcast_remove_list(struct list_head *remove_list)
{
	struct ipoib_mcast *mcast, *tmcast;

	/*
	 * make sure the in-flight joins have finished before we attempt
	 * to leave
	 */
	list_for_each_entry_safe(mcast, tmcast, remove_list, list)
		if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
			wait_for_completion(&mcast->done);

	list_for_each_entry_safe(mcast, tmcast, remove_list, list) {
		ipoib_mcast_leave(mcast->dev, mcast);
		ipoib_mcast_free(mcast);
	}
}

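/*
 * Multicast transmit path. @daddr is the 20-byte IPoIB hardware address
 * (4-byte QPN + 16-byte GID). If the group's AH is ready, the skb is sent
 * immediately; otherwise the skb is queued on the group (creating a
 * send-only group on first use) and the join thread is kicked.
 */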
void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct rdma_netdev *rn = netdev_priv(dev);
	struct ipoib_mcast *mcast;
	unsigned long flags;
	void *mgid = daddr + 4;

	spin_lock_irqsave(&priv->lock, flags);

	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags) ||
	    !priv->broadcast ||
	    !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		goto unlock;
	}

	mcast = __ipoib_mcast_find(dev, mgid);
	if (!mcast || !mcast->ah) {
		if (!mcast) {
			/* Let's create a new send-only group now */
			ipoib_dbg_mcast(priv, "setting up send-only multicast group for %pI6\n",
					mgid);

			mcast = ipoib_mcast_alloc(dev);
			if (!mcast) {
				ipoib_warn(priv, "unable to allocate memory for multicast structure\n");
				++dev->stats.tx_dropped;
				dev_kfree_skb_any(skb);
				goto unlock;
			}

			set_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags);
			memcpy(mcast->mcmember.mgid.raw, mgid,
			       sizeof (union ib_gid));
			__ipoib_mcast_add(dev, mcast);
			list_add_tail(&mcast->list, &priv->multicast_list);
		}
		if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE) {
			/* put pseudoheader back on for next time */
			skb_push(skb, sizeof(struct ipoib_pseudo_header));
			skb_queue_tail(&mcast->pkt_queue, skb);
		} else {
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}
		if (!test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
			__ipoib_mcast_schedule_join_thread(priv, NULL, 0);
		}
	} else {
		struct ipoib_neigh *neigh;

		spin_unlock_irqrestore(&priv->lock, flags);
		neigh = ipoib_neigh_get(dev, daddr);
		spin_lock_irqsave(&priv->lock, flags);
		if (!neigh) {
			neigh = ipoib_neigh_alloc(daddr, dev);
			/* Make sure the neigh is added to the mcast list
			 * only once.
			 */
			if (neigh && list_empty(&neigh->list)) {
				kref_get(&mcast->ah->ref);
				neigh->ah = mcast->ah;
				neigh->ah->valid = 1;
				list_add_tail(&neigh->list, &mcast->neigh_list);
			}
		}
		spin_unlock_irqrestore(&priv->lock, flags);
		mcast->ah->last_send = rn->send(dev, skb, mcast->ah->ah,
						IB_MULTICAST_QPN);
		if (neigh)
			ipoib_neigh_put(neigh);
		return;
	}

unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
}

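/*
 * Empty the multicast list and RB-tree (including the broadcast group)
 * under priv->lock, then leave and free everything outside the lock via
 * ipoib_mcast_remove_list(). Serialized by mcast_mutex.
 */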
void ipoib_mcast_dev_flush(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	LIST_HEAD(remove_list);
	struct ipoib_mcast *mcast, *tmcast;
	unsigned long flags;

	mutex_lock(&priv->mcast_mutex);
	ipoib_dbg_mcast(priv, "flushing multicast list\n");

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
		list_del(&mcast->list);
		rb_erase(&mcast->rb_node, &priv->multicast_tree);
		list_add_tail(&mcast->list, &remove_list);
	}

	if (priv->broadcast) {
		rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);
		list_add_tail(&priv->broadcast->list, &remove_list);
		priv->broadcast = NULL;
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	ipoib_mcast_remove_list(&remove_list);
	mutex_unlock(&priv->mcast_mutex);
}

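/*
 * A valid IPoIB multicast hardware address matches the device broadcast
 * address in the reserved QPN, GID prefix, scope, and P_Key bytes, while
 * byte 6 (the upper signature byte) is left unchecked so that both the
 * IPv4 (0x401b) and IPv6 (0x601b) MGID signatures are accepted.
 */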
static int ipoib_mcast_addr_is_valid(const u8 *addr, const u8 *broadcast)
{
	/* reserved QPN, prefix, scope */
	if (memcmp(addr, broadcast, 6))
		return 0;
	/* signature lower, pkey */
	if (memcmp(addr + 7, broadcast + 7, 3))
		return 0;
	return 1;
}

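/*
 * Resynchronize our multicast list with the net core's list of hardware
 * multicast addresses: create entries for new addresses, upgrade matching
 * send-only groups to full joins, and remove groups that have gone away.
 */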
void ipoib_mcast_restart_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, restart_task);
	struct net_device *dev = priv->dev;
	struct netdev_hw_addr *ha;
	struct ipoib_mcast *mcast, *tmcast;
	LIST_HEAD(remove_list);
	struct ib_sa_mcmember_rec rec;

	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
		/*
		 * shortcut...on shutdown flush is called next, just
		 * let it do all the work
		 */
		return;

	ipoib_dbg_mcast(priv, "restarting multicast task\n");

	netif_addr_lock_bh(dev);
	spin_lock_irq(&priv->lock);

	/*
	 * Unfortunately, the networking core only gives us a list of all of
	 * the multicast hardware addresses. We need to figure out which ones
	 * are new and which ones have been removed
	 */

	/* Clear out the found flag */
	list_for_each_entry(mcast, &priv->multicast_list, list)
		clear_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);

	/* Mark the entries that are found and create the ones that don't exist */
	netdev_for_each_mc_addr(ha, dev) {
		union ib_gid mgid;

		if (!ipoib_mcast_addr_is_valid(ha->addr, dev->broadcast))
			continue;

		memcpy(mgid.raw, ha->addr + 4, sizeof(mgid));

		mcast = __ipoib_mcast_find(dev, &mgid);
		if (!mcast || test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
			struct ipoib_mcast *nmcast;

			/* Ignore a group that is joined directly from userspace */
			if (test_bit(IPOIB_FLAG_UMCAST, &priv->flags) &&
			    !ib_sa_get_mcmember_rec(priv->ca, priv->port, &mgid, &rec)) {
				ipoib_dbg_mcast(priv, "ignoring multicast entry for mgid %pI6\n",
						mgid.raw);
				continue;
			}

			/* Not found or send-only group, let's add a new entry */
			ipoib_dbg_mcast(priv, "adding multicast entry for mgid %pI6\n",
					mgid.raw);

			nmcast = ipoib_mcast_alloc(dev);
			if (!nmcast) {
				ipoib_warn(priv, "unable to allocate memory for multicast structure\n");
				continue;
			}

			set_bit(IPOIB_MCAST_FLAG_FOUND, &nmcast->flags);

			nmcast->mcmember.mgid = mgid;

			if (mcast) {
				/* Destroy the send-only entry */
				list_move_tail(&mcast->list, &remove_list);

				rb_replace_node(&mcast->rb_node,
						&nmcast->rb_node,
						&priv->multicast_tree);
			} else
				__ipoib_mcast_add(dev, nmcast);

			list_add_tail(&nmcast->list, &priv->multicast_list);
		}

		if (mcast)
			set_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);
	}

	/* Remove all of the entries that don't exist anymore */
	list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
		if (!test_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags) &&
		    !test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
			ipoib_dbg_mcast(priv, "deleting multicast group %pI6\n",
					mcast->mcmember.mgid.raw);

			rb_erase(&mcast->rb_node, &priv->multicast_tree);

			/* Move to the remove list */
			list_move_tail(&mcast->list, &remove_list);
		}
	}

	spin_unlock_irq(&priv->lock);
	netif_addr_unlock_bh(dev);

	ipoib_mcast_remove_list(&remove_list);

	/*
	 * Double check that we are still up
	 */
	if (test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
		spin_lock_irq(&priv->lock);
		__ipoib_mcast_schedule_join_thread(priv, NULL, 0);
		spin_unlock_irq(&priv->lock);
	}
}

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev)
{
	struct ipoib_mcast_iter *iter;

	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	memset(iter->mgid.raw, 0, 16);

	if (ipoib_mcast_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

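/*
 * Advance the iterator to the first group whose MGID sorts after
 * iter->mgid and snapshot its state. Returns nonzero when there are no
 * more entries.
 */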
int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter)
{
	struct ipoib_dev_priv *priv = ipoib_priv(iter->dev);
	struct rb_node *n;
	struct ipoib_mcast *mcast;
	int ret = 1;

	spin_lock_irq(&priv->lock);

	n = rb_first(&priv->multicast_tree);

	while (n) {
		mcast = rb_entry(n, struct ipoib_mcast, rb_node);

		if (memcmp(iter->mgid.raw, mcast->mcmember.mgid.raw,
			   sizeof (union ib_gid)) < 0) {
			iter->mgid = mcast->mcmember.mgid;
			iter->created = mcast->created;
			iter->queuelen = skb_queue_len(&mcast->pkt_queue);
			iter->complete = !!mcast->ah;
			iter->send_only = !!(mcast->flags & (1 << IPOIB_MCAST_FLAG_SENDONLY));

			ret = 0;

			break;
		}

		n = rb_next(n);
	}

	spin_unlock_irq(&priv->lock);

	return ret;
}

void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter,
			   union ib_gid *mgid,
			   unsigned long *created,
			   unsigned int *queuelen,
			   unsigned int *complete,
			   unsigned int *send_only)
{
	*mgid = iter->mgid;
	*created = iter->created;
	*queuelen = iter->queuelen;
	*complete = iter->complete;
	*send_only = iter->send_only;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */