/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/rculist.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>

#include "mcast.h"

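/**
 * rvt_driver_mcast_init - init resources for multicast
 * @rdi: rvt dev struct
 *
 * This is called once per device that registers with rdmavt.
 */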
void rvt_driver_mcast_init(struct rvt_dev_info *rdi)
{
	/*
	 * Anything that needs setup for multicast on a per driver or per rdi
	 * basis should be done in here.
	 */
	spin_lock_init(&rdi->n_mcast_grps_lock);
}

/**
 * rvt_mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct
 * @qp: the QP to link
 *
 * Return: the allocated rvt_mcast_qp, or NULL on allocation failure.
 */
static struct rvt_mcast_qp *rvt_mcast_qp_alloc(struct rvt_qp *qp)
{
	struct rvt_mcast_qp *mqp;

	mqp = kmalloc(sizeof(*mqp), GFP_KERNEL);
	if (!mqp)
		goto bail;

	mqp->qp = qp;
	atomic_inc(&qp->refcount);

bail:
	return mqp;
}

static void rvt_mcast_qp_free(struct rvt_mcast_qp *mqp)
{
	struct rvt_qp *qp = mqp->qp;

	/* Notify rvt_destroy_qp() if it is waiting. */
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);

	kfree(mqp);
}

/**
 * rvt_mcast_alloc - allocate the multicast GID structure
 * @mgid: the multicast GID
 *
 * A list of QPs will be attached to this structure.
 *
 * Return: the allocated rvt_mcast, or NULL on allocation failure.
 */
static struct rvt_mcast *rvt_mcast_alloc(union ib_gid *mgid)
{
	struct rvt_mcast *mcast;

	mcast = kzalloc(sizeof(*mcast), GFP_KERNEL);
	if (!mcast)
		goto bail;

	mcast->mgid = *mgid;
	INIT_LIST_HEAD(&mcast->qp_list);
	init_waitqueue_head(&mcast->wait);
	atomic_set(&mcast->refcount, 0);

bail:
	return mcast;
}

static void rvt_mcast_free(struct rvt_mcast *mcast)
{
	struct rvt_mcast_qp *p, *tmp;

	list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
		rvt_mcast_qp_free(p);

	kfree(mcast);
}

/**
 * rvt_mcast_find - search the global table for the given multicast GID
 * @ibp: the IB port structure
 * @mgid: the multicast GID to search for
 *
 * The caller is responsible for decrementing the reference count if found.
 *
 * Return: the rvt_mcast with a reference held, or NULL if not found.
 */
struct rvt_mcast *rvt_mcast_find(struct rvt_ibport *ibp, union ib_gid *mgid)
{
	struct rb_node *n;
	unsigned long flags;
	struct rvt_mcast *found = NULL;

	spin_lock_irqsave(&ibp->lock, flags);
	n = ibp->mcast_tree.rb_node;
	while (n) {
		int ret;
		struct rvt_mcast *mcast;

		mcast = rb_entry(n, struct rvt_mcast, rb_node);

		ret = memcmp(mgid->raw, mcast->mgid.raw,
			     sizeof(union ib_gid));
		if (ret < 0) {
			n = n->rb_left;
		} else if (ret > 0) {
			n = n->rb_right;
		} else {
			atomic_inc(&mcast->refcount);
			found = mcast;
			break;
		}
	}
	spin_unlock_irqrestore(&ibp->lock, flags);
	return found;
}
EXPORT_SYMBOL(rvt_mcast_find);

/**
 * rvt_mcast_add - insert mcast GID into table and attach QP struct
 * @rdi: rvt dev struct
 * @ibp: the IB port structure
 * @mcast: the mcast GID struct to insert
 * @mqp: the QP to attach
 *
 * Return: zero if both were added.  Return EEXIST if the GID was already in
 * the table but the QP was added.  Return ESRCH if the QP was already
 * attached and neither structure was added.  Return ENOMEM if the limit on
 * either mcast groups or attached QPs was exceeded.
 */
static int rvt_mcast_add(struct rvt_dev_info *rdi, struct rvt_ibport *ibp,
			 struct rvt_mcast *mcast, struct rvt_mcast_qp *mqp)
{
	struct rb_node **n = &ibp->mcast_tree.rb_node;
	struct rb_node *pn = NULL;
	int ret;

	spin_lock_irq(&ibp->lock);

	while (*n) {
		struct rvt_mcast *tmcast;
		struct rvt_mcast_qp *p;

		pn = *n;
		tmcast = rb_entry(pn, struct rvt_mcast, rb_node);

		ret = memcmp(mcast->mgid.raw, tmcast->mgid.raw,
			     sizeof(union ib_gid));
		if (ret < 0) {
			n = &pn->rb_left;
			continue;
		}
		if (ret > 0) {
			n = &pn->rb_right;
			continue;
		}

		/* Search the QP list to see if this is already there. */
		list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
			if (p->qp == mqp->qp) {
				ret = ESRCH;
				goto bail;
			}
		}
		if (tmcast->n_attached ==
		    rdi->dparms.props.max_mcast_qp_attach) {
			ret = ENOMEM;
			goto bail;
		}

		tmcast->n_attached++;

		list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
		ret = EEXIST;
		goto bail;
	}

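	/* GID not found: insert a new group, subject to the device limit. */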
	spin_lock(&rdi->n_mcast_grps_lock);
	if (rdi->n_mcast_grps_allocated == rdi->dparms.props.max_mcast_grp) {
		spin_unlock(&rdi->n_mcast_grps_lock);
		ret = ENOMEM;
		goto bail;
	}

	rdi->n_mcast_grps_allocated++;
	spin_unlock(&rdi->n_mcast_grps_lock);

	mcast->n_attached++;

	list_add_tail_rcu(&mqp->list, &mcast->qp_list);

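	/* Hold a reference on behalf of the rb tree insertion. */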
	atomic_inc(&mcast->refcount);
	rb_link_node(&mcast->rb_node, pn, n);
	rb_insert_color(&mcast->rb_node, &ibp->mcast_tree);

	ret = 0;

bail:
	spin_unlock_irq(&ibp->lock);

	return ret;
}

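/**
 * rvt_attach_mcast - attach a qp to a multicast group
 * @ibqp: Infiniband qp
 * @gid: multicast guid
 * @lid: multicast lid
 *
 * Return: 0 on success
 */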
int rvt_attach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	struct rvt_ibport *ibp = rdi->ports[qp->port_num - 1];
	struct rvt_mcast *mcast;
	struct rvt_mcast_qp *mqp;
	int ret = -ENOMEM;

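	/* Special QPs (0 and 1) and QPs in the RESET state cannot attach. */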
	if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)
		return -EINVAL;

	/*
	 * Allocate data structures since it's better to do this outside of
	 * spin locks and it will most likely be needed.
	 */
	mcast = rvt_mcast_alloc(gid);
	if (!mcast)
		return -ENOMEM;

	mqp = rvt_mcast_qp_alloc(qp);
	if (!mqp)
		goto bail_mcast;

	switch (rvt_mcast_add(rdi, ibp, mcast, mqp)) {
	case ESRCH:
		/*
		 * The QP was already attached, so neither structure was
		 * used: attaching the same QP twice is not an error.
		 */
		ret = 0;
		goto bail_mqp;
	case EEXIST: /* The mcast wasn't used */
		ret = 0;
		goto bail_mcast;
	case ENOMEM:
		/* Exceeded the limit on mcast groups or attached QPs. */
		ret = -ENOMEM;
		goto bail_mqp;
	default:
		break;
	}

	return 0;

bail_mqp:
	rvt_mcast_qp_free(mqp);

bail_mcast:
	rvt_mcast_free(mcast);

	return ret;
}

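/**
 * rvt_detach_mcast - remove a qp from a multicast group
 * @ibqp: Infiniband qp
 * @gid: multicast guid
 * @lid: multicast lid
 *
 * Return: 0 on success
 */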
int rvt_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	struct rvt_ibport *ibp = rdi->ports[qp->port_num - 1];
	struct rvt_mcast *mcast = NULL;
	struct rvt_mcast_qp *p, *tmp, *delp = NULL;
	struct rb_node *n;
	int last = 0;
	int ret = 0;

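	/* The same QP restrictions apply as in rvt_attach_mcast(). */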
	if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)
		return -EINVAL;

	spin_lock_irq(&ibp->lock);

	/* Find the GID in the mcast table. */
	n = ibp->mcast_tree.rb_node;
	while (1) {
		if (!n) {
			spin_unlock_irq(&ibp->lock);
			return -EINVAL;
		}

		mcast = rb_entry(n, struct rvt_mcast, rb_node);
		ret = memcmp(gid->raw, mcast->mgid.raw,
			     sizeof(union ib_gid));
		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			break;
	}

	/* Search the QP list. */
	list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
		if (p->qp != qp)
			continue;
		/*
		 * We found it, so remove it, but don't poison the forward
		 * link until we are sure there are no list walkers.
		 */
		list_del_rcu(&p->list);
		mcast->n_attached--;
		delp = p;

		/* If this was the last attached QP, remove the GID too. */
		if (list_empty(&mcast->qp_list)) {
			rb_erase(&mcast->rb_node, &ibp->mcast_tree);
			last = 1;
		}
		break;
	}

	spin_unlock_irq(&ibp->lock);
	/* QP not attached */
	if (!delp)
		return -EINVAL;

	/*
	 * Wait for any list walkers to finish before freeing the
	 * list element.
	 */
	wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
	rvt_mcast_qp_free(delp);

	if (last) {
		atomic_dec(&mcast->refcount);
		wait_event(mcast->wait, !atomic_read(&mcast->refcount));
		rvt_mcast_free(mcast);
		spin_lock_irq(&rdi->n_mcast_grps_lock);
		rdi->n_mcast_grps_allocated--;
		spin_unlock_irq(&rdi->n_mcast_grps_lock);
	}

	return 0;
}

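/**
 * rvt_mcast_tree_empty - determine if any qps are attached to any mcast group
 * @rdi: rvt dev struct
 *
 * Return: in use count
 */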
int rvt_mcast_tree_empty(struct rvt_dev_info *rdi)
{
	int i;
	int in_use = 0;

	for (i = 0; i < rdi->dparms.nports; i++)
		if (rdi->ports[i]->mcast_tree.rb_node)
			in_use++;
	return in_use;
}