/*
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sched.h>

#include <asm/io.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"

enum {
	MTHCA_MAX_DIRECT_SRQ_SIZE = 4 * PAGE_SIZE
};

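/*
 * SRQ context layouts as the hardware expects them in the SW2HW_SRQ
 * mailbox: one format for Tavor and one for the memfree (Arbel) HCAs.
 */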
struct mthca_tavor_srq_context {
	__be64 wqe_base_ds;	/* low 6 bits is descriptor size */
	__be32 state_pd;
	__be32 lkey;
	__be32 uar;
	__be16 limit_watermark;
	__be16 wqe_cnt;
	u32    reserved[2];
};

struct mthca_arbel_srq_context {
	__be32 state_logsize_srqn;
	__be32 lkey;
	__be32 db_index;
	__be32 logstride_usrpage;
	__be64 wqe_base;
	__be32 eq_pd;
	__be16 limit_watermark;
	__be16 wqe_cnt;
	u16    reserved1;
	__be16 wqe_counter;
	u32    reserved2[3];
};

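/*
 * Return a pointer to the n-th WQE in the SRQ buffer, whether the
 * buffer is a single contiguous (direct) allocation or a list of
 * pages.
 */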
static void *get_wqe(struct mthca_srq *srq, int n)
{
	if (srq->is_direct)
		return srq->queue.direct.buf + (n << srq->wqe_shift);
	else
		return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf +
			((n << srq->wqe_shift) & (PAGE_SIZE - 1));
}

/*
 * Return a pointer to the location within a WQE that we're using as a
 * link when the WQE is in the free list.  We use the imm field
 * because in the Tavor case, posting a WQE may overwrite the next
 * segment of the previous WQE, but a receive WQE will never touch the
 * imm field.  This avoids corrupting our free list if the previous
 * WQE has already completed and been put on the free list when we
 * post the next WQE.
 */
static inline int *wqe_to_link(void *wqe)
{
	return (int *) (wqe + offsetof(struct mthca_next_seg, imm));
}

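/*
 * Fill in an SRQ context in the format Tavor hardware expects,
 * pointing the HCA at the kernel's UAR or the user process's UAR as
 * appropriate.
 */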
static void mthca_tavor_init_srq_context(struct mthca_dev *dev,
					 struct mthca_pd *pd,
					 struct mthca_srq *srq,
					 struct mthca_tavor_srq_context *context,
					 bool is_user)
{
	memset(context, 0, sizeof *context);

	context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4));
	context->state_pd    = cpu_to_be32(pd->pd_num);
	context->lkey        = cpu_to_be32(srq->mr.ibmr.lkey);

	if (is_user)
		context->uar =
			cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
	else
		context->uar = cpu_to_be32(dev->driver_uar.index);
}

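/*
 * Fill in an SRQ context in the format memfree (Arbel) hardware
 * expects, including the doorbell record index and the log2 of the
 * SRQ size and WQE stride.
 */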
static void mthca_arbel_init_srq_context(struct mthca_dev *dev,
					 struct mthca_pd *pd,
					 struct mthca_srq *srq,
					 struct mthca_arbel_srq_context *context,
					 bool is_user)
{
	int logsize, max;

	memset(context, 0, sizeof *context);

	/*
	 * Put max in a temporary variable to work around gcc bug
	 * triggered by ilog2() on sparc64.
	 */
	max = srq->max;
	logsize = ilog2(max);
	context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn);
	context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
	context->db_index = cpu_to_be32(srq->db_index);
	context->logstride_usrpage = cpu_to_be32((srq->wqe_shift - 4) << 29);
	if (is_user)
		context->logstride_usrpage |=
			cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
	else
		context->logstride_usrpage |= cpu_to_be32(dev->driver_uar.index);
	context->eq_pd = cpu_to_be32(MTHCA_EQ_ASYNC << 24 | pd->pd_num);
}

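/* Free a kernel SRQ's WQE buffer and work request ID array. */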
static void mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq)
{
	mthca_buf_free(dev, srq->max << srq->wqe_shift, &srq->queue,
		       srq->is_direct, &srq->mr);
	kfree(srq->wrid);
}

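/*
 * Allocate and initialize the WQE buffer for a kernel SRQ.  For
 * userspace SRQs (udata != NULL) the buffer is allocated in
 * userspace, so there is nothing to do here.
 */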
static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
			       struct mthca_srq *srq, struct ib_udata *udata)
{
	struct mthca_data_seg *scatter;
	void *wqe;
	int err;
	int i;

	if (udata)
		return 0;

	srq->wrid = kmalloc_array(srq->max, sizeof(u64), GFP_KERNEL);
	if (!srq->wrid)
		return -ENOMEM;

	err = mthca_buf_alloc(dev, srq->max << srq->wqe_shift,
			      MTHCA_MAX_DIRECT_SRQ_SIZE,
			      &srq->queue, &srq->is_direct, pd, 1, &srq->mr);
	if (err) {
		kfree(srq->wrid);
		return err;
	}

	/*
	 * Now initialize the SRQ buffer so that all of the WQEs are
	 * linked into the list of free WQEs.  In addition, set the
	 * scatter list L_Keys to the sentinel value of 0x100
	 * (MTHCA_INVAL_LKEY).
	 */
	for (i = 0; i < srq->max; ++i) {
		struct mthca_next_seg *next;

		next = wqe = get_wqe(srq, i);

		if (i < srq->max - 1) {
			*wqe_to_link(wqe) = i + 1;
			next->nda_op = htonl(((i + 1) << srq->wqe_shift) | 1);
		} else {
			*wqe_to_link(wqe) = -1;
			next->nda_op = 0;
		}

		for (scatter = wqe + sizeof (struct mthca_next_seg);
		     (void *) scatter < wqe + (1 << srq->wqe_shift);
		     ++scatter)
			scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
	}

	srq->last = get_wqe(srq, srq->max - 1);

	return 0;
}

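/*
 * Create an SRQ: allocate an SRQ number, ICM and doorbell resources
 * on memfree HCAs, and the WQE buffer for kernel SRQs, then hand
 * ownership of the SRQ context to the hardware with SW2HW_SRQ.
 */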
int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
		    struct ib_srq_attr *attr, struct mthca_srq *srq,
		    struct ib_udata *udata)
{
	struct mthca_mailbox *mailbox;
	int ds;
	int err;

	/* Sanity check SRQ size before proceeding */
	if (attr->max_wr  > dev->limits.max_srq_wqes ||
	    attr->max_sge > dev->limits.max_srq_sge)
		return -EINVAL;

	srq->max      = attr->max_wr;
	srq->max_gs   = attr->max_sge;
	srq->counter  = 0;

	if (mthca_is_memfree(dev))
		srq->max = roundup_pow_of_two(srq->max + 1);
	else
		srq->max = srq->max + 1;

	ds = max(64UL,
		 roundup_pow_of_two(sizeof (struct mthca_next_seg) +
				    srq->max_gs * sizeof (struct mthca_data_seg)));

	if (!mthca_is_memfree(dev) && (ds > dev->limits.max_desc_sz))
		return -EINVAL;

	srq->wqe_shift = ilog2(ds);

	srq->srqn = mthca_alloc(&dev->srq_table.alloc);
	if (srq->srqn == -1)
		return -ENOMEM;

	if (mthca_is_memfree(dev)) {
		err = mthca_table_get(dev, dev->srq_table.table, srq->srqn);
		if (err)
			goto err_out;

		if (!udata) {
			srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ,
						       srq->srqn, &srq->db);
			if (srq->db_index < 0) {
				err = -ENOMEM;
				goto err_out_icm;
			}
		}
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_out_db;
	}

	err = mthca_alloc_srq_buf(dev, pd, srq, udata);
	if (err)
		goto err_out_mailbox;

	spin_lock_init(&srq->lock);
	srq->refcount = 1;
	init_waitqueue_head(&srq->wait);
	mutex_init(&srq->mutex);

	if (mthca_is_memfree(dev))
		mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf, udata);
	else
		mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf, udata);

	err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn);

	if (err) {
		mthca_warn(dev, "SW2HW_SRQ failed (%d)\n", err);
		goto err_out_free_buf;
	}

	spin_lock_irq(&dev->srq_table.lock);
	if (mthca_array_set(&dev->srq_table.srq,
			    srq->srqn & (dev->limits.num_srqs - 1),
			    srq)) {
		spin_unlock_irq(&dev->srq_table.lock);
		goto err_out_free_srq;
	}
	spin_unlock_irq(&dev->srq_table.lock);

	mthca_free_mailbox(dev, mailbox);

	srq->first_free = 0;
	srq->last_free  = srq->max - 1;

	attr->max_wr    = srq->max - 1;
	attr->max_sge   = srq->max_gs;

	return 0;

err_out_free_srq:
	err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn);
	if (err)
		mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);

err_out_free_buf:
	if (!udata)
		mthca_free_srq_buf(dev, srq);

err_out_mailbox:
	mthca_free_mailbox(dev, mailbox);

err_out_db:
	if (!udata && mthca_is_memfree(dev))
		mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);

err_out_icm:
	mthca_table_put(dev, dev->srq_table.table, srq->srqn);

err_out:
	mthca_free(&dev->srq_table.alloc, srq->srqn);

	return err;
}

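/* Read the SRQ reference count under the SRQ table lock. */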
static inline int get_srq_refcount(struct mthca_dev *dev, struct mthca_srq *srq)
{
	int c;

	spin_lock_irq(&dev->srq_table.lock);
	c = srq->refcount;
	spin_unlock_irq(&dev->srq_table.lock);

	return c;
}

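/*
 * Destroy an SRQ: take ownership back from the hardware with
 * HW2SW_SRQ, wait for any remaining references (e.g. from async
 * event handling) to be dropped, then free the buffer, doorbell and
 * ICM resources.
 */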
void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
{
	struct mthca_mailbox *mailbox;
	int err;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		mthca_warn(dev, "No memory for mailbox to free SRQ.\n");
		return;
	}

	err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn);
	if (err)
		mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);

	spin_lock_irq(&dev->srq_table.lock);
	mthca_array_clear(&dev->srq_table.srq,
			  srq->srqn & (dev->limits.num_srqs - 1));
	--srq->refcount;
	spin_unlock_irq(&dev->srq_table.lock);

	wait_event(srq->wait, !get_srq_refcount(dev, srq));

	if (!srq->ibsrq.uobject) {
		mthca_free_srq_buf(dev, srq);
		if (mthca_is_memfree(dev))
			mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
	}

	mthca_table_put(dev, dev->srq_table.table, srq->srqn);
	mthca_free(&dev->srq_table.alloc, srq->srqn);
	mthca_free_mailbox(dev, mailbox);
}

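/*
 * Modify an SRQ.  Only arming the SRQ limit (IB_SRQ_LIMIT) is
 * supported; resizing is not.
 */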
int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		     enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	int ret = 0;

	/* We don't support resizing SRQs (yet?) */
	if (attr_mask & IB_SRQ_MAX_WR)
		return -EINVAL;

	if (attr_mask & IB_SRQ_LIMIT) {
		u32 max_wr = mthca_is_memfree(dev) ? srq->max - 1 : srq->max;
		if (attr->srq_limit > max_wr)
			return -EINVAL;

		mutex_lock(&srq->mutex);
		ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit);
		mutex_unlock(&srq->mutex);
	}

	return ret;
}

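/*
 * Query an SRQ's limit watermark from the hardware with QUERY_SRQ;
 * max_wr and max_sge come from the driver's own bookkeeping.
 */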
int mthca_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	struct mthca_mailbox *mailbox;
	struct mthca_arbel_srq_context *arbel_ctx;
	struct mthca_tavor_srq_context *tavor_ctx;
	int err;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mthca_QUERY_SRQ(dev, srq->srqn, mailbox);
	if (err)
		goto out;

	if (mthca_is_memfree(dev)) {
		arbel_ctx = mailbox->buf;
		srq_attr->srq_limit = be16_to_cpu(arbel_ctx->limit_watermark);
	} else {
		tavor_ctx = mailbox->buf;
		srq_attr->srq_limit = be16_to_cpu(tavor_ctx->limit_watermark);
	}

	srq_attr->max_wr  = srq->max - 1;
	srq_attr->max_sge = srq->max_gs;

out:
	mthca_free_mailbox(dev, mailbox);

	return err;
}

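/*
 * Dispatch an asynchronous event for an SRQ to its event handler,
 * holding a reference on the SRQ so it can't be freed out from under
 * us while the handler runs.
 */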
void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
		     enum ib_event_type event_type)
{
	struct mthca_srq *srq;
	struct ib_event event;

	spin_lock(&dev->srq_table.lock);
	srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1));
	if (srq)
		++srq->refcount;
	spin_unlock(&dev->srq_table.lock);

	if (!srq) {
		mthca_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
		return;
	}

	if (!srq->ibsrq.event_handler)
		goto out;

	event.device      = &dev->ib_dev;
	event.event       = event_type;
	event.element.srq = &srq->ibsrq;
	srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);

out:
	spin_lock(&dev->srq_table.lock);
	if (!--srq->refcount)
		wake_up(&srq->wait);
	spin_unlock(&dev->srq_table.lock);
}

/*
 * Return a WQE to the SRQ's free list by linking it after the
 * current tail.  This function must be called with IRQs disabled.
 */
void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
{
	int ind;
	struct mthca_next_seg *last_free;

	ind = wqe_addr >> srq->wqe_shift;

	spin_lock(&srq->lock);

	last_free = get_wqe(srq, srq->last_free);
	*wqe_to_link(last_free) = ind;
	last_free->nda_op = htonl((ind << srq->wqe_shift) | 1);
	*wqe_to_link(get_wqe(srq, ind)) = -1;
	srq->last_free = ind;

	spin_unlock(&srq->lock);
}

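/*
 * Post a list of receive work requests to a Tavor SRQ, ringing the
 * receive doorbell every MTHCA_TAVOR_MAX_WQES_PER_RECV_DB WQEs and
 * once more at the end for any remainder.
 */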
int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			      const struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	unsigned long flags;
	int err = 0;
	int first_ind;
	int ind;
	int next_ind;
	int nreq;
	int i;
	void *wqe;
	void *prev_wqe;

	spin_lock_irqsave(&srq->lock, flags);

	first_ind = srq->first_free;

	for (nreq = 0; wr; wr = wr->next) {
		ind       = srq->first_free;
		wqe       = get_wqe(srq, ind);
		next_ind  = *wqe_to_link(wqe);

		if (unlikely(next_ind < 0)) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		prev_wqe  = srq->last;
		srq->last = wqe;

		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		/* flags field will always remain 0 */

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > srq->max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			srq->last = prev_wqe;
			break;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe += sizeof (struct mthca_data_seg);
		}

		if (i < srq->max_gs)
			mthca_set_data_seg_inval(wqe);

		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD);

		srq->wrid[ind]  = wr->wr_id;
		srq->first_free = next_ind;

		++nreq;
		if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
			nreq = 0;

			/*
			 * Make sure that descriptors are written
			 * before doorbell is rung.
			 */
			wmb();

			mthca_write64(first_ind << srq->wqe_shift, srq->srqn << 8,
				      dev->kar + MTHCA_RECEIVE_DOORBELL,
				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));

			first_ind = srq->first_free;
		}
	}

	if (likely(nreq)) {
		/*
		 * Make sure that descriptors are written before
		 * doorbell is rung.
		 */
		wmb();

		mthca_write64(first_ind << srq->wqe_shift, (srq->srqn << 8) | nreq,
			      dev->kar + MTHCA_RECEIVE_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	/*
	 * Make sure doorbells don't leak out of SRQ spinlock and
	 * reach the HCA out of order:
	 */
	mmiowb();

	spin_unlock_irqrestore(&srq->lock, flags);
	return err;
}

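/*
 * Post a list of receive work requests to a memfree (Arbel) SRQ.  No
 * MMIO doorbell is rung; we just update the doorbell record in memory
 * with the new WQE counter value for the HCA to read.
 */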
int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			      const struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	unsigned long flags;
	int err = 0;
	int ind;
	int next_ind;
	int nreq;
	int i;
	void *wqe;

	spin_lock_irqsave(&srq->lock, flags);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		ind       = srq->first_free;
		wqe       = get_wqe(srq, ind);
		next_ind  = *wqe_to_link(wqe);

		if (unlikely(next_ind < 0)) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		/* flags field will always remain 0 */

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > srq->max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe += sizeof (struct mthca_data_seg);
		}

		if (i < srq->max_gs)
			mthca_set_data_seg_inval(wqe);

		srq->wrid[ind]  = wr->wr_id;
		srq->first_free = next_ind;
	}

	if (likely(nreq)) {
		srq->counter += nreq;

		/*
		 * Make sure that descriptors are written before
		 * we write doorbell record.
		 */
		wmb();
		*srq->db = cpu_to_be32(srq->counter);
	}

	spin_unlock_irqrestore(&srq->lock, flags);
	return err;
}

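/*
 * Return the maximum number of scatter/gather entries an SRQ WQE can
 * hold on this device.
 */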
int mthca_max_srq_sge(struct mthca_dev *dev)
{
	if (mthca_is_memfree(dev))
		return dev->limits.max_sg;

	/*
	 * SRQ allocations are based on powers of 2 for Tavor,
	 * (although they only need to be multiples of 16 bytes).
	 *
	 * Therefore, we need to base the max number of sg entries on
	 * the largest power of 2 descriptor size that is <= to the
	 * actual max WQE descriptor size, rather than return the
	 * max_sg value given by the firmware (which is based on WQE
	 * sizes as multiples of 16, not powers of 2).
	 *
	 * If SRQ implementation is changed for Tavor to be based on
	 * multiples of 16, the calculation below can be deleted and
	 * the FW max_sg value returned.
	 */
	return min_t(int, dev->limits.max_sg,
		     ((1 << (fls(dev->limits.max_desc_sz) - 1)) -
		      sizeof (struct mthca_next_seg)) /
		     sizeof (struct mthca_data_seg));
}

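/* Set up the SRQ number allocator and the SRQ lookup array. */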
int mthca_init_srq_table(struct mthca_dev *dev)
{
	int err;

	if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
		return 0;

	spin_lock_init(&dev->srq_table.lock);

	err = mthca_alloc_init(&dev->srq_table.alloc,
			       dev->limits.num_srqs,
			       dev->limits.num_srqs - 1,
			       dev->limits.reserved_srqs);
	if (err)
		return err;

	err = mthca_array_init(&dev->srq_table.srq,
			       dev->limits.num_srqs);
	if (err)
		mthca_alloc_cleanup(&dev->srq_table.alloc);

	return err;
}

void mthca_cleanup_srq_table(struct mthca_dev *dev)
{
	if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
		return;

	mthca_array_cleanup(&dev->srq_table.srq, dev->limits.num_srqs);
	mthca_alloc_cleanup(&dev->srq_table.alloc);
}