/*
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sched.h>

#include <asm/io.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"

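/*
 * SRQ buffers no larger than this are allocated as a single
 * physically contiguous ("direct") buffer; larger buffers use a
 * list of pages.
 */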
enum {
	MTHCA_MAX_DIRECT_SRQ_SIZE = 4 * PAGE_SIZE
};

struct mthca_tavor_srq_context {
	__be64 wqe_base_ds;	/* low 6 bits hold descriptor size */
	__be32 state_pd;
	__be32 lkey;
	__be32 uar;
	__be16 limit_watermark;
	__be16 wqe_cnt;
	u32    reserved[2];
};

struct mthca_arbel_srq_context {
	__be32 state_logsize_srqn;
	__be32 lkey;
	__be32 db_index;
	__be32 logstride_usrpage;
	__be64 wqe_base;
	__be32 eq_pd;
	__be16 limit_watermark;
	__be16 wqe_cnt;
	u16    reserved1;
	__be16 wqe_counter;
	u32    reserved2[3];
};

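/* Return a pointer to WQE number n, for either buffer layout. */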
static void *get_wqe(struct mthca_srq *srq, int n)
{
	if (srq->is_direct)
		return srq->queue.direct.buf + (n << srq->wqe_shift);
	else
		return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf +
			((n << srq->wqe_shift) & (PAGE_SIZE - 1));
}

/*
 * Return a pointer to the location within a WQE that we're using as a
 * link when the WQE is in the free list.  We use the imm field
 * because in the Tavor case, posting a WQE may overwrite the next
 * segment of the previous WQE, but a receive WQE will never touch the
 * imm field.  This avoids corrupting our free list if the previous
 * WQE has already completed and been put on the free list when we
 * post the next WQE.
 */
static inline int *wqe_to_link(void *wqe)
{
	return (int *) (wqe + offsetof(struct mthca_next_seg, imm));
}

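/* Fill in the SW2HW_SRQ mailbox contents for Tavor-family HCAs. */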
static void mthca_tavor_init_srq_context(struct mthca_dev *dev,
					 struct mthca_pd *pd,
					 struct mthca_srq *srq,
					 struct mthca_tavor_srq_context *context)
{
	memset(context, 0, sizeof *context);

	context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4));
	context->state_pd    = cpu_to_be32(pd->pd_num);
	context->lkey        = cpu_to_be32(srq->mr.ibmr.lkey);

	if (pd->ibpd.uobject)
		context->uar =
			cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
	else
		context->uar = cpu_to_be32(dev->driver_uar.index);
}

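/* Fill in the SW2HW_SRQ mailbox contents for memfree (Arbel) HCAs. */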
static void mthca_arbel_init_srq_context(struct mthca_dev *dev,
					 struct mthca_pd *pd,
					 struct mthca_srq *srq,
					 struct mthca_arbel_srq_context *context)
{
	int logsize, max;

	memset(context, 0, sizeof *context);

	/*
	 * Put max in a temporary variable to work around gcc bug
	 * triggered by ilog2() on sparc64.
	 */
	max = srq->max;
	logsize = ilog2(max);
	context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn);
	context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
	context->db_index = cpu_to_be32(srq->db_index);
	context->logstride_usrpage = cpu_to_be32((srq->wqe_shift - 4) << 29);
	if (pd->ibpd.uobject)
		context->logstride_usrpage |=
			cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
	else
		context->logstride_usrpage |= cpu_to_be32(dev->driver_uar.index);
	context->eq_pd = cpu_to_be32(MTHCA_EQ_ASYNC << 24 | pd->pd_num);
}

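/* Free the SRQ buffer and work request ID array of a kernel SRQ. */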
static void mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq)
{
	mthca_buf_free(dev, srq->max << srq->wqe_shift, &srq->queue,
		       srq->is_direct, &srq->mr);
	kfree(srq->wrid);
}

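/*
 * Allocate the SRQ buffer and work request ID array and link all
 * WQEs into the free list.  Userspace SRQs supply their own buffer,
 * so this is a no-op for them.
 */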
static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
			       struct mthca_srq *srq)
{
	struct mthca_data_seg *scatter;
	void *wqe;
	int err;
	int i;

	if (pd->ibpd.uobject)
		return 0;

	srq->wrid = kmalloc(srq->max * sizeof (u64), GFP_KERNEL);
	if (!srq->wrid)
		return -ENOMEM;

	err = mthca_buf_alloc(dev, srq->max << srq->wqe_shift,
			      MTHCA_MAX_DIRECT_SRQ_SIZE,
			      &srq->queue, &srq->is_direct, pd, 1, &srq->mr);
	if (err) {
		kfree(srq->wrid);
		return err;
	}

	/*
	 * Now initialize the SRQ buffer so that all of the WQEs are
	 * linked into the list of free WQEs.  In addition, set the
	 * scatter list L_Keys to the sentinel value MTHCA_INVAL_LKEY
	 * (0x100).
	 */
	for (i = 0; i < srq->max; ++i) {
		struct mthca_next_seg *next;

		next = wqe = get_wqe(srq, i);

		if (i < srq->max - 1) {
			*wqe_to_link(wqe) = i + 1;
			next->nda_op = htonl(((i + 1) << srq->wqe_shift) | 1);
		} else {
			*wqe_to_link(wqe) = -1;
			next->nda_op = 0;
		}

		for (scatter = wqe + sizeof (struct mthca_next_seg);
		     (void *) scatter < wqe + (1 << srq->wqe_shift);
		     ++scatter)
			scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
	}

	srq->last = get_wqe(srq, srq->max - 1);

	return 0;
}

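/*
 * Allocate an SRQ: reserve an SRQ number, set up ICM and doorbell
 * resources on memfree HCAs, build the buffer and context, and hand
 * the SRQ to the HCA with SW2HW_SRQ.
 */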
int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
		    struct ib_srq_attr *attr, struct mthca_srq *srq)
{
	struct mthca_mailbox *mailbox;
	u8 status;
	int ds;
	int err;

	/* Sanity check SRQ size before proceeding */
	if (attr->max_wr  > dev->limits.max_srq_wqes ||
	    attr->max_sge > dev->limits.max_srq_sge)
		return -EINVAL;

	srq->max      = attr->max_wr;
	srq->max_gs   = attr->max_sge;
	srq->counter  = 0;

	if (mthca_is_memfree(dev))
		srq->max = roundup_pow_of_two(srq->max + 1);
	else
		srq->max = srq->max + 1;

	ds = max(64UL,
		 roundup_pow_of_two(sizeof (struct mthca_next_seg) +
				    srq->max_gs * sizeof (struct mthca_data_seg)));

	if (!mthca_is_memfree(dev) && (ds > dev->limits.max_desc_sz))
		return -EINVAL;

	srq->wqe_shift = ilog2(ds);

	srq->srqn = mthca_alloc(&dev->srq_table.alloc);
	if (srq->srqn == -1)
		return -ENOMEM;

	if (mthca_is_memfree(dev)) {
		err = mthca_table_get(dev, dev->srq_table.table, srq->srqn);
		if (err)
			goto err_out;

		if (!pd->ibpd.uobject) {
			srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ,
						       srq->srqn, &srq->db);
			if (srq->db_index < 0) {
				err = -ENOMEM;
				goto err_out_icm;
			}
		}
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_out_db;
	}

	err = mthca_alloc_srq_buf(dev, pd, srq);
	if (err)
		goto err_out_mailbox;

	spin_lock_init(&srq->lock);
	srq->refcount = 1;
	init_waitqueue_head(&srq->wait);
	mutex_init(&srq->mutex);

	if (mthca_is_memfree(dev))
		mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf);
	else
		mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf);

	err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn, &status);

	if (err) {
		mthca_warn(dev, "SW2HW_SRQ failed (%d)\n", err);
		goto err_out_free_buf;
	}
	if (status) {
		mthca_warn(dev, "SW2HW_SRQ returned status 0x%02x\n",
			   status);
		err = -EINVAL;
		goto err_out_free_buf;
	}

	spin_lock_irq(&dev->srq_table.lock);
	if (mthca_array_set(&dev->srq_table.srq,
			    srq->srqn & (dev->limits.num_srqs - 1),
			    srq)) {
		spin_unlock_irq(&dev->srq_table.lock);
		goto err_out_free_srq;
	}
	spin_unlock_irq(&dev->srq_table.lock);

	mthca_free_mailbox(dev, mailbox);

	srq->first_free = 0;
	srq->last_free  = srq->max - 1;

	attr->max_wr    = srq->max - 1;
	attr->max_sge   = srq->max_gs;

	return 0;

err_out_free_srq:
	err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
	if (err)
		mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
	else if (status)
		mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);
	/* the only way here is a failed mthca_array_set() */
	err = -ENOMEM;

err_out_free_buf:
	if (!pd->ibpd.uobject)
		mthca_free_srq_buf(dev, srq);

err_out_mailbox:
	mthca_free_mailbox(dev, mailbox);

err_out_db:
	if (!pd->ibpd.uobject && mthca_is_memfree(dev))
		mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);

err_out_icm:
	mthca_table_put(dev, dev->srq_table.table, srq->srqn);

err_out:
	mthca_free(&dev->srq_table.alloc, srq->srqn);

	return err;
}

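/* Sample the SRQ reference count under the SRQ table lock. */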
static inline int get_srq_refcount(struct mthca_dev *dev, struct mthca_srq *srq)
{
	int c;

	spin_lock_irq(&dev->srq_table.lock);
	c = srq->refcount;
	spin_unlock_irq(&dev->srq_table.lock);

	return c;
}

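/*
 * Destroy an SRQ: reclaim it from the HCA with HW2SW_SRQ, wait for
 * any event handlers still referencing it, then release the buffer,
 * doorbell record, ICM and SRQ number.
 */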
void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
{
	struct mthca_mailbox *mailbox;
	int err;
	u8 status;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		mthca_warn(dev, "No memory for mailbox to free SRQ.\n");
		return;
	}

	err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
	if (err)
		mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
	else if (status)
		mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);

	spin_lock_irq(&dev->srq_table.lock);
	mthca_array_clear(&dev->srq_table.srq,
			  srq->srqn & (dev->limits.num_srqs - 1));
	--srq->refcount;
	spin_unlock_irq(&dev->srq_table.lock);

	wait_event(srq->wait, !get_srq_refcount(dev, srq));

	if (!srq->ibsrq.uobject) {
		mthca_free_srq_buf(dev, srq);
		if (mthca_is_memfree(dev))
			mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
	}

	mthca_table_put(dev, dev->srq_table.table, srq->srqn);
	mthca_free(&dev->srq_table.alloc, srq->srqn);
	mthca_free_mailbox(dev, mailbox);
}

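/* Modify an SRQ: only arming a limit (IB_SRQ_LIMIT) is supported. */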
int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		     enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	int ret;
	u8 status;

	/* We don't support resizing SRQs (yet?) */
	if (attr_mask & IB_SRQ_MAX_WR)
		return -EINVAL;

	if (attr_mask & IB_SRQ_LIMIT) {
		u32 max_wr = mthca_is_memfree(dev) ? srq->max - 1 : srq->max;
		if (attr->srq_limit > max_wr)
			return -EINVAL;

		mutex_lock(&srq->mutex);
		ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit, &status);
		mutex_unlock(&srq->mutex);

		if (ret)
			return ret;
		if (status)
			return -EINVAL;
	}

	return 0;
}

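/* Query the SRQ context from the HCA to report the current limit. */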
int mthca_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	struct mthca_mailbox *mailbox;
	struct mthca_arbel_srq_context *arbel_ctx;
	struct mthca_tavor_srq_context *tavor_ctx;
	u8 status;
	int err;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mthca_QUERY_SRQ(dev, srq->srqn, mailbox, &status);
	if (err)
		goto out;
	if (status) {
		err = -EINVAL;
		goto out;
	}

	if (mthca_is_memfree(dev)) {
		arbel_ctx = mailbox->buf;
		srq_attr->srq_limit = be16_to_cpu(arbel_ctx->limit_watermark);
	} else {
		tavor_ctx = mailbox->buf;
		srq_attr->srq_limit = be16_to_cpu(tavor_ctx->limit_watermark);
	}

	srq_attr->max_wr  = srq->max - 1;
	srq_attr->max_sge = srq->max_gs;

out:
	mthca_free_mailbox(dev, mailbox);

	return err;
}

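/*
 * Dispatch an asynchronous SRQ event (e.g. limit reached) to the
 * consumer's event handler.  A reference is held on the SRQ so that
 * it cannot be freed while the handler runs.
 */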
void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
		     enum ib_event_type event_type)
{
	struct mthca_srq *srq;
	struct ib_event event;

	spin_lock(&dev->srq_table.lock);
	srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1));
	if (srq)
		++srq->refcount;
	spin_unlock(&dev->srq_table.lock);

	if (!srq) {
		mthca_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
		return;
	}

	if (!srq->ibsrq.event_handler)
		goto out;

	event.device      = &dev->ib_dev;
	event.event       = event_type;
	event.element.srq = &srq->ibsrq;
	srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);

out:
	spin_lock(&dev->srq_table.lock);
	if (!--srq->refcount)
		wake_up(&srq->wait);
	spin_unlock(&dev->srq_table.lock);
}

/*
 * Return the WQE at wqe_addr to the tail of the SRQ free list.  This
 * function must be called with IRQs disabled.
 */
void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
{
	int ind;
	struct mthca_next_seg *last_free;

	ind = wqe_addr >> srq->wqe_shift;

	spin_lock(&srq->lock);

	last_free = get_wqe(srq, srq->last_free);
	*wqe_to_link(last_free) = ind;
	last_free->nda_op = htonl((ind << srq->wqe_shift) | 1);
	*wqe_to_link(get_wqe(srq, ind)) = -1;
	srq->last_free = ind;

	spin_unlock(&srq->lock);
}

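/*
 * Post a chain of receive work requests to a Tavor SRQ.  Tavor needs
 * a doorbell rung for each batch, and one doorbell can cover at most
 * MTHCA_TAVOR_MAX_WQES_PER_RECV_DB WQEs.
 */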
int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			      struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	unsigned long flags;
	int err = 0;
	int first_ind;
	int ind;
	int next_ind;
	int nreq;
	int i;
	void *wqe;
	void *prev_wqe;

	spin_lock_irqsave(&srq->lock, flags);

	first_ind = srq->first_free;

	for (nreq = 0; wr; wr = wr->next) {
		ind       = srq->first_free;
		wqe       = get_wqe(srq, ind);
		next_ind  = *wqe_to_link(wqe);

		if (unlikely(next_ind < 0)) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		prev_wqe  = srq->last;
		srq->last = wqe;

		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		/* flags field will always remain 0 */

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > srq->max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			srq->last = prev_wqe;
			break;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe += sizeof (struct mthca_data_seg);
		}

		if (i < srq->max_gs)
			mthca_set_data_seg_inval(wqe);

		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD);

		srq->wrid[ind]  = wr->wr_id;
		srq->first_free = next_ind;

		++nreq;
		if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
			nreq = 0;

			/*
			 * Make sure that descriptors are written
			 * before doorbell is rung.
			 */
			wmb();

			mthca_write64(first_ind << srq->wqe_shift, srq->srqn << 8,
				      dev->kar + MTHCA_RECEIVE_DOORBELL,
				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));

			first_ind = srq->first_free;
		}
	}

	if (likely(nreq)) {
		/*
		 * Make sure that descriptors are written before
		 * doorbell is rung.
		 */
		wmb();

		mthca_write64(first_ind << srq->wqe_shift, (srq->srqn << 8) | nreq,
			      dev->kar + MTHCA_RECEIVE_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	/*
	 * Make sure doorbells don't leak out of SRQ spinlock and
	 * reach the HCA out of order:
	 */
	mmiowb();

	spin_unlock_irqrestore(&srq->lock, flags);
	return err;
}

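/*
 * Post a chain of receive work requests to a memfree (Arbel) SRQ.
 * Memfree HCAs only need the doorbell record updated with the new
 * WQE counter value.
 */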
int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			      struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	unsigned long flags;
	int err = 0;
	int ind;
	int next_ind;
	int nreq;
	int i;
	void *wqe;

	spin_lock_irqsave(&srq->lock, flags);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		ind       = srq->first_free;
		wqe       = get_wqe(srq, ind);
		next_ind  = *wqe_to_link(wqe);

		if (unlikely(next_ind < 0)) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		/* flags field will always remain 0 */

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > srq->max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe += sizeof (struct mthca_data_seg);
		}

		if (i < srq->max_gs)
			mthca_set_data_seg_inval(wqe);

		srq->wrid[ind]  = wr->wr_id;
		srq->first_free = next_ind;
	}

	if (likely(nreq)) {
		srq->counter += nreq;

		/*
		 * Make sure that descriptors are written before
		 * we write doorbell record.
		 */
		wmb();
		*srq->db = cpu_to_be32(srq->counter);
	}

	spin_unlock_irqrestore(&srq->lock, flags);
	return err;
}

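/* Maximum number of scatter entries possible per SRQ receive WQE. */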
int mthca_max_srq_sge(struct mthca_dev *dev)
{
	if (mthca_is_memfree(dev))
		return dev->limits.max_sg;

	/*
	 * SRQ allocations are based on powers of 2 for Tavor
	 * (although they only need to be multiples of 16 bytes).
	 *
	 * Therefore, we need to base the max number of sg entries on
	 * the largest power of 2 descriptor size that is no larger
	 * than the actual max WQE descriptor size, rather than return
	 * the max_sg value given by the firmware (which is based on
	 * WQE sizes as multiples of 16, not powers of 2).
	 *
	 * If the SRQ implementation is changed for Tavor to be based
	 * on multiples of 16, the calculation below can be deleted
	 * and the FW max_sg value returned.
	 */
	return min_t(int, dev->limits.max_sg,
		     ((1 << (fls(dev->limits.max_desc_sz) - 1)) -
		      sizeof (struct mthca_next_seg)) /
		     sizeof (struct mthca_data_seg));
}

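/* Set up the SRQ table at driver initialization. */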
int mthca_init_srq_table(struct mthca_dev *dev)
{
	int err;

	if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
		return 0;

	spin_lock_init(&dev->srq_table.lock);

	err = mthca_alloc_init(&dev->srq_table.alloc,
			       dev->limits.num_srqs,
			       dev->limits.num_srqs - 1,
			       dev->limits.reserved_srqs);
	if (err)
		return err;

	err = mthca_array_init(&dev->srq_table.srq,
			       dev->limits.num_srqs);
	if (err)
		mthca_alloc_cleanup(&dev->srq_table.alloc);

	return err;
}

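/* Tear down the SRQ table at driver cleanup. */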
void mthca_cleanup_srq_table(struct mthca_dev *dev)
{
	if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
		return;

	mthca_array_cleanup(&dev->srq_table.srq, dev->limits.num_srqs);
	mthca_alloc_cleanup(&dev->srq_table.alloc);
}