/*
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_srq.c 3047 2005-08-10 03:59:35Z roland $
 */

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sched.h>

#include <asm/io.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"

enum {
        MTHCA_MAX_DIRECT_SRQ_SIZE = 4 * PAGE_SIZE
};

struct mthca_tavor_srq_context {
        __be64 wqe_base_ds;     /* low 6 bits is descriptor size */
        __be32 state_pd;
        __be32 lkey;
        __be32 uar;
        __be16 limit_watermark;
        __be16 wqe_cnt;
        u32    reserved[2];
};

struct mthca_arbel_srq_context {
        __be32 state_logsize_srqn;
        __be32 lkey;
        __be32 db_index;
        __be32 logstride_usrpage;
        __be64 wqe_base;
        __be32 eq_pd;
        __be16 limit_watermark;
        __be16 wqe_cnt;
        u16    reserved1;
        __be16 wqe_counter;
        u32    reserved2[3];
};

/*
 * Return a pointer to the n'th WQE in the SRQ buffer, whether the
 * buffer is one direct chunk or a list of pages.
 */
static void *get_wqe(struct mthca_srq *srq, int n)
{
        if (srq->is_direct)
                return srq->queue.direct.buf + (n << srq->wqe_shift);
        else
                return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf +
                        ((n << srq->wqe_shift) & (PAGE_SIZE - 1));
}

/*
 * Return a pointer to the location within a WQE that we're using as a
 * link when the WQE is in the free list.  We use the imm field
 * because in the Tavor case, posting a WQE may overwrite the next
 * segment of the previous WQE, but a receive WQE will never touch the
 * imm field.  This avoids corrupting our free list if the previous
 * WQE has already completed and been put on the free list when we
 * post the next WQE.
 */
static inline int *wqe_to_link(void *wqe)
{
        return (int *) (wqe + offsetof(struct mthca_next_seg, imm));
}

static void mthca_tavor_init_srq_context(struct mthca_dev *dev,
                                         struct mthca_pd *pd,
                                         struct mthca_srq *srq,
                                         struct mthca_tavor_srq_context *context)
{
        memset(context, 0, sizeof *context);

        context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4));
        context->state_pd    = cpu_to_be32(pd->pd_num);
        context->lkey        = cpu_to_be32(srq->mr.ibmr.lkey);

        if (pd->ibpd.uobject)
                context->uar =
                        cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
        else
                context->uar = cpu_to_be32(dev->driver_uar.index);
}

static void mthca_arbel_init_srq_context(struct mthca_dev *dev,
                                         struct mthca_pd *pd,
                                         struct mthca_srq *srq,
                                         struct mthca_arbel_srq_context *context)
{
        int logsize, max;

        memset(context, 0, sizeof *context);

        /*
         * Put max in a temporary variable to work around gcc bug
         * triggered by ilog2() on sparc64.
         */
        max = srq->max;
        logsize = ilog2(max);
        context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn);
        context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
        context->db_index = cpu_to_be32(srq->db_index);
        context->logstride_usrpage = cpu_to_be32((srq->wqe_shift - 4) << 29);
        if (pd->ibpd.uobject)
                context->logstride_usrpage |=
                        cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
        else
                context->logstride_usrpage |= cpu_to_be32(dev->driver_uar.index);
        context->eq_pd = cpu_to_be32(MTHCA_EQ_ASYNC << 24 | pd->pd_num);
}

static void mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq)
{
        mthca_buf_free(dev, srq->max << srq->wqe_shift, &srq->queue,
                       srq->is_direct, &srq->mr);
        kfree(srq->wrid);
}

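/*
 * Allocate the kernel-owned SRQ buffer and wrid array and link every
 * WQE onto the free list.  Userspace SRQs (pd->ibpd.uobject set)
 * manage their own buffers, so there is nothing to do for them here.
 */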
static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
                               struct mthca_srq *srq)
{
        struct mthca_data_seg *scatter;
        void *wqe;
        int err;
        int i;

        if (pd->ibpd.uobject)
                return 0;

        srq->wrid = kmalloc(srq->max * sizeof (u64), GFP_KERNEL);
        if (!srq->wrid)
                return -ENOMEM;

        err = mthca_buf_alloc(dev, srq->max << srq->wqe_shift,
                              MTHCA_MAX_DIRECT_SRQ_SIZE,
                              &srq->queue, &srq->is_direct, pd, 1, &srq->mr);
        if (err) {
                kfree(srq->wrid);
                return err;
        }

        /*
         * Now initialize the SRQ buffer so that all of the WQEs are
         * linked into the list of free WQEs.  In addition, set the
         * scatter list L_Keys to the sentry value of 0x100.
         */
        for (i = 0; i < srq->max; ++i) {
                wqe = get_wqe(srq, i);

                *wqe_to_link(wqe) = i < srq->max - 1 ? i + 1 : -1;

                for (scatter = wqe + sizeof (struct mthca_next_seg);
                     (void *) scatter < wqe + (1 << srq->wqe_shift);
                     ++scatter)
                        scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
        }

        srq->last = get_wqe(srq, srq->max - 1);

        return 0;
}

int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
                    struct ib_srq_attr *attr, struct mthca_srq *srq)
{
        struct mthca_mailbox *mailbox;
        u8 status;
        int ds;
        int err;

        /* Sanity check SRQ size before proceeding */
        if (attr->max_wr  > dev->limits.max_srq_wqes ||
            attr->max_sge > dev->limits.max_srq_sge)
                return -EINVAL;

        srq->max     = attr->max_wr;
        srq->max_gs  = attr->max_sge;
        srq->counter = 0;

        if (mthca_is_memfree(dev))
                srq->max = roundup_pow_of_two(srq->max + 1);
        else
                srq->max = srq->max + 1;

        ds = max(64UL,
                 roundup_pow_of_two(sizeof (struct mthca_next_seg) +
                                    srq->max_gs * sizeof (struct mthca_data_seg)));

        if (!mthca_is_memfree(dev) && (ds > dev->limits.max_desc_sz))
                return -EINVAL;

        srq->wqe_shift = ilog2(ds);

        srq->srqn = mthca_alloc(&dev->srq_table.alloc);
        if (srq->srqn == -1)
                return -ENOMEM;

        if (mthca_is_memfree(dev)) {
                err = mthca_table_get(dev, dev->srq_table.table, srq->srqn);
                if (err)
                        goto err_out;

                if (!pd->ibpd.uobject) {
                        srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ,
                                                       srq->srqn, &srq->db);
                        if (srq->db_index < 0) {
                                err = -ENOMEM;
                                goto err_out_icm;
                        }
                }
        }

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox)) {
                err = PTR_ERR(mailbox);
                goto err_out_db;
        }

        err = mthca_alloc_srq_buf(dev, pd, srq);
        if (err)
                goto err_out_mailbox;

        spin_lock_init(&srq->lock);
        srq->refcount = 1;
        init_waitqueue_head(&srq->wait);
        mutex_init(&srq->mutex);

        if (mthca_is_memfree(dev))
                mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf);
        else
                mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf);

        err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn, &status);

        if (err) {
                mthca_warn(dev, "SW2HW_SRQ failed (%d)\n", err);
                goto err_out_free_buf;
        }
        if (status) {
                mthca_warn(dev, "SW2HW_SRQ returned status 0x%02x\n",
                           status);
                err = -EINVAL;
                goto err_out_free_buf;
        }

        spin_lock_irq(&dev->srq_table.lock);
        if (mthca_array_set(&dev->srq_table.srq,
                            srq->srqn & (dev->limits.num_srqs - 1),
                            srq)) {
                spin_unlock_irq(&dev->srq_table.lock);
                goto err_out_free_srq;
        }
        spin_unlock_irq(&dev->srq_table.lock);

        mthca_free_mailbox(dev, mailbox);

        srq->first_free = 0;
        srq->last_free  = srq->max - 1;

        attr->max_wr    = srq->max - 1;
        attr->max_sge   = srq->max_gs;

        return 0;

err_out_free_srq:
        err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
        if (err)
                mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
        else if (status)
                mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);

err_out_free_buf:
        if (!pd->ibpd.uobject)
                mthca_free_srq_buf(dev, srq);

err_out_mailbox:
        mthca_free_mailbox(dev, mailbox);

err_out_db:
        if (!pd->ibpd.uobject && mthca_is_memfree(dev))
                mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);

err_out_icm:
        mthca_table_put(dev, dev->srq_table.table, srq->srqn);

err_out:
        mthca_free(&dev->srq_table.alloc, srq->srqn);

        return err;
}

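/*
 * Read the current SRQ reference count under the SRQ table lock;
 * mthca_free_srq() uses this to wait for the count to drop to zero.
 */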
static inline int get_srq_refcount(struct mthca_dev *dev, struct mthca_srq *srq)
{
        int c;

        spin_lock_irq(&dev->srq_table.lock);
        c = srq->refcount;
        spin_unlock_irq(&dev->srq_table.lock);

        return c;
}

void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
{
        struct mthca_mailbox *mailbox;
        int err;
        u8 status;

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox)) {
                mthca_warn(dev, "No memory for mailbox to free SRQ.\n");
                return;
        }

        err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
        if (err)
                mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
        else if (status)
                mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);

        spin_lock_irq(&dev->srq_table.lock);
        mthca_array_clear(&dev->srq_table.srq,
                          srq->srqn & (dev->limits.num_srqs - 1));
        --srq->refcount;
        spin_unlock_irq(&dev->srq_table.lock);

        wait_event(srq->wait, !get_srq_refcount(dev, srq));

        if (!srq->ibsrq.uobject) {
                mthca_free_srq_buf(dev, srq);
                if (mthca_is_memfree(dev))
                        mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
        }

        mthca_table_put(dev, dev->srq_table.table, srq->srqn);
        mthca_free(&dev->srq_table.alloc, srq->srqn);
        mthca_free_mailbox(dev, mailbox);
}

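/*
 * Modify SRQ attributes.  Only arming the SRQ limit event
 * (IB_SRQ_LIMIT) is supported; resizing (IB_SRQ_MAX_WR) is rejected.
 */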
int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                     enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
        struct mthca_dev *dev = to_mdev(ibsrq->device);
        struct mthca_srq *srq = to_msrq(ibsrq);
        int ret;
        u8 status;

        /* We don't support resizing SRQs (yet?) */
        if (attr_mask & IB_SRQ_MAX_WR)
                return -EINVAL;

        if (attr_mask & IB_SRQ_LIMIT) {
                u32 max_wr = mthca_is_memfree(dev) ? srq->max - 1 : srq->max;
                if (attr->srq_limit > max_wr)
                        return -EINVAL;

                mutex_lock(&srq->mutex);
                ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit, &status);
                mutex_unlock(&srq->mutex);

                if (ret)
                        return ret;
                if (status)
                        return -EINVAL;
        }

        return 0;
}

int mthca_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
        struct mthca_dev *dev = to_mdev(ibsrq->device);
        struct mthca_srq *srq = to_msrq(ibsrq);
        struct mthca_mailbox *mailbox;
        struct mthca_arbel_srq_context *arbel_ctx;
        struct mthca_tavor_srq_context *tavor_ctx;
        u8 status;
        int err;

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);

        err = mthca_QUERY_SRQ(dev, srq->srqn, mailbox, &status);
        if (err)
                goto out;

        if (mthca_is_memfree(dev)) {
                arbel_ctx = mailbox->buf;
                srq_attr->srq_limit = be16_to_cpu(arbel_ctx->limit_watermark);
        } else {
                tavor_ctx = mailbox->buf;
                srq_attr->srq_limit = be16_to_cpu(tavor_ctx->limit_watermark);
        }

        srq_attr->max_wr  = srq->max - 1;
        srq_attr->max_sge = srq->max_gs;

out:
        mthca_free_mailbox(dev, mailbox);

        return err;
}

void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
                     enum ib_event_type event_type)
{
        struct mthca_srq *srq;
        struct ib_event event;

        spin_lock(&dev->srq_table.lock);
        srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1));
        if (srq)
                ++srq->refcount;
        spin_unlock(&dev->srq_table.lock);

        if (!srq) {
                mthca_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
                return;
        }

        if (!srq->ibsrq.event_handler)
                goto out;

        event.device      = &dev->ib_dev;
        event.event       = event_type;
        event.element.srq = &srq->ibsrq;
        srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);

out:
        spin_lock(&dev->srq_table.lock);
        if (!--srq->refcount)
                wake_up(&srq->wait);
        spin_unlock(&dev->srq_table.lock);
}

/*
 * Return the WQE at wqe_addr to the SRQ free list.  This function
 * must be called with IRQs disabled.
 */
void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
{
        int ind;

        ind = wqe_addr >> srq->wqe_shift;

        spin_lock(&srq->lock);

        if (likely(srq->first_free >= 0))
                *wqe_to_link(get_wqe(srq, srq->last_free)) = ind;
        else
                srq->first_free = ind;

        *wqe_to_link(get_wqe(srq, ind)) = -1;
        srq->last_free = ind;

        spin_unlock(&srq->lock);
}

int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
                              struct ib_recv_wr **bad_wr)
{
        struct mthca_dev *dev = to_mdev(ibsrq->device);
        struct mthca_srq *srq = to_msrq(ibsrq);
        __be32 doorbell[2];
        unsigned long flags;
        int err = 0;
        int first_ind;
        int ind;
        int next_ind;
        int nreq;
        int i;
        void *wqe;
        void *prev_wqe;

        spin_lock_irqsave(&srq->lock, flags);

        first_ind = srq->first_free;

        for (nreq = 0; wr; wr = wr->next) {
                ind = srq->first_free;

                if (ind < 0) {
                        mthca_err(dev, "SRQ %06x full\n", srq->srqn);
                        err = -ENOMEM;
                        *bad_wr = wr;
                        break;
                }

                wqe      = get_wqe(srq, ind);
                next_ind = *wqe_to_link(wqe);

                if (next_ind < 0) {
                        mthca_err(dev, "SRQ %06x full\n", srq->srqn);
                        err = -ENOMEM;
                        *bad_wr = wr;
                        break;
                }

                prev_wqe  = srq->last;
                srq->last = wqe;

                ((struct mthca_next_seg *) wqe)->nda_op = 0;
                ((struct mthca_next_seg *) wqe)->ee_nds = 0;
                /* flags field will always remain 0 */

                wqe += sizeof (struct mthca_next_seg);

                if (unlikely(wr->num_sge > srq->max_gs)) {
                        err = -EINVAL;
                        *bad_wr = wr;
                        srq->last = prev_wqe;
                        break;
                }

                for (i = 0; i < wr->num_sge; ++i) {
                        mthca_set_data_seg(wqe, wr->sg_list + i);
                        wqe += sizeof (struct mthca_data_seg);
                }

                if (i < srq->max_gs)
                        mthca_set_data_seg_inval(wqe);

                ((struct mthca_next_seg *) prev_wqe)->nda_op =
                        cpu_to_be32((ind << srq->wqe_shift) | 1);
                wmb();
                ((struct mthca_next_seg *) prev_wqe)->ee_nds =
                        cpu_to_be32(MTHCA_NEXT_DBD);

                srq->wrid[ind]  = wr->wr_id;
                srq->first_free = next_ind;

                ++nreq;
                if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
                        nreq = 0;

                        doorbell[0] = cpu_to_be32(first_ind << srq->wqe_shift);
                        doorbell[1] = cpu_to_be32(srq->srqn << 8);

                        /*
                         * Make sure that descriptors are written
                         * before doorbell is rung.
                         */
                        wmb();

                        mthca_write64(doorbell,
                                      dev->kar + MTHCA_RECEIVE_DOORBELL,
                                      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));

                        first_ind = srq->first_free;
                }
        }

        if (likely(nreq)) {
                doorbell[0] = cpu_to_be32(first_ind << srq->wqe_shift);
                doorbell[1] = cpu_to_be32((srq->srqn << 8) | nreq);

                /*
                 * Make sure that descriptors are written before
                 * doorbell is rung.
                 */
                wmb();

                mthca_write64(doorbell,
                              dev->kar + MTHCA_RECEIVE_DOORBELL,
                              MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
        }

        /*
         * Make sure doorbells don't leak out of SRQ spinlock and
         * reach the HCA out of order:
         */
        mmiowb();

        spin_unlock_irqrestore(&srq->lock, flags);
        return err;
}

int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
                              struct ib_recv_wr **bad_wr)
{
        struct mthca_dev *dev = to_mdev(ibsrq->device);
        struct mthca_srq *srq = to_msrq(ibsrq);
        unsigned long flags;
        int err = 0;
        int ind;
        int next_ind;
        int nreq;
        int i;
        void *wqe;

        spin_lock_irqsave(&srq->lock, flags);

        for (nreq = 0; wr; ++nreq, wr = wr->next) {
                ind = srq->first_free;

                if (ind < 0) {
                        mthca_err(dev, "SRQ %06x full\n", srq->srqn);
                        err = -ENOMEM;
                        *bad_wr = wr;
                        break;
                }

                wqe      = get_wqe(srq, ind);
                next_ind = *wqe_to_link(wqe);

                if (next_ind < 0) {
                        mthca_err(dev, "SRQ %06x full\n", srq->srqn);
                        err = -ENOMEM;
                        *bad_wr = wr;
                        break;
                }

                ((struct mthca_next_seg *) wqe)->nda_op =
                        cpu_to_be32((next_ind << srq->wqe_shift) | 1);
                ((struct mthca_next_seg *) wqe)->ee_nds = 0;
                /* flags field will always remain 0 */

                wqe += sizeof (struct mthca_next_seg);

                if (unlikely(wr->num_sge > srq->max_gs)) {
                        err = -EINVAL;
                        *bad_wr = wr;
                        break;
                }

                for (i = 0; i < wr->num_sge; ++i) {
                        mthca_set_data_seg(wqe, wr->sg_list + i);
                        wqe += sizeof (struct mthca_data_seg);
                }

                if (i < srq->max_gs)
                        mthca_set_data_seg_inval(wqe);

                srq->wrid[ind]  = wr->wr_id;
                srq->first_free = next_ind;
        }

        if (likely(nreq)) {
                srq->counter += nreq;

                /*
                 * Make sure that descriptors are written before
                 * we write doorbell record.
                 */
                wmb();
                *srq->db = cpu_to_be32(srq->counter);
        }

        spin_unlock_irqrestore(&srq->lock, flags);
        return err;
}

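/*
 * Report the maximum number of scatter entries a consumer may request
 * per SRQ WQE.  (Hypothetical example: if the firmware reports a
 * maximum descriptor size of 1008 bytes, only the largest power of two
 * below it, 512 bytes, is usable for a Tavor SRQ descriptor, which
 * bounds how many mthca_data_seg entries fit after the mthca_next_seg
 * header.)
 */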
int mthca_max_srq_sge(struct mthca_dev *dev)
{
        if (mthca_is_memfree(dev))
                return dev->limits.max_sg;

        /*
         * SRQ allocations are based on powers of 2 for Tavor,
         * (although they only need to be multiples of 16 bytes).
         *
         * Therefore, we need to base the max number of sg entries on
         * the largest power of 2 descriptor size that is <= to the
         * actual max WQE descriptor size, rather than return the
         * max_sg value given by the firmware (which is based on WQE
         * sizes as multiples of 16, not powers of 2).
         *
         * If SRQ implementation is changed for Tavor to be based on
         * multiples of 16, the calculation below can be deleted and
         * the FW max_sg value returned.
         */
        return min_t(int, dev->limits.max_sg,
                     ((1 << (fls(dev->limits.max_desc_sz) - 1)) -
                      sizeof (struct mthca_next_seg)) /
                     sizeof (struct mthca_data_seg));
}

int mthca_init_srq_table(struct mthca_dev *dev)
{
        int err;

        if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
                return 0;

        spin_lock_init(&dev->srq_table.lock);

        err = mthca_alloc_init(&dev->srq_table.alloc,
                               dev->limits.num_srqs,
                               dev->limits.num_srqs - 1,
                               dev->limits.reserved_srqs);
        if (err)
                return err;

        err = mthca_array_init(&dev->srq_table.srq,
                               dev->limits.num_srqs);
        if (err)
                mthca_alloc_cleanup(&dev->srq_table.alloc);

        return err;
}

void mthca_cleanup_srq_table(struct mthca_dev *dev)
{
        if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
                return;

        mthca_array_cleanup(&dev->srq_table.srq, dev->limits.num_srqs);
        mthca_alloc_cleanup(&dev->srq_table.alloc);
}