/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/hash.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>
#include "qp.h"
#include "vt.h"
#include "trace.h"

/*
 * Note that it is OK to post send work requests in the SQE and ERR
 * states; rvt_do_send() will process them and generate error
 * completions as per IB 1.2 C10-96.
 */
const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = 0,
	[IB_QPS_INIT] = RVT_POST_RECV_OK,
	[IB_QPS_RTR] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK,
	[IB_QPS_RTS] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK |
	    RVT_PROCESS_NEXT_SEND_OK,
	[IB_QPS_SQD] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK,
	[IB_QPS_SQE] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_FLUSH_SEND,
	[IB_QPS_ERR] = RVT_POST_RECV_OK | RVT_FLUSH_RECV |
	    RVT_POST_SEND_OK | RVT_FLUSH_SEND,
};
EXPORT_SYMBOL(ib_rvt_state_ops);

static void get_map_page(struct rvt_qpn_table *qpt,
			 struct rvt_qpn_map *map,
			 gfp_t gfp)
{
	unsigned long page = get_zeroed_page(gfp);

	/*
	 * Free the page if someone raced with us installing it.
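	 * (Another caller may have allocated and installed a page for this
	 * map while we were in get_zeroed_page(); the check-and-install
	 * below is done under qpt->lock, so only one page ever wins.)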
	 */
	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}

/**
 * init_qpn_table - initialize the QP number table for a device
 * @qpt: the QPN table
 */
static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
{
	u32 offset, i;
	struct rvt_qpn_map *map;
	int ret = 0;

	if (!(rdi->dparms.qpn_res_end >= rdi->dparms.qpn_res_start))
		return -EINVAL;

	spin_lock_init(&qpt->lock);

	qpt->last = rdi->dparms.qpn_start;
	qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift;

	/*
	 * Drivers may want some QPs beyond what we need for verbs; let them
	 * use our QPN table. No need for two. Let's go ahead and mark the
	 * bitmaps for those. The reserved range must be *after* the range
	 * which verbs will pick from.
	 */

	/* Figure out number of bit maps needed before reserved range */
	qpt->nmaps = rdi->dparms.qpn_res_start / RVT_BITS_PER_PAGE;

	/* This should always be zero */
	offset = rdi->dparms.qpn_res_start & RVT_BITS_PER_PAGE_MASK;

	/* Starting with the first reserved bit map */
	map = &qpt->map[qpt->nmaps];

	rvt_pr_info(rdi, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n",
		    rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
	for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
		if (!map->page) {
			get_map_page(qpt, map, GFP_KERNEL);
			if (!map->page) {
				ret = -ENOMEM;
				break;
			}
		}
		set_bit(offset, map->page);
		offset++;
		if (offset == RVT_BITS_PER_PAGE) {
			/* next page */
			qpt->nmaps++;
			map++;
			offset = 0;
		}
	}
	return ret;
}

/**
 * free_qpn_table - free the QP number table for a device
 * @qpt: the QPN table
 */
static void free_qpn_table(struct rvt_qpn_table *qpt)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
		free_page((unsigned long)qpt->map[i].page);
}

int rvt_driver_qp_init(struct rvt_dev_info *rdi)
{
	int i;
	int ret = -ENOMEM;

	if (rdi->flags & RVT_FLAG_QP_INIT_DRIVER) {
		rvt_pr_info(rdi, "Driver is doing QP init.\n");
		return 0;
	}

	if (!rdi->dparms.qp_table_size)
		return -EINVAL;

	/*
	 * If driver is not doing any QP allocation then make sure it is
	 * providing the necessary QP functions.
	 */
	if (!rdi->driver_f.free_all_qps ||
	    !rdi->driver_f.qp_priv_alloc ||
	    !rdi->driver_f.qp_priv_free ||
	    !rdi->driver_f.notify_qp_reset)
		return -EINVAL;

	/* allocate parent object */
	rdi->qp_dev = kzalloc_node(sizeof(*rdi->qp_dev), GFP_KERNEL,
				   rdi->dparms.node);
	if (!rdi->qp_dev)
		return -ENOMEM;

	/* allocate hash table */
	rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size;
	rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size);
	rdi->qp_dev->qp_table =
		kmalloc_node(rdi->qp_dev->qp_table_size *
			     sizeof(*rdi->qp_dev->qp_table),
			     GFP_KERNEL, rdi->dparms.node);
	if (!rdi->qp_dev->qp_table)
		goto no_qp_table;

	for (i = 0; i < rdi->qp_dev->qp_table_size; i++)
		RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL);

	spin_lock_init(&rdi->qp_dev->qpt_lock);

	/* initialize qpn map */
	if (init_qpn_table(rdi, &rdi->qp_dev->qpn_table))
		goto fail_table;

	spin_lock_init(&rdi->n_qps_lock);

	return 0;

fail_table:
	kfree(rdi->qp_dev->qp_table);
	free_qpn_table(&rdi->qp_dev->qpn_table);

no_qp_table:
	kfree(rdi->qp_dev);

	return ret;
}

/**
 * rvt_free_all_qps - check for QPs still in use
 * @rdi: rvt device info structure
 *
 * There should not be any QPs still in use.
 * Free memory for table.
 */
static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
{
	unsigned long flags;
	struct rvt_qp *qp;
	unsigned n, qp_inuse = 0;
	spinlock_t *ql; /* work around too long line below */

	if (rdi->driver_f.free_all_qps)
		qp_inuse = rdi->driver_f.free_all_qps(rdi);

	qp_inuse += rvt_mcast_tree_empty(rdi);

	if (!rdi->qp_dev)
		return qp_inuse;

	ql = &rdi->qp_dev->qpt_lock;
	spin_lock_irqsave(ql, flags);
	for (n = 0; n < rdi->qp_dev->qp_table_size; n++) {
		qp = rcu_dereference_protected(rdi->qp_dev->qp_table[n],
					       lockdep_is_held(ql));
		RCU_INIT_POINTER(rdi->qp_dev->qp_table[n], NULL);

		for (; qp; qp = rcu_dereference_protected(qp->next,
							  lockdep_is_held(ql)))
			qp_inuse++;
	}
	spin_unlock_irqrestore(ql, flags);
	synchronize_rcu();
	return qp_inuse;
}

void rvt_qp_exit(struct rvt_dev_info *rdi)
{
	u32 qps_inuse = rvt_free_all_qps(rdi);

	if (qps_inuse)
		rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
			   qps_inuse);
	if (!rdi->qp_dev)
		return;

	if (rdi->flags & RVT_FLAG_QP_INIT_DRIVER)
		return; /* driver did the qp init so nothing else to do */

	kfree(rdi->qp_dev->qp_table);
	free_qpn_table(&rdi->qp_dev->qpn_table);
	kfree(rdi->qp_dev);
}

static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
			      struct rvt_qpn_map *map, unsigned off)
{
	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}

/**
 * alloc_qpn - Allocate the next available qpn or zero/one for QP type
 *	       IB_QPT_SMI/IB_QPT_GSI
 * @rdi: rvt device info structure
 * @qpt: queue pair number table pointer
 * @port_num: IB port number, 1 based, comes from core
 *
 * Return: The queue pair number
 */
static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
		     enum ib_qp_type type, u8 port_num, gfp_t gfp)
{
	u32 i, offset, max_scan, qpn;
	struct rvt_qpn_map *map;
	u32 ret;

	if (rdi->driver_f.alloc_qpn)
		return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num, gfp);

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port_num - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

	qpn = qpt->last + qpt->incr;
	if (qpn >= RVT_QPN_MAX)
		qpn = qpt->incr | ((qpt->last & 1) ^ 1);
	/* offset carries bit 0 */
	offset = qpn & RVT_BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map, gfp);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset += qpt->incr;
			/*
			 * This qpn might be bogus if offset >= BITS_PER_PAGE.
			 * That is OK. It gets re-assigned below.
			 */
			qpn = mk_qpn(qpt, map, offset);
		} while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
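		 *
		 * For illustration only: if qpn_inc is 1 and qos_shift is 1,
		 * qpt->incr is 2, so one scan pass visits only even or only
		 * odd QPNs; the parity (bit 0) is flipped only when the
		 * search wraps back to the first map page.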
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			/* start at incr with current bit 0 */
			offset = qpt->incr | (offset & 1);
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			/* start at incr with current bit 0 */
			offset = qpt->incr | (offset & 1);
		} else {
			map = &qpt->map[0];
			/* wrap to first map page, invert bit 0 */
			offset = qpt->incr | ((offset & 1) ^ 1);
		}
		/* there can be no bits at shift and below */
		WARN_ON(offset & (rdi->dparms.qos_shift - 1));
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}

static void free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
	struct rvt_qpn_map *map;

	map = qpt->map + qpn / RVT_BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
}

/**
 * rvt_reset_qp - initialize the QP state to the reset state
 * @qp: the QP to reset
 * @type: the QP type
 *
 * r_lock and s_lock are required to be held by the caller
 */
void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
		  enum ib_qp_type type)
{
	if (qp->state != IB_QPS_RESET) {
		qp->state = IB_QPS_RESET;

		/* Let drivers flush their waitlist */
		rdi->driver_f.flush_qp_waiters(qp);
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
		spin_unlock(&qp->s_lock);
		spin_unlock_irq(&qp->r_lock);

		/* Stop the send queue and the retry timer */
		rdi->driver_f.stop_send_queue(qp);

		/* Wait for things to stop */
		rdi->driver_f.quiesce_qp(qp);

		/* take qp out of the hash and wait for it to be unused */
		rvt_remove_qp(rdi, qp);
		wait_event(qp->wait, !atomic_read(&qp->refcount));

		/* grab the lock b/c it was locked at call time */
		spin_lock_irq(&qp->r_lock);
		spin_lock(&qp->s_lock);

		rvt_clear_mr_refs(qp, 1);
	}

	/*
	 * Let the driver do any tear down it needs to for a qp
	 * that has been reset
	 */
	rdi->driver_f.notify_qp_reset(qp);

	qp->remote_qpn = 0;
	qp->qkey = 0;
	qp->qp_access_flags = 0;
	qp->s_flags &= RVT_S_SIGNAL_REQ_WR;
	qp->s_hdrwords = 0;
	qp->s_wqe = NULL;
	qp->s_draining = 0;
	qp->s_next_psn = 0;
	qp->s_last_psn = 0;
	qp->s_sending_psn = 0;
	qp->s_sending_hpsn = 0;
	qp->s_psn = 0;
	qp->r_psn = 0;
	qp->r_msn = 0;
	if (type == IB_QPT_RC) {
		qp->s_state = IB_OPCODE_RC_SEND_LAST;
		qp->r_state = IB_OPCODE_RC_SEND_LAST;
	} else {
		qp->s_state = IB_OPCODE_UC_SEND_LAST;
		qp->r_state = IB_OPCODE_UC_SEND_LAST;
	}
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->r_nak_state = 0;
	qp->r_aflags = 0;
	qp->r_flags = 0;
	qp->s_head = 0;
	qp->s_tail = 0;
	qp->s_cur = 0;
	qp->s_acked = 0;
	qp->s_last = 0;
	qp->s_ssn = 1;
	qp->s_lsn = 0;
	qp->s_mig_state = IB_MIG_MIGRATED;
	memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
	qp->r_head_ack_queue = 0;
	qp->s_tail_ack_queue = 0;
	qp->s_num_rd_atomic = 0;
	if (qp->r_rq.wq) {
		qp->r_rq.wq->head = 0;
		qp->r_rq.wq->tail = 0;
	}
	qp->r_sge.num_sge = 0;
}
EXPORT_SYMBOL(rvt_reset_qp);

/**
 * rvt_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Queue pair creation is mostly an rvt issue.
 * However, drivers have their own unique idea of what queue pair numbers
 * mean. For instance there is a reserved range for PSM.
 *
 * Returns the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata)
{
	struct rvt_qp *qp;
	int err;
	struct rvt_swqe *swq = NULL;
	size_t sz;
	size_t sg_list_sz;
	struct ib_qp *ret = ERR_PTR(-ENOMEM);
	struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
	void *priv = NULL;
	gfp_t gfp;

	if (!rdi)
		return ERR_PTR(-EINVAL);

	if (init_attr->cap.max_send_sge > rdi->dparms.props.max_sge ||
	    init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
	    init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO))
		return ERR_PTR(-EINVAL);

	/* GFP_NOIO is applicable to RC QPs only */
	if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
	    init_attr->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);

	gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
						GFP_NOIO : GFP_KERNEL;

	/* Check receive queue parameters if no SRQ is specified. */
	if (!init_attr->srq) {
		if (init_attr->cap.max_recv_sge > rdi->dparms.props.max_sge ||
		    init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
			return ERR_PTR(-EINVAL);

		if (init_attr->cap.max_send_sge +
		    init_attr->cap.max_send_wr +
		    init_attr->cap.max_recv_sge +
		    init_attr->cap.max_recv_wr == 0)
			return ERR_PTR(-EINVAL);
	}

	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (init_attr->port_num == 0 ||
		    init_attr->port_num > ibpd->device->phys_port_cnt)
			return ERR_PTR(-EINVAL);
	case IB_QPT_UC:
	case IB_QPT_RC:
	case IB_QPT_UD:
		sz = sizeof(struct rvt_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct rvt_swqe);
		if (gfp == GFP_NOIO)
			swq = __vmalloc(
				(init_attr->cap.max_send_wr + 1) * sz,
				gfp, PAGE_KERNEL);
		else
			swq = vmalloc_node(
				(init_attr->cap.max_send_wr + 1) * sz,
				rdi->dparms.node);
		if (!swq)
			return ERR_PTR(-ENOMEM);

		sz = sizeof(*qp);
		sg_list_sz = 0;
		if (init_attr->srq) {
			struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);

			if (srq->rq.max_sge > 1)
				sg_list_sz = sizeof(*qp->r_sg_list) *
					(srq->rq.max_sge - 1);
		} else if (init_attr->cap.max_recv_sge > 1)
			sg_list_sz = sizeof(*qp->r_sg_list) *
				(init_attr->cap.max_recv_sge - 1);
		qp = kzalloc_node(sz + sg_list_sz, gfp, rdi->dparms.node);
		if (!qp)
			goto bail_swq;

		RCU_INIT_POINTER(qp->next, NULL);

		/*
		 * Driver needs to set up its private QP structure and do any
		 * initialization that is needed.
		 */
		priv = rdi->driver_f.qp_priv_alloc(rdi, qp, gfp);
		if (!priv)
			goto bail_qp;
		qp->priv = priv;
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
					 1000UL);
		if (init_attr->srq) {
			sz = 0;
		} else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct rvt_rwqe);
			if (udata)
				qp->r_rq.wq = vmalloc_user(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz);
			else if (gfp == GFP_NOIO)
				qp->r_rq.wq = __vmalloc(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz,
						gfp, PAGE_KERNEL);
			else
				qp->r_rq.wq = vmalloc_node(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz,
						rdi->dparms.node);
			if (!qp->r_rq.wq)
				goto bail_driver_priv;
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->r_lock);
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		init_waitqueue_head(&qp->wait);
		init_timer(&qp->s_timer);
		qp->s_timer.data = (unsigned long)qp;
		INIT_LIST_HEAD(&qp->rspwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = init_attr->cap.max_send_wr + 1;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = RVT_S_SIGNAL_REQ_WR;

		err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
				init_attr->qp_type,
				init_attr->port_num, gfp);
		if (err < 0) {
			ret = ERR_PTR(err);
			goto bail_rq_wq;
		}
		qp->ibqp.qp_num = err;
		qp->port_num = init_attr->port_num;
		rvt_reset_qp(rdi, qp, init_attr->qp_type);
		break;

	default:
		/* Don't support raw QPs */
		return ERR_PTR(-EINVAL);
	}

	init_attr->cap.max_inline_data = 0;

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See rvt_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		if (!qp->r_rq.wq) {
			__u64 offset = 0;

			err = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_qpn;
			}
		} else {
			u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;

			qp->ip = rvt_create_mmap_info(rdi, s,
						      ibpd->uobject->context,
						      qp->r_rq.wq);
			if (!qp->ip) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_qpn;
			}

			err = ib_copy_to_udata(udata, &qp->ip->offset,
					       sizeof(qp->ip->offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		}
	}

	spin_lock(&rdi->n_qps_lock);
	if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
		spin_unlock(&rdi->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	rdi->n_qps_allocated++;
	/*
	 * Maintain a busy_jiffies variable that will be added to the timeout
	 * period in mod_retry_timer and add_retry_timer. This busy jiffies
	 * is scaled by the number of rc qps created for the device to reduce
	 * the number of timeouts occurring when there is a large number of
	 * qps. busy_jiffies is incremented every rc qp scaling interval.
	 * The scaling interval is selected based on extensive performance
	 * evaluation of targeted workloads.
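	 *
	 * Rough illustration (assuming, for example, an RC_QP_SCALING_INTERVAL
	 * of 16): busy_jiffies stays 0 for the first 15 RC QPs, becomes 1 at
	 * 16 RC QPs, 2 at 32, and so on, so retry timeouts stretch slightly
	 * as the RC QP count grows.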
	 */
	if (init_attr->qp_type == IB_QPT_RC) {
		rdi->n_rc_qps++;
		rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
	}
	spin_unlock(&rdi->n_qps_lock);

	if (qp->ip) {
		spin_lock_irq(&rdi->pending_lock);
		list_add(&qp->ip->pending_mmaps, &rdi->pending_mmaps);
		spin_unlock_irq(&rdi->pending_lock);
	}

	ret = &qp->ibqp;

	/*
	 * We have our QP and it's good, now keep track of what types of
	 * opcodes can be processed on this QP. We do this by keeping track of
	 * what the 3 high order bits of the opcode are.
	 */
	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_UD:
		qp->allowed_ops = IB_OPCODE_UD_SEND_ONLY & RVT_OPCODE_QP_MASK;
		break;
	case IB_QPT_RC:
		qp->allowed_ops = IB_OPCODE_RC_SEND_ONLY & RVT_OPCODE_QP_MASK;
		break;
	case IB_QPT_UC:
		qp->allowed_ops = IB_OPCODE_UC_SEND_ONLY & RVT_OPCODE_QP_MASK;
		break;
	default:
		ret = ERR_PTR(-EINVAL);
		goto bail_ip;
	}

	return ret;

bail_ip:
	kref_put(&qp->ip->ref, rvt_release_mmap_info);

bail_qpn:
	free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

bail_rq_wq:
	vfree(qp->r_rq.wq);

bail_driver_priv:
	rdi->driver_f.qp_priv_free(rdi, qp);

bail_qp:
	kfree(qp);

bail_swq:
	vfree(swq);

	return ret;
}

void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
{
	unsigned n;

	if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
		rvt_put_ss(&qp->s_rdma_read_sge);

	rvt_put_ss(&qp->r_sge);

	if (clr_sends) {
		while (qp->s_last != qp->s_head) {
			struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);
			unsigned i;

			for (i = 0; i < wqe->wr.num_sge; i++) {
				struct rvt_sge *sge = &wqe->sg_list[i];

				rvt_put_mr(sge->mr);
			}
			if (qp->ibqp.qp_type == IB_QPT_UD ||
			    qp->ibqp.qp_type == IB_QPT_SMI ||
			    qp->ibqp.qp_type == IB_QPT_GSI)
				atomic_dec(&ibah_to_rvtah(
						wqe->ud_wr.ah)->refcount);
			if (++qp->s_last >= qp->s_size)
				qp->s_last = 0;
		}
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
	}

	if (qp->ibqp.qp_type != IB_QPT_RC)
		return;

	for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
		struct rvt_ack_entry *e = &qp->s_ack_queue[n];

		if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
		    e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
	}
}
EXPORT_SYMBOL(rvt_clear_mr_refs);

/**
 * rvt_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 * Returns true if last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
{
	struct ib_wc wc;
	int ret = 0;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
		goto bail;

	qp->state = IB_QPS_ERR;

	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
		del_timer(&qp->s_timer);
	}

	if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
		qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;

	rdi->driver_f.notify_error_qp(qp);

	/* Schedule the sending tasklet to drain the send work queue. */
	if (qp->s_last != qp->s_head)
		rdi->driver_f.schedule_send(qp);

	rvt_clear_mr_refs(qp, 0);

	memset(&wc, 0, sizeof(wc));
	wc.qp = &qp->ibqp;
	wc.opcode = IB_WC_RECV;

	if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
		wc.wr_id = qp->r_wr_id;
		wc.status = err;
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	if (qp->r_rq.wq) {
		struct rvt_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		while (tail != head) {
			wc.wr_id = rvt_get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;

		spin_unlock(&qp->r_rq.lock);
	} else if (qp->ibqp.event_handler) {
		ret = 1;
	}

bail:
	return ret;
}
EXPORT_SYMBOL(rvt_error_qp);

/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
	unsigned long flags;

	atomic_inc(&qp->refcount);
	spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);

	if (qp->ibqp.qp_num <= 1) {
		rcu_assign_pointer(rvp->qp[qp->ibqp.qp_num], qp);
	} else {
		u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);

		qp->next = rdi->qp_dev->qp_table[n];
		rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp);
		trace_rvt_qpinsert(qp, n);
	}

	spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
}

/*
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive routine.
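 *
 * (The unlink happens under qpt_lock; readers may still hold an RCU
 * reference, which is why the code below waits out a grace period with
 * synchronize_rcu() before dropping the table's reference on the QP.)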
 */
void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
	u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
	unsigned long flags;
	int removed = 1;

	spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);

	if (rcu_dereference_protected(rvp->qp[0],
			lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(rvp->qp[0], NULL);
	} else if (rcu_dereference_protected(rvp->qp[1],
			lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(rvp->qp[1], NULL);
	} else {
		struct rvt_qp *q;
		struct rvt_qp __rcu **qpp;

		removed = 0;
		qpp = &rdi->qp_dev->qp_table[n];
		for (; (q = rcu_dereference_protected(*qpp,
			lockdep_is_held(&rdi->qp_dev->qpt_lock))) != NULL;
			qpp = &q->next) {
			if (q == qp) {
				RCU_INIT_POINTER(*qpp,
				     rcu_dereference_protected(qp->next,
				     lockdep_is_held(&rdi->qp_dev->qpt_lock)));
				removed = 1;
				trace_rvt_qpremove(qp, n);
				break;
			}
		}
	}

	spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
	if (removed) {
		synchronize_rcu();
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}
EXPORT_SYMBOL(rvt_remove_qp);

/**
 * rvt_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_udata *udata)
{
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct ib_event ev;
	int lastwqe = 0;
	int mig = 0;
	int pmtu = 0; /* for gcc warning only */
	enum rdma_link_layer link;

	link = rdma_port_get_link_layer(ibqp->device, qp->port_num);

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_lock);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ?
		attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask, link))
		goto inval;

	if (rdi->driver_f.check_modify_qp &&
	    rdi->driver_f.check_modify_qp(qp, attr, attr_mask, udata))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		if (attr->ah_attr.dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
			goto inval;
		if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
			goto inval;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_ah_attr.dlid >=
		    be16_to_cpu(IB_MULTICAST_LID_BASE))
			goto inval;
		if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
			goto inval;
		if (attr->alt_pkey_index >= rvt_get_npkeys(rdi))
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= rvt_get_npkeys(rdi))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (qp->ibqp.qp_type == IB_QPT_SMI ||
		    qp->ibqp.qp_type == IB_QPT_GSI ||
		    attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	if (attr_mask & IB_QP_DEST_QPN)
		if (attr->dest_qp_num > RVT_QPN_MASK)
			goto inval;

	if (attr_mask & IB_QP_RETRY_CNT)
		if (attr->retry_cnt > 7)
			goto inval;

	if (attr_mask & IB_QP_RNR_RETRY)
		if (attr->rnr_retry > 7)
			goto inval;

	/*
	 * Don't allow invalid path_mtu values. OK to set greater
	 * than the active mtu (or even the max_cap, if we have tuned
	 * that to a small mtu). We'll set qp->path_mtu
	 * to the lesser of requested attribute mtu and active,
	 * for packetizing messages.
	 * Note that the QP port has to be set in INIT and MTU in RTR.
	 */
	if (attr_mask & IB_QP_PATH_MTU) {
		pmtu = rdi->driver_f.get_pmtu_from_attr(rdi, qp, attr);
		if (pmtu < 0)
			goto inval;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		if (attr->path_mig_state == IB_MIG_REARM) {
			if (qp->s_mig_state == IB_MIG_ARMED)
				goto inval;
			if (new_state != IB_QPS_RTS)
				goto inval;
		} else if (attr->path_mig_state == IB_MIG_MIGRATED) {
			if (qp->s_mig_state == IB_MIG_REARM)
				goto inval;
			if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
				goto inval;
			if (qp->s_mig_state == IB_MIG_ARMED)
				mig = 1;
		} else {
			goto inval;
		}
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > rdi->dparms.max_rdma_atomic)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		if (qp->state != IB_QPS_RESET)
			rvt_reset_qp(rdi, qp, ibqp->qp_type);
		break;

	case IB_QPS_RTR:
		/* Allow event to re-trigger if QP set to RTR more than once */
		qp->r_flags &= ~RVT_R_COMM_EST;
		qp->state = new_state;
		break;

	case IB_QPS_SQD:
		qp->s_draining = qp->s_last != qp->s_cur;
		qp->state = new_state;
		break;

	case IB_QPS_SQE:
		if (qp->ibqp.qp_type == IB_QPT_RC)
			goto inval;
		qp->state = new_state;
		break;

	case IB_QPS_ERR:
		lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		break;

	default:
		qp->state = new_state;
		break;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_PORT)
		qp->port_num = attr->port_num;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_next_psn = attr->sq_psn & rdi->dparms.psn_modify_mask;
		qp->s_psn = qp->s_next_psn;
		qp->s_sending_psn = qp->s_next_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
		qp->s_sending_hpsn = qp->s_last_psn;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn & rdi->dparms.psn_modify_mask;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV) {
		qp->remote_ah_attr = attr->ah_attr;
		qp->s_srate = attr->ah_attr.static_rate;
		qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		qp->alt_ah_attr = attr->alt_ah_attr;
		qp->s_alt_pkey_index = attr->alt_pkey_index;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		qp->s_mig_state = attr->path_mig_state;
		if (mig) {
			qp->remote_ah_attr = qp->alt_ah_attr;
			qp->port_num = qp->alt_ah_attr.port_num;
			qp->s_pkey_index = qp->s_alt_pkey_index;
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		qp->pmtu = rdi->driver_f.mtu_from_qp(rdi, qp, pmtu);
		qp->path_mtu = rdi->driver_f.mtu_to_path_mtu(qp->pmtu);
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		qp->s_retry_cnt = attr->retry_cnt;
		qp->s_retry = attr->retry_cnt;
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry_cnt = attr->rnr_retry;
		qp->s_rnr_retry = attr->rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT) {
		qp->timeout = attr->timeout;
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
					 1000UL);
	}

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	if (rdi->driver_f.modify_qp)
		rdi->driver_f.modify_qp(qp, attr, attr_mask, udata);

	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		rvt_insert_qp(rdi, qp);

	if (lastwqe) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	if (mig) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_PATH_MIG;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	return 0;

inval:
	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);
	return -EINVAL;
}

/**
 * rvt_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int rvt_destroy_qp(struct ib_qp *ibqp)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_lock);
	rvt_reset_qp(rdi, qp, ibqp->qp_type);
	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);

	/* qpn is now available for use again */
	rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

	spin_lock(&rdi->n_qps_lock);
	rdi->n_qps_allocated--;
	if (qp->ibqp.qp_type == IB_QPT_RC) {
		rdi->n_rc_qps--;
		rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
	}
	spin_unlock(&rdi->n_qps_lock);

	if (qp->ip)
		kref_put(&qp->ip->ref, rvt_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	vfree(qp->s_wq);
	rdi->driver_f.qp_priv_free(rdi, qp);
	kfree(qp);
	return 0;
}

int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		 int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	attr->qp_state = qp->state;
	attr->cur_qp_state = attr->qp_state;
	attr->path_mtu = qp->path_mtu;
	attr->path_mig_state = qp->s_mig_state;
	attr->qkey = qp->qkey;
	attr->rq_psn = qp->r_psn & rdi->dparms.psn_mask;
	attr->sq_psn = qp->s_next_psn & rdi->dparms.psn_mask;
	attr->dest_qp_num = qp->remote_qpn;
	attr->qp_access_flags = qp->qp_access_flags;
	attr->cap.max_send_wr = qp->s_size - 1;
	attr->cap.max_recv_wr = qp->ibqp.srq ?
		0 : qp->r_rq.size - 1;
	attr->cap.max_send_sge = qp->s_max_sge;
	attr->cap.max_recv_sge = qp->r_rq.max_sge;
	attr->cap.max_inline_data = 0;
	attr->ah_attr = qp->remote_ah_attr;
	attr->alt_ah_attr = qp->alt_ah_attr;
	attr->pkey_index = qp->s_pkey_index;
	attr->alt_pkey_index = qp->s_alt_pkey_index;
	attr->en_sqd_async_notify = 0;
	attr->sq_draining = qp->s_draining;
	attr->max_rd_atomic = qp->s_max_rd_atomic;
	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
	attr->min_rnr_timer = qp->r_min_rnr_timer;
	attr->port_num = qp->port_num;
	attr->timeout = qp->timeout;
	attr->retry_cnt = qp->s_retry_cnt;
	attr->rnr_retry = qp->s_rnr_retry_cnt;
	attr->alt_port_num = qp->alt_ah_attr.port_num;
	attr->alt_timeout = qp->alt_timeout;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->cap = attr->cap;
	if (qp->s_flags & RVT_S_SIGNAL_REQ_WR)
		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	else
		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->port_num = qp->port_num;
	return 0;
}

/**
 * rvt_post_recv - post a receive on a QP
 * @ibqp: the QP to post the receive on
 * @wr: the WR to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 */
int rvt_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		  struct ib_recv_wr **bad_wr)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_rwq *wq = qp->r_rq.wq;
	unsigned long flags;

	/* Check that state is OK to post receive. */
	if (!(ib_rvt_state_ops[qp->state] & RVT_POST_RECV_OK) || !wq) {
		*bad_wr = wr;
		return -EINVAL;
	}

	for (; wr; wr = wr->next) {
		struct rvt_rwqe *wqe;
		u32 next;
		int i;

		if ((unsigned)wr->num_sge > qp->r_rq.max_sge) {
			*bad_wr = wr;
			return -EINVAL;
		}

		spin_lock_irqsave(&qp->r_rq.lock, flags);
		next = wq->head + 1;
		if (next >= qp->r_rq.size)
			next = 0;
		if (next == wq->tail) {
			spin_unlock_irqrestore(&qp->r_rq.lock, flags);
			*bad_wr = wr;
			return -ENOMEM;
		}

		wqe = rvt_get_rwqe_ptr(&qp->r_rq, wq->head);
		wqe->wr_id = wr->wr_id;
		wqe->num_sge = wr->num_sge;
		for (i = 0; i < wr->num_sge; i++)
			wqe->sg_list[i] = wr->sg_list[i];
		/* Make sure queue entry is written before the head index. */
		smp_wmb();
		wq->head = next;
		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
	}
	return 0;
}

/**
 * rvt_post_one_wr - post one RC, UC, or UD send work request
 * @qp: the QP to post on
 * @wr: the work request to send
 */
static int rvt_post_one_wr(struct rvt_qp *qp, struct ib_send_wr *wr)
{
	struct rvt_swqe *wqe;
	u32 next;
	int i;
	int j;
	int acc;
	struct rvt_lkey_table *rkt;
	struct rvt_pd *pd;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	/* IB spec says that num_sge == 0 is OK. */
	if (unlikely(wr->num_sge > qp->s_max_sge))
		return -EINVAL;

	/*
	 * Don't allow RDMA reads or atomic operations on UC or
	 * undefined operations.
	 * Make sure buffer is large enough to hold the result for atomics.
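	 * (For IB_WR_ATOMIC_CMP_AND_SWP / IB_WR_ATOMIC_FETCH_AND_ADD that
	 * means the first SGE must cover at least sizeof(u64) bytes and be
	 * 8-byte aligned, which is what the sg_list[0] checks below enforce.)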
	 */
	if (qp->ibqp.qp_type == IB_QPT_UC) {
		if ((unsigned)wr->opcode >= IB_WR_RDMA_READ)
			return -EINVAL;
	} else if (qp->ibqp.qp_type != IB_QPT_RC) {
		/* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */
		if (wr->opcode != IB_WR_SEND &&
		    wr->opcode != IB_WR_SEND_WITH_IMM)
			return -EINVAL;
		/* Check UD destination address PD */
		if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
			return -EINVAL;
	} else if ((unsigned)wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) {
		return -EINVAL;
	} else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
		   (wr->num_sge == 0 ||
		    wr->sg_list[0].length < sizeof(u64) ||
		    wr->sg_list[0].addr & (sizeof(u64) - 1))) {
		return -EINVAL;
	} else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic) {
		return -EINVAL;
	}

	next = qp->s_head + 1;
	if (next >= qp->s_size)
		next = 0;
	if (next == qp->s_last)
		return -ENOMEM;

	if (rdi->driver_f.check_send_wr &&
	    rdi->driver_f.check_send_wr(qp, wr))
		return -EINVAL;

	rkt = &rdi->lkey_table;
	pd = ibpd_to_rvtpd(qp->ibqp.pd);
	wqe = rvt_get_swqe_ptr(qp, qp->s_head);

	if (qp->ibqp.qp_type != IB_QPT_UC &&
	    qp->ibqp.qp_type != IB_QPT_RC)
		memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr));
	else if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
		 wr->opcode == IB_WR_RDMA_WRITE ||
		 wr->opcode == IB_WR_RDMA_READ)
		memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr));
	else if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		 wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
		memcpy(&wqe->atomic_wr, atomic_wr(wr), sizeof(wqe->atomic_wr));
	else
		memcpy(&wqe->wr, wr, sizeof(wqe->wr));

	wqe->length = 0;
	j = 0;
	if (wr->num_sge) {
		acc = wr->opcode >= IB_WR_RDMA_READ ?
			IB_ACCESS_LOCAL_WRITE : 0;
		for (i = 0; i < wr->num_sge; i++) {
			u32 length = wr->sg_list[i].length;
			int ok;

			if (length == 0)
				continue;
			ok = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j],
					 &wr->sg_list[i], acc);
			if (!ok)
				goto bail_inval_free;
			wqe->length += length;
			j++;
		}
		wqe->wr.num_sge = j;
	}
	if (qp->ibqp.qp_type == IB_QPT_UC ||
	    qp->ibqp.qp_type == IB_QPT_RC) {
		if (wqe->length > 0x80000000U)
			goto bail_inval_free;
	} else {
		atomic_inc(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount);
	}
	wqe->ssn = qp->s_ssn++;
	qp->s_head = next;

	return 0;

bail_inval_free:
	/* release mr holds */
	while (j) {
		struct rvt_sge *sge = &wqe->sg_list[--j];

		rvt_put_mr(sge->mr);
	}
	return -EINVAL;
}

/**
 * rvt_post_send - post a send on a QP
 * @ibqp: the QP to post the send on
 * @wr: the list of work requests to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 */
int rvt_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		  struct ib_send_wr **bad_wr)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	unsigned long flags = 0;
	int call_send;
	unsigned nreq = 0;
	int err = 0;

	spin_lock_irqsave(&qp->s_lock, flags);

	/*
	 * Ensure QP state is such that we can send. If not, bail out early;
	 * there is no need to do this every time we post a send.
	 */
	if (unlikely(!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))) {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		return -EINVAL;
	}

	/*
	 * If the send queue is empty and we only have a single WR, then just
	 * go ahead and kick the send engine into gear. Otherwise we will
	 * always just schedule the send to happen later.
	 */
	call_send = qp->s_head == ACCESS_ONCE(qp->s_last) && !wr->next;

	for (; wr; wr = wr->next) {
		err = rvt_post_one_wr(qp, wr);
		if (unlikely(err)) {
			*bad_wr = wr;
			goto bail;
		}
		nreq++;
	}
bail:
	if (nreq && !call_send)
		rdi->driver_f.schedule_send(qp);
	spin_unlock_irqrestore(&qp->s_lock, flags);
	if (nreq && call_send)
		rdi->driver_f.do_send(qp);
	return err;
}

/**
 * rvt_post_srq_recv - post a receive on a shared receive queue
 * @ibsrq: the SRQ to post the receive on
 * @wr: the list of work requests to post
 * @bad_wr: A pointer to the first WR to cause a problem is put here
 *
 * This may be called from interrupt context.
 */
int rvt_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
	struct rvt_rwq *wq;
	unsigned long flags;

	for (; wr; wr = wr->next) {
		struct rvt_rwqe *wqe;
		u32 next;
		int i;

		if ((unsigned)wr->num_sge > srq->rq.max_sge) {
			*bad_wr = wr;
			return -EINVAL;
		}

		spin_lock_irqsave(&srq->rq.lock, flags);
		wq = srq->rq.wq;
		next = wq->head + 1;
		if (next >= srq->rq.size)
			next = 0;
		if (next == wq->tail) {
			spin_unlock_irqrestore(&srq->rq.lock, flags);
			*bad_wr = wr;
			return -ENOMEM;
		}

		wqe = rvt_get_rwqe_ptr(&srq->rq, wq->head);
		wqe->wr_id = wr->wr_id;
		wqe->num_sge = wr->num_sge;
		for (i = 0; i < wr->num_sge; i++)
			wqe->sg_list[i] = wr->sg_list[i];
		/* Make sure queue entry is written before the head index. */
		smp_wmb();
		wq->head = next;
		spin_unlock_irqrestore(&srq->rq.lock, flags);
	}
	return 0;
}

void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
	struct rvt_qpn_map *map;

	map = qpt->map + qpn / RVT_BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
}
EXPORT_SYMBOL(rvt_free_qpn);

void rvt_dec_qp_cnt(struct rvt_dev_info *rdi)
{
	spin_lock(&rdi->n_qps_lock);
	rdi->n_qps_allocated--;
	spin_unlock(&rdi->n_qps_lock);
}
EXPORT_SYMBOL(rvt_dec_qp_cnt);