/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/hash.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>
#include "qp.h"
#include "vt.h"
#include "trace.h"

/*
 * Note that it is OK to post send work requests in the SQE and ERR
 * states; rvt_do_send() will process them and generate error
 * completions as per IB 1.2 C10-96.
 */
const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = 0,
	[IB_QPS_INIT] = RVT_POST_RECV_OK,
	[IB_QPS_RTR] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK,
	[IB_QPS_RTS] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK |
	    RVT_PROCESS_NEXT_SEND_OK,
	[IB_QPS_SQD] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK,
	[IB_QPS_SQE] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_FLUSH_SEND,
	[IB_QPS_ERR] = RVT_POST_RECV_OK | RVT_FLUSH_RECV |
	    RVT_POST_SEND_OK | RVT_FLUSH_SEND,
};
EXPORT_SYMBOL(ib_rvt_state_ops);

static void get_map_page(struct rvt_qpn_table *qpt,
			 struct rvt_qpn_map *map,
			 gfp_t gfp)
{
	unsigned long page = get_zeroed_page(gfp);

	/*
	 * Free the page if someone raced with us installing it.
	 */

	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}

/**
 * init_qpn_table - initialize the QP number table for a device
 * @rdi: rvt device info structure
 * @qpt: the QPN table
 */
static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
{
	u32 offset, i;
	struct rvt_qpn_map *map;
	int ret = 0;

	if (!(rdi->dparms.qpn_res_end >= rdi->dparms.qpn_res_start))
		return -EINVAL;

	spin_lock_init(&qpt->lock);

	qpt->last = rdi->dparms.qpn_start;
	qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift;

	/*
	 * Drivers may want some QPs beyond what we need for verbs; let them
	 * use our qpn table. No need for two. Let's go ahead and mark the
	 * bitmaps for those. The reserved range must be *after* the range
	 * which verbs will pick from.
	 */

	/* Figure out number of bit maps needed before reserved range */
	qpt->nmaps = rdi->dparms.qpn_res_start / RVT_BITS_PER_PAGE;

	/* This should always be zero */
	offset = rdi->dparms.qpn_res_start & RVT_BITS_PER_PAGE_MASK;

	/* Starting with the first reserved bit map */
	map = &qpt->map[qpt->nmaps];

	rvt_pr_info(rdi, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n",
		    rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
	for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
		if (!map->page) {
			get_map_page(qpt, map, GFP_KERNEL);
			if (!map->page) {
				ret = -ENOMEM;
				break;
			}
		}
		set_bit(offset, map->page);
		offset++;
		if (offset == RVT_BITS_PER_PAGE) {
			/* next page */
			qpt->nmaps++;
			map++;
			offset = 0;
		}
	}
	return ret;
}

/**
 * free_qpn_table - free the QP number table for a device
 * @qpt: the QPN table
 */
static void free_qpn_table(struct rvt_qpn_table *qpt)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
		free_page((unsigned long)qpt->map[i].page);
}

/**
 * rvt_driver_qp_init - Init driver qp resources
 * @rdi: rvt dev structure
 *
 * Return: 0 on success
 */
int rvt_driver_qp_init(struct rvt_dev_info *rdi)
{
	int i;
	int ret = -ENOMEM;

	if (!rdi->dparms.qp_table_size)
		return -EINVAL;

	/*
	 * If the driver is not doing any QP allocation then make sure it is
	 * providing the necessary QP functions.
	 */
	if (!rdi->driver_f.free_all_qps ||
	    !rdi->driver_f.qp_priv_alloc ||
	    !rdi->driver_f.qp_priv_free ||
	    !rdi->driver_f.notify_qp_reset)
		return -EINVAL;

	/* allocate parent object */
	rdi->qp_dev = kzalloc_node(sizeof(*rdi->qp_dev), GFP_KERNEL,
				   rdi->dparms.node);
	if (!rdi->qp_dev)
		return -ENOMEM;

	/* allocate hash table */
	rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size;
	rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size);
	rdi->qp_dev->qp_table =
		kmalloc_node(rdi->qp_dev->qp_table_size *
			     sizeof(*rdi->qp_dev->qp_table),
			     GFP_KERNEL, rdi->dparms.node);
	if (!rdi->qp_dev->qp_table)
		goto no_qp_table;

	for (i = 0; i < rdi->qp_dev->qp_table_size; i++)
		RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL);

	spin_lock_init(&rdi->qp_dev->qpt_lock);

	/* initialize qpn map */
	if (init_qpn_table(rdi, &rdi->qp_dev->qpn_table))
		goto fail_table;

	spin_lock_init(&rdi->n_qps_lock);

	return 0;

fail_table:
	kfree(rdi->qp_dev->qp_table);
	free_qpn_table(&rdi->qp_dev->qpn_table);

no_qp_table:
	kfree(rdi->qp_dev);

	return ret;
}
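
/*
 * Illustrative sketch only: before calling rvt_driver_qp_init() a driver is
 * expected to have filled in the dparms fields consumed above, e.g.
 *
 *	rdi->dparms.qp_table_size = 256;
 *	rdi->dparms.qpn_start = 0;
 *	rdi->dparms.qpn_inc = 1;
 *	rdi->dparms.qos_shift = 0;
 *	rdi->dparms.qpn_res_start = 0xc000;
 *	rdi->dparms.qpn_res_end = 0xffff;
 *
 * The values here are hypothetical; each driver picks its own table size and
 * reserved QPN range (which must come after the range verbs picks from).
 */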

/**
 * rvt_free_all_qps - check for QPs still in use
 * @rdi: rvt device info structure
 *
 * There should not be any QPs still in use.
 * Free memory for table.
 */
static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
{
	unsigned long flags;
	struct rvt_qp *qp;
	unsigned n, qp_inuse = 0;
	spinlock_t *ql; /* work around too long line below */

	if (rdi->driver_f.free_all_qps)
		qp_inuse = rdi->driver_f.free_all_qps(rdi);

	qp_inuse += rvt_mcast_tree_empty(rdi);

	if (!rdi->qp_dev)
		return qp_inuse;

	ql = &rdi->qp_dev->qpt_lock;
	spin_lock_irqsave(ql, flags);
	for (n = 0; n < rdi->qp_dev->qp_table_size; n++) {
		qp = rcu_dereference_protected(rdi->qp_dev->qp_table[n],
					       lockdep_is_held(ql));
		RCU_INIT_POINTER(rdi->qp_dev->qp_table[n], NULL);

		for (; qp; qp = rcu_dereference_protected(qp->next,
							  lockdep_is_held(ql)))
			qp_inuse++;
	}
	spin_unlock_irqrestore(ql, flags);
	synchronize_rcu();
	return qp_inuse;
}

/**
 * rvt_qp_exit - clean up qps on device exit
 * @rdi: rvt dev structure
 *
 * Check for qp leaks and free resources.
 */
void rvt_qp_exit(struct rvt_dev_info *rdi)
{
	u32 qps_inuse = rvt_free_all_qps(rdi);

	if (qps_inuse)
		rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
			   qps_inuse);
	if (!rdi->qp_dev)
		return;

	kfree(rdi->qp_dev->qp_table);
	free_qpn_table(&rdi->qp_dev->qpn_table);
	kfree(rdi->qp_dev);
}

static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
			      struct rvt_qpn_map *map, unsigned off)
{
	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}
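
/*
 * Worked example of the mapping above (assuming RVT_BITS_PER_PAGE is the
 * page size in bits, i.e. 32768 for 4 KiB pages): bit 10 of map[1]
 * corresponds to QPN 1 * 32768 + 10 = 32778.  free_qpn() below inverts
 * this by dividing the QPN by RVT_BITS_PER_PAGE to find the map and
 * masking with RVT_BITS_PER_PAGE_MASK to find the bit.
 */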

/**
 * alloc_qpn - Allocate the next available qpn or zero/one for QP type
 *	       IB_QPT_SMI/IB_QPT_GSI
 * @rdi: rvt device info structure
 * @qpt: queue pair number table pointer
 * @type: the QP type
 * @port_num: IB port number, 1 based, comes from core
 * @gfp: memory allocation flags
 *
 * Return: The queue pair number
 */
static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
		     enum ib_qp_type type, u8 port_num, gfp_t gfp)
{
	u32 i, offset, max_scan, qpn;
	struct rvt_qpn_map *map;
	u32 ret;

	if (rdi->driver_f.alloc_qpn)
		return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num, gfp);

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port_num - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

	qpn = qpt->last + qpt->incr;
	if (qpn >= RVT_QPN_MAX)
		qpn = qpt->incr | ((qpt->last & 1) ^ 1);
	/* offset carries bit 0 */
	offset = qpn & RVT_BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map, gfp);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset += qpt->incr;
			/*
			 * This qpn might be bogus if offset >= BITS_PER_PAGE.
			 * That is OK. It gets re-assigned below.
			 */
			qpn = mk_qpn(qpt, map, offset);
		} while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			/* start at incr with current bit 0 */
			offset = qpt->incr | (offset & 1);
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			/* start at incr with current bit 0 */
			offset = qpt->incr | (offset & 1);
		} else {
			map = &qpt->map[0];
			/* wrap to first map page, invert bit 0 */
			offset = qpt->incr | ((offset & 1) ^ 1);
		}
		/* there can be no bits at shift and below */
		WARN_ON(offset & (rdi->dparms.qos_shift - 1));
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}

static void free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
	struct rvt_qpn_map *map;

	map = qpt->map + qpn / RVT_BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
}

/**
 * rvt_clear_mr_refs - Drop held mr refs
 * @qp: rvt qp data structure
 * @clr_sends: whether to clear the send side or not
 */
static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
{
	unsigned n;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
		rvt_put_ss(&qp->s_rdma_read_sge);

	rvt_put_ss(&qp->r_sge);

	if (clr_sends) {
		while (qp->s_last != qp->s_head) {
			struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);
			unsigned i;

			for (i = 0; i < wqe->wr.num_sge; i++) {
				struct rvt_sge *sge = &wqe->sg_list[i];

				rvt_put_mr(sge->mr);
			}
			if (qp->ibqp.qp_type == IB_QPT_UD ||
			    qp->ibqp.qp_type == IB_QPT_SMI ||
			    qp->ibqp.qp_type == IB_QPT_GSI)
				atomic_dec(&ibah_to_rvtah(
						wqe->ud_wr.ah)->refcount);
			if (++qp->s_last >= qp->s_size)
				qp->s_last = 0;
			smp_wmb(); /* see qp_set_savail */
		}
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
	}

	if (qp->ibqp.qp_type != IB_QPT_RC)
		return;

	for (n = 0; n < rvt_max_atomic(rdi); n++) {
		struct rvt_ack_entry *e = &qp->s_ack_queue[n];

		if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
		    e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
	}
}

/**
 * rvt_remove_qp - remove qp from table
 * @rdi: rvt dev struct
 * @qp: qp to remove
 *
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive routine.
 */
static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
	u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
	unsigned long flags;
	int removed = 1;

	spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);

	if (rcu_dereference_protected(rvp->qp[0],
			lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(rvp->qp[0], NULL);
	} else if (rcu_dereference_protected(rvp->qp[1],
			lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(rvp->qp[1], NULL);
	} else {
		struct rvt_qp *q;
		struct rvt_qp __rcu **qpp;

		removed = 0;
		qpp = &rdi->qp_dev->qp_table[n];
		for (; (q = rcu_dereference_protected(*qpp,
			lockdep_is_held(&rdi->qp_dev->qpt_lock))) != NULL;
			qpp = &q->next) {
			if (q == qp) {
				RCU_INIT_POINTER(*qpp,
				     rcu_dereference_protected(qp->next,
				     lockdep_is_held(&rdi->qp_dev->qpt_lock)));
				removed = 1;
				trace_rvt_qpremove(qp, n);
				break;
			}
		}
	}

	spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
	if (removed) {
		synchronize_rcu();
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}

/**
 * rvt_reset_qp - initialize the QP state to the reset state
 * @rdi: rvt dev structure
 * @qp: the QP to reset
 * @type: the QP type
 *
 * r_lock, s_hlock, and s_lock are required to be held by the caller
 */
static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			 enum ib_qp_type type)
{
	if (qp->state != IB_QPS_RESET) {
		qp->state = IB_QPS_RESET;

		/* Let drivers flush their waitlist */
		rdi->driver_f.flush_qp_waiters(qp);
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
		spin_unlock(&qp->s_lock);
		spin_unlock(&qp->s_hlock);
		spin_unlock_irq(&qp->r_lock);

		/* Stop the send queue and the retry timer */
		rdi->driver_f.stop_send_queue(qp);

		/* Wait for things to stop */
		rdi->driver_f.quiesce_qp(qp);

		/* take qp out the hash and wait for it to be unused */
		rvt_remove_qp(rdi, qp);
		wait_event(qp->wait, !atomic_read(&qp->refcount));

		/* grab the lock b/c it was locked at call time */
		spin_lock_irq(&qp->r_lock);
		spin_lock(&qp->s_hlock);
		spin_lock(&qp->s_lock);

		rvt_clear_mr_refs(qp, 1);
	}

	/*
	 * Let the driver do any tear down it needs to for a qp
	 * that has been reset
	 */
	rdi->driver_f.notify_qp_reset(qp);

	qp->remote_qpn = 0;
	qp->qkey = 0;
	qp->qp_access_flags = 0;
	qp->s_flags &= RVT_S_SIGNAL_REQ_WR;
	qp->s_hdrwords = 0;
	qp->s_wqe = NULL;
	qp->s_draining = 0;
	qp->s_next_psn = 0;
	qp->s_last_psn = 0;
	qp->s_sending_psn = 0;
	qp->s_sending_hpsn = 0;
	qp->s_psn = 0;
	qp->r_psn = 0;
	qp->r_msn = 0;
	if (type == IB_QPT_RC) {
		qp->s_state = IB_OPCODE_RC_SEND_LAST;
		qp->r_state = IB_OPCODE_RC_SEND_LAST;
	} else {
		qp->s_state = IB_OPCODE_UC_SEND_LAST;
		qp->r_state = IB_OPCODE_UC_SEND_LAST;
	}
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->r_nak_state = 0;
	qp->r_aflags = 0;
	qp->r_flags = 0;
	qp->s_head = 0;
	qp->s_tail = 0;
	qp->s_cur = 0;
	qp->s_acked = 0;
	qp->s_last = 0;
	qp->s_ssn = 1;
	qp->s_lsn = 0;
	qp->s_mig_state = IB_MIG_MIGRATED;
	if (qp->s_ack_queue)
		memset(
			qp->s_ack_queue,
			0,
			rvt_max_atomic(rdi) *
				sizeof(*qp->s_ack_queue));
	qp->r_head_ack_queue = 0;
	qp->s_tail_ack_queue = 0;
	qp->s_num_rd_atomic = 0;
	if (qp->r_rq.wq) {
		qp->r_rq.wq->head = 0;
		qp->r_rq.wq->tail = 0;
	}
	qp->r_sge.num_sge = 0;
}

/**
 * rvt_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Queue pair creation is mostly an rvt issue. However, drivers have their own
 * unique idea of what queue pair numbers mean. For instance there is a
 * reserved range for PSM.
 *
 * Return: the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata)
{
	struct rvt_qp *qp;
	int err;
	struct rvt_swqe *swq = NULL;
	size_t sz;
	size_t sg_list_sz;
	struct ib_qp *ret = ERR_PTR(-ENOMEM);
	struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
	void *priv = NULL;
	gfp_t gfp;

	if (!rdi)
		return ERR_PTR(-EINVAL);

	if (init_attr->cap.max_send_sge > rdi->dparms.props.max_sge ||
	    init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
	    init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO))
		return ERR_PTR(-EINVAL);

	/* GFP_NOIO is applicable to RC QPs only */

	if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
	    init_attr->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);

	gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
						GFP_NOIO : GFP_KERNEL;

	/* Check receive queue parameters if no SRQ is specified. */
	if (!init_attr->srq) {
		if (init_attr->cap.max_recv_sge > rdi->dparms.props.max_sge ||
		    init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
			return ERR_PTR(-EINVAL);

		if (init_attr->cap.max_send_sge +
		    init_attr->cap.max_send_wr +
		    init_attr->cap.max_recv_sge +
		    init_attr->cap.max_recv_wr == 0)
			return ERR_PTR(-EINVAL);
	}

	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (init_attr->port_num == 0 ||
		    init_attr->port_num > ibpd->device->phys_port_cnt)
			return ERR_PTR(-EINVAL);
	case IB_QPT_UC:
	case IB_QPT_RC:
	case IB_QPT_UD:
		sz = sizeof(struct rvt_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct rvt_swqe);
		if (gfp == GFP_NOIO)
			swq = __vmalloc(
				(init_attr->cap.max_send_wr + 1) * sz,
				gfp | __GFP_ZERO, PAGE_KERNEL);
		else
			swq = vzalloc_node(
				(init_attr->cap.max_send_wr + 1) * sz,
				rdi->dparms.node);
		if (!swq)
			return ERR_PTR(-ENOMEM);

		sz = sizeof(*qp);
		sg_list_sz = 0;
		if (init_attr->srq) {
			struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);

			if (srq->rq.max_sge > 1)
				sg_list_sz = sizeof(*qp->r_sg_list) *
					(srq->rq.max_sge - 1);
		} else if (init_attr->cap.max_recv_sge > 1)
			sg_list_sz = sizeof(*qp->r_sg_list) *
				(init_attr->cap.max_recv_sge - 1);
		qp = kzalloc_node(sz + sg_list_sz, gfp, rdi->dparms.node);
		if (!qp)
			goto bail_swq;

		RCU_INIT_POINTER(qp->next, NULL);
		if (init_attr->qp_type == IB_QPT_RC) {
			qp->s_ack_queue =
				kzalloc_node(
					sizeof(*qp->s_ack_queue) *
					 rvt_max_atomic(rdi),
					gfp,
					rdi->dparms.node);
			if (!qp->s_ack_queue)
				goto bail_qp;
		}

		/*
		 * Driver needs to set up its private QP structure and do any
		 * initialization that is needed.
		 */
		priv = rdi->driver_f.qp_priv_alloc(rdi, qp, gfp);
		if (!priv)
			goto bail_qp;
		qp->priv = priv;
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
					 1000UL);
		if (init_attr->srq) {
			sz = 0;
		} else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct rvt_rwqe);
			if (udata)
				qp->r_rq.wq = vmalloc_user(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz);
			else if (gfp == GFP_NOIO)
				qp->r_rq.wq = __vmalloc(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz,
						gfp | __GFP_ZERO, PAGE_KERNEL);
			else
				qp->r_rq.wq = vzalloc_node(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz,
						rdi->dparms.node);
			if (!qp->r_rq.wq)
				goto bail_driver_priv;
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->r_lock);
		spin_lock_init(&qp->s_hlock);
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		init_waitqueue_head(&qp->wait);
		init_timer(&qp->s_timer);
		qp->s_timer.data = (unsigned long)qp;
		INIT_LIST_HEAD(&qp->rspwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = init_attr->cap.max_send_wr + 1;
		qp->s_avail = init_attr->cap.max_send_wr;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = RVT_S_SIGNAL_REQ_WR;

		err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
				init_attr->qp_type,
				init_attr->port_num, gfp);
		if (err < 0) {
			ret = ERR_PTR(err);
			goto bail_rq_wq;
		}
		qp->ibqp.qp_num = err;
		qp->port_num = init_attr->port_num;
		rvt_reset_qp(rdi, qp, init_attr->qp_type);
		break;

	default:
		/* Don't support raw QPs */
		return ERR_PTR(-EINVAL);
	}

	init_attr->cap.max_inline_data = 0;

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See rvt_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		if (!qp->r_rq.wq) {
			__u64 offset = 0;

			err = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_qpn;
			}
		} else {
			u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;

			qp->ip = rvt_create_mmap_info(rdi, s,
						      ibpd->uobject->context,
						      qp->r_rq.wq);
			if (!qp->ip) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_qpn;
			}

			err = ib_copy_to_udata(udata, &qp->ip->offset,
					       sizeof(qp->ip->offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		}
		qp->pid = current->pid;
	}

	spin_lock(&rdi->n_qps_lock);
	if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
		spin_unlock(&rdi->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	rdi->n_qps_allocated++;
	/*
	 * Maintain a busy_jiffies variable that will be added to the timeout
	 * period in mod_retry_timer and add_retry_timer. This busy jiffies
	 * is scaled by the number of rc qps created for the device to reduce
	 * the number of timeouts occurring when there is a large number of
	 * qps. busy_jiffies is incremented every rc qp scaling interval.
	 * The scaling interval is selected based on extensive performance
	 * evaluation of targeted workloads.
	 */
	if (init_attr->qp_type == IB_QPT_RC) {
		rdi->n_rc_qps++;
		rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
	}
	spin_unlock(&rdi->n_qps_lock);

	if (qp->ip) {
		spin_lock_irq(&rdi->pending_lock);
		list_add(&qp->ip->pending_mmaps, &rdi->pending_mmaps);
		spin_unlock_irq(&rdi->pending_lock);
	}

	ret = &qp->ibqp;

	/*
	 * We have our QP and it's good, now keep track of what types of
	 * opcodes can be processed on this QP. We do this by keeping track of
	 * what the 3 high order bits of the opcode are.
	 */
	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_UD:
		qp->allowed_ops = IB_OPCODE_UD;
		break;
	case IB_QPT_RC:
		qp->allowed_ops = IB_OPCODE_RC;
		break;
	case IB_QPT_UC:
		qp->allowed_ops = IB_OPCODE_UC;
		break;
	default:
		ret = ERR_PTR(-EINVAL);
		goto bail_ip;
	}

	return ret;

bail_ip:
	kref_put(&qp->ip->ref, rvt_release_mmap_info);

bail_qpn:
	free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

bail_rq_wq:
	vfree(qp->r_rq.wq);

bail_driver_priv:
	rdi->driver_f.qp_priv_free(rdi, qp);

bail_qp:
	kfree(qp->s_ack_queue);
	kfree(qp);

bail_swq:
	vfree(swq);

	return ret;
}

/**
 * rvt_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 *
 * Return: true if last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
{
	struct ib_wc wc;
	int ret = 0;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
		goto bail;

	qp->state = IB_QPS_ERR;

	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
		del_timer(&qp->s_timer);
	}

	if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
		qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;

	rdi->driver_f.notify_error_qp(qp);

	/* Schedule the sending tasklet to drain the send work queue. */
	if (ACCESS_ONCE(qp->s_last) != qp->s_head)
		rdi->driver_f.schedule_send(qp);

	rvt_clear_mr_refs(qp, 0);

	memset(&wc, 0, sizeof(wc));
	wc.qp = &qp->ibqp;
	wc.opcode = IB_WC_RECV;

	if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
		wc.wr_id = qp->r_wr_id;
		wc.status = err;
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	if (qp->r_rq.wq) {
		struct rvt_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		while (tail != head) {
			wc.wr_id = rvt_get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;

		spin_unlock(&qp->r_rq.lock);
	} else if (qp->ibqp.event_handler) {
		ret = 1;
	}

bail:
	return ret;
}
EXPORT_SYMBOL(rvt_error_qp);

/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
	unsigned long flags;

	atomic_inc(&qp->refcount);
	spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);

	if (qp->ibqp.qp_num <= 1) {
		rcu_assign_pointer(rvp->qp[qp->ibqp.qp_num], qp);
	} else {
		u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);

		qp->next = rdi->qp_dev->qp_table[n];
		rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp);
		trace_rvt_qpinsert(qp, n);
	}

	spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
}

/**
 * rvt_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Return: 0 on success, otherwise returns an errno.
 */
int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_udata *udata)
{
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct ib_event ev;
	int lastwqe = 0;
	int mig = 0;
	int pmtu = 0; /* for gcc warning only */
	enum rdma_link_layer link;

	link = rdma_port_get_link_layer(ibqp->device, qp->port_num);

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_hlock);
	spin_lock(&qp->s_lock);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask, link))
		goto inval;

	if (rdi->driver_f.check_modify_qp &&
	    rdi->driver_f.check_modify_qp(qp, attr, attr_mask, udata))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		if (attr->ah_attr.dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
			goto inval;
		if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
			goto inval;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_ah_attr.dlid >=
		    be16_to_cpu(IB_MULTICAST_LID_BASE))
			goto inval;
		if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
			goto inval;
		if (attr->alt_pkey_index >= rvt_get_npkeys(rdi))
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= rvt_get_npkeys(rdi))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (qp->ibqp.qp_type == IB_QPT_SMI ||
		    qp->ibqp.qp_type == IB_QPT_GSI ||
		    attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	if (attr_mask & IB_QP_DEST_QPN)
		if (attr->dest_qp_num > RVT_QPN_MASK)
			goto inval;

	if (attr_mask & IB_QP_RETRY_CNT)
		if (attr->retry_cnt > 7)
			goto inval;

	if (attr_mask & IB_QP_RNR_RETRY)
		if (attr->rnr_retry > 7)
			goto inval;

	/*
	 * Don't allow invalid path_mtu values. OK to set greater
	 * than the active mtu (or even the max_cap, if we have tuned
	 * that to a small mtu). We'll set qp->path_mtu
	 * to the lesser of requested attribute mtu and active,
	 * for packetizing messages.
	 * Note that the QP port has to be set in INIT and MTU in RTR.
	 */
	if (attr_mask & IB_QP_PATH_MTU) {
		pmtu = rdi->driver_f.get_pmtu_from_attr(rdi, qp, attr);
		if (pmtu < 0)
			goto inval;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		if (attr->path_mig_state == IB_MIG_REARM) {
			if (qp->s_mig_state == IB_MIG_ARMED)
				goto inval;
			if (new_state != IB_QPS_RTS)
				goto inval;
		} else if (attr->path_mig_state == IB_MIG_MIGRATED) {
			if (qp->s_mig_state == IB_MIG_REARM)
				goto inval;
			if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
				goto inval;
			if (qp->s_mig_state == IB_MIG_ARMED)
				mig = 1;
		} else {
			goto inval;
		}
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > rdi->dparms.max_rdma_atomic)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		if (qp->state != IB_QPS_RESET)
			rvt_reset_qp(rdi, qp, ibqp->qp_type);
		break;

	case IB_QPS_RTR:
		/* Allow event to re-trigger if QP set to RTR more than once */
		qp->r_flags &= ~RVT_R_COMM_EST;
		qp->state = new_state;
		break;

	case IB_QPS_SQD:
		qp->s_draining = qp->s_last != qp->s_cur;
		qp->state = new_state;
		break;

	case IB_QPS_SQE:
		if (qp->ibqp.qp_type == IB_QPT_RC)
			goto inval;
		qp->state = new_state;
		break;

	case IB_QPS_ERR:
		lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		break;

	default:
		qp->state = new_state;
		break;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_PORT)
		qp->port_num = attr->port_num;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_next_psn = attr->sq_psn & rdi->dparms.psn_modify_mask;
		qp->s_psn = qp->s_next_psn;
		qp->s_sending_psn = qp->s_next_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
		qp->s_sending_hpsn = qp->s_last_psn;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn & rdi->dparms.psn_modify_mask;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV) {
		qp->remote_ah_attr = attr->ah_attr;
		qp->s_srate = attr->ah_attr.static_rate;
		qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		qp->alt_ah_attr = attr->alt_ah_attr;
		qp->s_alt_pkey_index = attr->alt_pkey_index;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		qp->s_mig_state = attr->path_mig_state;
		if (mig) {
			qp->remote_ah_attr = qp->alt_ah_attr;
			qp->port_num = qp->alt_ah_attr.port_num;
			qp->s_pkey_index = qp->s_alt_pkey_index;
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		qp->pmtu = rdi->driver_f.mtu_from_qp(rdi, qp, pmtu);
		qp->path_mtu = rdi->driver_f.mtu_to_path_mtu(qp->pmtu);
		qp->log_pmtu = ilog2(qp->pmtu);
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		qp->s_retry_cnt = attr->retry_cnt;
		qp->s_retry = attr->retry_cnt;
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry_cnt = attr->rnr_retry;
		qp->s_rnr_retry = attr->rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT) {
		qp->timeout = attr->timeout;
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
					 1000UL);
	}
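
	/*
	 * A sketch of the arithmetic above: the IB local ACK timeout is
	 * defined as 4.096 usec * 2^timeout, so e.g. attr->timeout == 14
	 * works out to roughly 67 ms worth of jiffies.
	 */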

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	if (rdi->driver_f.modify_qp)
		rdi->driver_f.modify_qp(qp, attr, attr_mask, udata);

	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		rvt_insert_qp(rdi, qp);

	if (lastwqe) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	if (mig) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_PATH_MIG;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	return 0;

inval:
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);
	return -EINVAL;
}

/**
 * rvt_free_qpn - Free a qpn from the bit map
 * @qpt: QP table
 * @qpn: queue pair number to free
 */
static void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
	struct rvt_qpn_map *map;

	map = qpt->map + qpn / RVT_BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
}

/**
 * rvt_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 *
 * Return: 0 on success.
 */
int rvt_destroy_qp(struct ib_qp *ibqp)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_hlock);
	spin_lock(&qp->s_lock);
	rvt_reset_qp(rdi, qp, ibqp->qp_type);
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);

	/* qpn is now available for use again */
	rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

	spin_lock(&rdi->n_qps_lock);
	rdi->n_qps_allocated--;
	if (qp->ibqp.qp_type == IB_QPT_RC) {
		rdi->n_rc_qps--;
		rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
	}
	spin_unlock(&rdi->n_qps_lock);

	if (qp->ip)
		kref_put(&qp->ip->ref, rvt_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	vfree(qp->s_wq);
	rdi->driver_f.qp_priv_free(rdi, qp);
	kfree(qp->s_ack_queue);
	kfree(qp);
	return 0;
}

/**
 * rvt_query_qp - query an ibqp
 * @ibqp: IB qp to query
 * @attr: attr struct to fill in
 * @attr_mask: attr mask ignored
 * @init_attr: struct to fill in
 *
 * Return: always 0
 */
int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		 int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	attr->qp_state = qp->state;
	attr->cur_qp_state = attr->qp_state;
	attr->path_mtu = qp->path_mtu;
	attr->path_mig_state = qp->s_mig_state;
	attr->qkey = qp->qkey;
	attr->rq_psn = qp->r_psn & rdi->dparms.psn_mask;
	attr->sq_psn = qp->s_next_psn & rdi->dparms.psn_mask;
	attr->dest_qp_num = qp->remote_qpn;
	attr->qp_access_flags = qp->qp_access_flags;
	attr->cap.max_send_wr = qp->s_size - 1;
	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
	attr->cap.max_send_sge = qp->s_max_sge;
	attr->cap.max_recv_sge = qp->r_rq.max_sge;
	attr->cap.max_inline_data = 0;
	attr->ah_attr = qp->remote_ah_attr;
	attr->alt_ah_attr = qp->alt_ah_attr;
	attr->pkey_index = qp->s_pkey_index;
	attr->alt_pkey_index = qp->s_alt_pkey_index;
	attr->en_sqd_async_notify = 0;
	attr->sq_draining = qp->s_draining;
	attr->max_rd_atomic = qp->s_max_rd_atomic;
	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
	attr->min_rnr_timer = qp->r_min_rnr_timer;
	attr->port_num = qp->port_num;
	attr->timeout = qp->timeout;
	attr->retry_cnt = qp->s_retry_cnt;
	attr->rnr_retry = qp->s_rnr_retry_cnt;
	attr->alt_port_num = qp->alt_ah_attr.port_num;
	attr->alt_timeout = qp->alt_timeout;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->cap = attr->cap;
	if (qp->s_flags & RVT_S_SIGNAL_REQ_WR)
		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	else
		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->port_num = qp->port_num;
	return 0;
}

/**
 * rvt_post_recv - post a receive on a QP
 * @ibqp: the QP to post the receive on
 * @wr: the WR to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 *
 * Return: 0 on success otherwise errno
 */
int rvt_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		  struct ib_recv_wr **bad_wr)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_rwq *wq = qp->r_rq.wq;
	unsigned long flags;
	int qp_err_flush = (ib_rvt_state_ops[qp->state] & RVT_FLUSH_RECV) &&
				!qp->ibqp.srq;

	/* Check that state is OK to post receive. */
	if (!(ib_rvt_state_ops[qp->state] & RVT_POST_RECV_OK) || !wq) {
		*bad_wr = wr;
		return -EINVAL;
	}

	for (; wr; wr = wr->next) {
		struct rvt_rwqe *wqe;
		u32 next;
		int i;

		if ((unsigned)wr->num_sge > qp->r_rq.max_sge) {
			*bad_wr = wr;
			return -EINVAL;
		}

		spin_lock_irqsave(&qp->r_rq.lock, flags);
		next = wq->head + 1;
		if (next >= qp->r_rq.size)
			next = 0;
		if (next == wq->tail) {
			spin_unlock_irqrestore(&qp->r_rq.lock, flags);
			*bad_wr = wr;
			return -ENOMEM;
		}
		if (unlikely(qp_err_flush)) {
			struct ib_wc wc;

			memset(&wc, 0, sizeof(wc));
			wc.qp = &qp->ibqp;
			wc.opcode = IB_WC_RECV;
			wc.wr_id = wr->wr_id;
			wc.status = IB_WC_WR_FLUSH_ERR;
			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
		} else {
			wqe = rvt_get_rwqe_ptr(&qp->r_rq, wq->head);
			wqe->wr_id = wr->wr_id;
			wqe->num_sge = wr->num_sge;
			for (i = 0; i < wr->num_sge; i++)
				wqe->sg_list[i] = wr->sg_list[i];
			/*
			 * Make sure queue entry is written
			 * before the head index.
			 */
			smp_wmb();
			wq->head = next;
		}
		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
	}
	return 0;
}

/**
 * qp_get_savail - return number of avail send entries
 * @qp: the qp
 *
 * This assumes the s_hlock is held but the s_last
 * qp variable is uncontrolled.
 */
static inline u32 qp_get_savail(struct rvt_qp *qp)
{
	u32 slast;
	u32 ret;

	smp_read_barrier_depends(); /* see rc.c */
	slast = ACCESS_ONCE(qp->s_last);
	if (qp->s_head >= slast)
		ret = qp->s_size - (qp->s_head - slast);
	else
		ret = slast - qp->s_head;
	return ret - 1;
}

/**
 * rvt_post_one_wr - post one RC, UC, or UD send work request
 * @qp: the QP to post on
 * @wr: the work request to send
 */
static int rvt_post_one_wr(struct rvt_qp *qp,
			   struct ib_send_wr *wr,
			   int *call_send)
{
	struct rvt_swqe *wqe;
	u32 next;
	int i;
	int j;
	int acc;
	struct rvt_lkey_table *rkt;
	struct rvt_pd *pd;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
	u8 log_pmtu;
	int ret;

	/* IB spec says that num_sge == 0 is OK. */
	if (unlikely(wr->num_sge > qp->s_max_sge))
		return -EINVAL;

	/*
	 * Don't allow RDMA reads or atomic operations on UC or
	 * undefined operations.
	 * Make sure buffer is large enough to hold the result for atomics.
	 */
	if (qp->ibqp.qp_type == IB_QPT_UC) {
		if ((unsigned)wr->opcode >= IB_WR_RDMA_READ)
			return -EINVAL;
	} else if (qp->ibqp.qp_type != IB_QPT_RC) {
		/* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */
		if (wr->opcode != IB_WR_SEND &&
		    wr->opcode != IB_WR_SEND_WITH_IMM)
			return -EINVAL;
		/* Check UD destination address PD */
		if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
			return -EINVAL;
	} else if ((unsigned)wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) {
		return -EINVAL;
	} else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
		   (wr->num_sge == 0 ||
		    wr->sg_list[0].length < sizeof(u64) ||
		    wr->sg_list[0].addr & (sizeof(u64) - 1))) {
		return -EINVAL;
	} else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic) {
		return -EINVAL;
	}
	/* check for avail */
	if (unlikely(!qp->s_avail)) {
		qp->s_avail = qp_get_savail(qp);
		if (WARN_ON(qp->s_avail > (qp->s_size - 1)))
			rvt_pr_err(rdi,
				   "More avail entries than QP RB size.\nQP: %u, size: %u, avail: %u\nhead: %u, tail: %u, cur: %u, acked: %u, last: %u",
				   qp->ibqp.qp_num, qp->s_size, qp->s_avail,
				   qp->s_head, qp->s_tail, qp->s_cur,
				   qp->s_acked, qp->s_last);
		if (!qp->s_avail)
			return -ENOMEM;
	}
	next = qp->s_head + 1;
	if (next >= qp->s_size)
		next = 0;

	rkt = &rdi->lkey_table;
	pd = ibpd_to_rvtpd(qp->ibqp.pd);
	wqe = rvt_get_swqe_ptr(qp, qp->s_head);

	if (qp->ibqp.qp_type != IB_QPT_UC &&
	    qp->ibqp.qp_type != IB_QPT_RC)
		memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr));
	else if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
		 wr->opcode == IB_WR_RDMA_WRITE ||
		 wr->opcode == IB_WR_RDMA_READ)
		memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr));
	else if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		 wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
		memcpy(&wqe->atomic_wr, atomic_wr(wr), sizeof(wqe->atomic_wr));
	else
		memcpy(&wqe->wr, wr, sizeof(wqe->wr));

	wqe->length = 0;
	j = 0;
	if (wr->num_sge) {
		acc = wr->opcode >= IB_WR_RDMA_READ ?
			IB_ACCESS_LOCAL_WRITE : 0;
		for (i = 0; i < wr->num_sge; i++) {
			u32 length = wr->sg_list[i].length;
			int ok;

			if (length == 0)
				continue;
			ok = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j],
					 &wr->sg_list[i], acc);
			if (!ok) {
				ret = -EINVAL;
				goto bail_inval_free;
			}
			wqe->length += length;
			j++;
		}
		wqe->wr.num_sge = j;
	}

	/* general part of wqe valid - allow for driver checks */
	if (rdi->driver_f.check_send_wqe) {
		ret = rdi->driver_f.check_send_wqe(qp, wqe);
		if (ret < 0)
			goto bail_inval_free;
		if (ret)
			*call_send = ret;
	}

	log_pmtu = qp->log_pmtu;
	if (qp->ibqp.qp_type != IB_QPT_UC &&
	    qp->ibqp.qp_type != IB_QPT_RC) {
		struct rvt_ah *ah = ibah_to_rvtah(wqe->ud_wr.ah);

		log_pmtu = ah->log_pmtu;
		atomic_inc(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount);
	}

	wqe->ssn = qp->s_ssn++;
	wqe->psn = qp->s_next_psn;
	wqe->lpsn = wqe->psn +
			(wqe->length ? ((wqe->length - 1) >> log_pmtu) : 0);
	qp->s_next_psn = wqe->lpsn + 1;
	trace_rvt_post_one_wr(qp, wqe);
	smp_wmb(); /* see request builders */
	qp->s_avail--;
	qp->s_head = next;

	return 0;

bail_inval_free:
	/* release mr holds */
	while (j) {
		struct rvt_sge *sge = &wqe->sg_list[--j];

		rvt_put_mr(sge->mr);
	}
	return ret;
}

/**
 * rvt_post_send - post a send on a QP
 * @ibqp: the QP to post the send on
 * @wr: the list of work requests to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 *
 * Return: 0 on success else errno
 */
int rvt_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		  struct ib_send_wr **bad_wr)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	unsigned long flags = 0;
	int call_send;
	unsigned nreq = 0;
	int err = 0;

	spin_lock_irqsave(&qp->s_hlock, flags);

	/*
	 * Ensure QP state is such that we can send. If not bail out early,
	 * there is no need to do this every time we post a send.
	 */
	if (unlikely(!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))) {
		spin_unlock_irqrestore(&qp->s_hlock, flags);
		return -EINVAL;
	}

	/*
	 * If the send queue is empty and we only have a single WR, then just
	 * go ahead and kick the send engine into gear. Otherwise we will
	 * always just schedule the send to happen later.
	 */
	call_send = qp->s_head == ACCESS_ONCE(qp->s_last) && !wr->next;

	for (; wr; wr = wr->next) {
		err = rvt_post_one_wr(qp, wr, &call_send);
		if (unlikely(err)) {
			*bad_wr = wr;
			goto bail;
		}
		nreq++;
	}
bail:
	spin_unlock_irqrestore(&qp->s_hlock, flags);
	if (nreq) {
		if (call_send)
			rdi->driver_f.do_send(qp);
		else
			rdi->driver_f.schedule_send_no_lock(qp);
	}
	return err;
}

/**
 * rvt_post_srq_recv - post a receive on a shared receive queue
 * @ibsrq: the SRQ to post the receive on
 * @wr: the list of work requests to post
 * @bad_wr: A pointer to the first WR to cause a problem is put here
 *
 * This may be called from interrupt context.
 *
 * Return: 0 on success else errno
 */
int rvt_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
	struct rvt_rwq *wq;
	unsigned long flags;

	for (; wr; wr = wr->next) {
		struct rvt_rwqe *wqe;
		u32 next;
		int i;

		if ((unsigned)wr->num_sge > srq->rq.max_sge) {
			*bad_wr = wr;
			return -EINVAL;
		}

		spin_lock_irqsave(&srq->rq.lock, flags);
		wq = srq->rq.wq;
		next = wq->head + 1;
		if (next >= srq->rq.size)
			next = 0;
		if (next == wq->tail) {
			spin_unlock_irqrestore(&srq->rq.lock, flags);
			*bad_wr = wr;
			return -ENOMEM;
		}

		wqe = rvt_get_rwqe_ptr(&srq->rq, wq->head);
		wqe->wr_id = wr->wr_id;
		wqe->num_sge = wr->num_sge;
		for (i = 0; i < wr->num_sge; i++)
			wqe->sg_list[i] = wr->sg_list[i];
		/* Make sure queue entry is written before the head index. */
		smp_wmb();
		wq->head = next;
		spin_unlock_irqrestore(&srq->rq.lock, flags);
	}
	return 0;
}