/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/hash.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>
#include "qp.h"
#include "vt.h"
#include "trace.h"

/*
 * Note that it is OK to post send work requests in the SQE and ERR
 * states; rvt_do_send() will process them and generate error
 * completions as per IB 1.2 C10-96.
 */
const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = 0,
	[IB_QPS_INIT] = RVT_POST_RECV_OK,
	[IB_QPS_RTR] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK,
	[IB_QPS_RTS] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK |
	    RVT_PROCESS_NEXT_SEND_OK,
	[IB_QPS_SQD] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK,
	[IB_QPS_SQE] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_FLUSH_SEND,
	[IB_QPS_ERR] = RVT_POST_RECV_OK | RVT_FLUSH_RECV |
	    RVT_POST_SEND_OK | RVT_FLUSH_SEND,
};
EXPORT_SYMBOL(ib_rvt_state_ops);
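/*
 * Example (illustrative; it mirrors the checks rvt_post_recv() and
 * rvt_post_send() make below): callers gate work on this table, e.g.
 *
 *	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
 *		goto drop;
 */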
static void get_map_page(struct rvt_qpn_table *qpt,
			 struct rvt_qpn_map *map,
			 gfp_t gfp)
{
	unsigned long page = get_zeroed_page(gfp);

	/*
	 * Free the page if someone raced with us installing it.
	 */

	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}

/**
 * init_qpn_table - initialize the QP number table for a device
 * @rdi: rvt device info structure
 * @qpt: the QPN table
 */
static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
{
	u32 offset, i;
	struct rvt_qpn_map *map;
	int ret = 0;

	if (rdi->dparms.qpn_res_end < rdi->dparms.qpn_res_start)
		return -EINVAL;

	spin_lock_init(&qpt->lock);

	qpt->last = rdi->dparms.qpn_start;
	qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift;

	/*
	 * Drivers may want some QPs beyond what we need for verbs; let them
	 * use our QPN table rather than keeping a second one.  Go ahead and
	 * mark the bitmaps for those QPNs here.  The reserved range must be
	 * *after* the range from which verbs will pick.
	 */

	/* Figure out number of bit maps needed before reserved range */
	qpt->nmaps = rdi->dparms.qpn_res_start / RVT_BITS_PER_PAGE;

	/* This should always be zero */
	offset = rdi->dparms.qpn_res_start & RVT_BITS_PER_PAGE_MASK;

	/* Starting with the first reserved bit map */
	map = &qpt->map[qpt->nmaps];

	rvt_pr_info(rdi, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n",
		    rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
	for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
		if (!map->page) {
			get_map_page(qpt, map, GFP_KERNEL);
			if (!map->page) {
				ret = -ENOMEM;
				break;
			}
		}
		set_bit(offset, map->page);
		offset++;
		if (offset == RVT_BITS_PER_PAGE) {
			/* next page */
			qpt->nmaps++;
			map++;
			offset = 0;
		}
	}
	return ret;
}

/**
 * free_qpn_table - free the QP number table for a device
 * @qpt: the QPN table
 */
static void free_qpn_table(struct rvt_qpn_table *qpt)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
		free_page((unsigned long)qpt->map[i].page);
}
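/*
 * Illustrative QPN-to-bitmap math used throughout this file: QPN n maps
 * to page n / RVT_BITS_PER_PAGE, bit n & RVT_BITS_PER_PAGE_MASK.  With
 * 4K pages (32768 bits per map), e.g. QPN 70000 is bit 4464 of map[2].
 */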
int rvt_driver_qp_init(struct rvt_dev_info *rdi)
{
	int i;
	int ret = -ENOMEM;

	if (rdi->flags & RVT_FLAG_QP_INIT_DRIVER) {
		rvt_pr_info(rdi, "Driver is doing QP init.\n");
		return 0;
	}

	if (!rdi->dparms.qp_table_size)
		return -EINVAL;

	/*
	 * If the driver is not doing any QP allocation then make sure it is
	 * providing the necessary QP functions.
	 */
	if (!rdi->driver_f.free_all_qps ||
	    !rdi->driver_f.qp_priv_alloc ||
	    !rdi->driver_f.qp_priv_free ||
	    !rdi->driver_f.notify_qp_reset)
		return -EINVAL;

	/* allocate parent object */
	rdi->qp_dev = kzalloc_node(sizeof(*rdi->qp_dev), GFP_KERNEL,
				   rdi->dparms.node);
	if (!rdi->qp_dev)
		return -ENOMEM;

	/* allocate hash table */
	rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size;
	rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size);
	rdi->qp_dev->qp_table =
		kmalloc_node(rdi->qp_dev->qp_table_size *
			     sizeof(*rdi->qp_dev->qp_table),
			     GFP_KERNEL, rdi->dparms.node);
	if (!rdi->qp_dev->qp_table)
		goto no_qp_table;

	for (i = 0; i < rdi->qp_dev->qp_table_size; i++)
		RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL);

	spin_lock_init(&rdi->qp_dev->qpt_lock);

	/* initialize qpn map */
	if (init_qpn_table(rdi, &rdi->qp_dev->qpn_table))
		goto fail_table;

	spin_lock_init(&rdi->n_qps_lock);

	return 0;

fail_table:
	kfree(rdi->qp_dev->qp_table);
	free_qpn_table(&rdi->qp_dev->qpn_table);

no_qp_table:
	kfree(rdi->qp_dev);

	return ret;
}

/**
 * rvt_free_all_qps - check for QPs still in use
 * @rdi: rvt device info structure
 *
 * There should not be any QPs still in use.
 * Free memory for table.
 */
static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
{
	unsigned long flags;
	struct rvt_qp *qp;
	unsigned n, qp_inuse = 0;
	spinlock_t *ql; /* work around too long line below */

	if (rdi->driver_f.free_all_qps)
		qp_inuse = rdi->driver_f.free_all_qps(rdi);

	qp_inuse += rvt_mcast_tree_empty(rdi);

	if (!rdi->qp_dev)
		return qp_inuse;

	ql = &rdi->qp_dev->qpt_lock;
	spin_lock_irqsave(ql, flags);
	for (n = 0; n < rdi->qp_dev->qp_table_size; n++) {
		qp = rcu_dereference_protected(rdi->qp_dev->qp_table[n],
					       lockdep_is_held(ql));
		RCU_INIT_POINTER(rdi->qp_dev->qp_table[n], NULL);

		for (; qp; qp = rcu_dereference_protected(qp->next,
							  lockdep_is_held(ql)))
			qp_inuse++;
	}
	spin_unlock_irqrestore(ql, flags);
	synchronize_rcu();
	return qp_inuse;
}
void rvt_qp_exit(struct rvt_dev_info *rdi)
{
	u32 qps_inuse = rvt_free_all_qps(rdi);

	if (qps_inuse)
		rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
			   qps_inuse);
	if (!rdi->qp_dev)
		return;

	if (rdi->flags & RVT_FLAG_QP_INIT_DRIVER)
		return; /* driver did the qp init so nothing else to do */

	kfree(rdi->qp_dev->qp_table);
	free_qpn_table(&rdi->qp_dev->qpn_table);
	kfree(rdi->qp_dev);
}

static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
			      struct rvt_qpn_map *map, unsigned off)
{
	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}

/**
 * alloc_qpn - Allocate the next available QPN, or zero/one for QP type
 *	       IB_QPT_SMI/IB_QPT_GSI
 * @rdi: rvt device info structure
 * @qpt: queue pair number table pointer
 * @type: the QP type
 * @port_num: IB port number, 1 based, comes from core
 * @gfp: memory allocation flags
 *
 * Return: The queue pair number
 */
static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
		     enum ib_qp_type type, u8 port_num, gfp_t gfp)
{
	u32 i, offset, max_scan, qpn;
	struct rvt_qpn_map *map;
	u32 ret;

	if (rdi->driver_f.alloc_qpn)
		return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num, gfp);

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port_num - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

	qpn = qpt->last + qpt->incr;
	if (qpn >= RVT_QPN_MAX)
		qpn = qpt->incr | ((qpt->last & 1) ^ 1);
	/* offset carries bit 0 */
	offset = qpn & RVT_BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map, gfp);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset += qpt->incr;
			/*
			 * This qpn might be bogus if offset >=
			 * RVT_BITS_PER_PAGE.  That is OK; it gets
			 * re-assigned below.
			 */
			qpn = mk_qpn(qpt, map, offset);
		} while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			/* start at incr with current bit 0 */
			offset = qpt->incr | (offset & 1);
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			/* start at incr with current bit 0 */
			offset = qpt->incr | (offset & 1);
		} else {
			map = &qpt->map[0];
			/* wrap to first map page, invert bit 0 */
			offset = qpt->incr | ((offset & 1) ^ 1);
		}
		/* there can be no bits at shift and below */
		WARN_ON(offset & (rdi->dparms.qos_shift - 1));
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}
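/*
 * Illustrative layout of the qpt->flags bits claimed above for SMI/GSI:
 * n = 1 << (qpn + 2 * (port_num - 1)), so port 1 uses bit 0 (SMI,
 * QPN 0) and bit 1 (GSI, QPN 1), port 2 uses bits 2 and 3, and so on.
 */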
static void free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
	struct rvt_qpn_map *map;

	map = qpt->map + qpn / RVT_BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
}

/**
 * rvt_reset_qp - initialize the QP state to the reset state
 * @rdi: rvt device info structure
 * @qp: the QP to reset
 * @type: the QP type
 *
 * The r_lock and s_lock are required to be held by the caller.
 */
void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
		  enum ib_qp_type type)
{
	if (qp->state != IB_QPS_RESET) {
		qp->state = IB_QPS_RESET;

		/* Let drivers flush their waitlist */
		rdi->driver_f.flush_qp_waiters(qp);
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
		spin_unlock(&qp->s_lock);
		spin_unlock_irq(&qp->r_lock);

		/* Stop the send queue and the retry timer */
		rdi->driver_f.stop_send_queue(qp);
		del_timer_sync(&qp->s_timer);

		/* Wait for things to stop */
		rdi->driver_f.quiesce_qp(qp);

		/* take qp out of the hash and wait for it to be unused */
		rvt_remove_qp(rdi, qp);
		wait_event(qp->wait, !atomic_read(&qp->refcount));

		/* grab the lock b/c it was locked at call time */
		spin_lock_irq(&qp->r_lock);
		spin_lock(&qp->s_lock);

		rvt_clear_mr_refs(qp, 1);
	}

	/*
	 * Let the driver do any tear down it needs to for a qp
	 * that has been reset
	 */
	rdi->driver_f.notify_qp_reset(qp);

	qp->remote_qpn = 0;
	qp->qkey = 0;
	qp->qp_access_flags = 0;
	qp->s_flags &= RVT_S_SIGNAL_REQ_WR;
	qp->s_hdrwords = 0;
	qp->s_wqe = NULL;
	qp->s_draining = 0;
	qp->s_next_psn = 0;
	qp->s_last_psn = 0;
	qp->s_sending_psn = 0;
	qp->s_sending_hpsn = 0;
	qp->s_psn = 0;
	qp->r_psn = 0;
	qp->r_msn = 0;
	if (type == IB_QPT_RC) {
		qp->s_state = IB_OPCODE_RC_SEND_LAST;
		qp->r_state = IB_OPCODE_RC_SEND_LAST;
	} else {
		qp->s_state = IB_OPCODE_UC_SEND_LAST;
		qp->r_state = IB_OPCODE_UC_SEND_LAST;
	}
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->r_nak_state = 0;
	qp->r_aflags = 0;
	qp->r_flags = 0;
	qp->s_head = 0;
	qp->s_tail = 0;
	qp->s_cur = 0;
	qp->s_acked = 0;
	qp->s_last = 0;
	qp->s_ssn = 1;
	qp->s_lsn = 0;
	qp->s_mig_state = IB_MIG_MIGRATED;
	memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
	qp->r_head_ack_queue = 0;
	qp->s_tail_ack_queue = 0;
	qp->s_num_rd_atomic = 0;
	if (qp->r_rq.wq) {
		qp->r_rq.wq->head = 0;
		qp->r_rq.wq->tail = 0;
	}
	qp->r_sge.num_sge = 0;
}
EXPORT_SYMBOL(rvt_reset_qp);
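/*
 * Example caller pattern (it mirrors rvt_destroy_qp() below): the r_lock
 * must be taken first with interrupts disabled, then the s_lock:
 *
 *	spin_lock_irq(&qp->r_lock);
 *	spin_lock(&qp->s_lock);
 *	rvt_reset_qp(rdi, qp, qp->ibqp.qp_type);
 *	spin_unlock(&qp->s_lock);
 *	spin_unlock_irq(&qp->r_lock);
 */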
/**
 * rvt_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Queue pair creation is mostly an rvt issue.  However, drivers have their
 * own unique idea of what queue pair numbers mean.  For instance, there is
 * a reserved range for PSM.
 *
 * Returns the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata)
{
	struct rvt_qp *qp;
	int err;
	struct rvt_swqe *swq = NULL;
	size_t sz;
	size_t sg_list_sz;
	struct ib_qp *ret = ERR_PTR(-ENOMEM);
	struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
	void *priv = NULL;
	gfp_t gfp;

	if (!rdi)
		return ERR_PTR(-EINVAL);

	if (init_attr->cap.max_send_sge > rdi->dparms.props.max_sge ||
	    init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
	    init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO))
		return ERR_PTR(-EINVAL);

	/* GFP_NOIO is applicable to RC QPs only */
	if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
	    init_attr->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);

	gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
						GFP_NOIO : GFP_KERNEL;

	/* Check receive queue parameters if no SRQ is specified. */
	if (!init_attr->srq) {
		if (init_attr->cap.max_recv_sge > rdi->dparms.props.max_sge ||
		    init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
			return ERR_PTR(-EINVAL);

		if (init_attr->cap.max_send_sge +
		    init_attr->cap.max_send_wr +
		    init_attr->cap.max_recv_sge +
		    init_attr->cap.max_recv_wr == 0)
			return ERR_PTR(-EINVAL);
	}

	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (init_attr->port_num == 0 ||
		    init_attr->port_num > ibpd->device->phys_port_cnt)
			return ERR_PTR(-EINVAL);
		/* fall through: SMI/GSI QPs share the setup path below */
	case IB_QPT_UC:
	case IB_QPT_RC:
	case IB_QPT_UD:
		sz = sizeof(struct rvt_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct rvt_swqe);
		if (gfp == GFP_NOIO)
			swq = __vmalloc(
				(init_attr->cap.max_send_wr + 1) * sz,
				gfp, PAGE_KERNEL);
		else
			swq = vmalloc_node(
				(init_attr->cap.max_send_wr + 1) * sz,
				rdi->dparms.node);
		if (!swq)
			return ERR_PTR(-ENOMEM);

		sz = sizeof(*qp);
		sg_list_sz = 0;
		if (init_attr->srq) {
			struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);

			if (srq->rq.max_sge > 1)
				sg_list_sz = sizeof(*qp->r_sg_list) *
					(srq->rq.max_sge - 1);
		} else if (init_attr->cap.max_recv_sge > 1)
			sg_list_sz = sizeof(*qp->r_sg_list) *
				(init_attr->cap.max_recv_sge - 1);
		qp = kzalloc_node(sz + sg_list_sz, gfp, rdi->dparms.node);
		if (!qp)
			goto bail_swq;

		RCU_INIT_POINTER(qp->next, NULL);

		/*
		 * Driver needs to set up its private QP structure and do any
		 * initialization that is needed.
		 */
		priv = rdi->driver_f.qp_priv_alloc(rdi, qp, gfp);
		if (!priv)
			goto bail_qp;
		qp->priv = priv;
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
					 1000UL);
		if (init_attr->srq) {
			sz = 0;
		} else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct rvt_rwqe);
			if (udata)
				qp->r_rq.wq = vmalloc_user(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz);
			else if (gfp == GFP_NOIO)
				qp->r_rq.wq = __vmalloc(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz,
						gfp, PAGE_KERNEL);
			else
				qp->r_rq.wq = vmalloc_node(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz,
						rdi->dparms.node);
			if (!qp->r_rq.wq)
				goto bail_driver_priv;
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->r_lock);
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		init_waitqueue_head(&qp->wait);
		init_timer(&qp->s_timer);
		qp->s_timer.data = (unsigned long)qp;
		INIT_LIST_HEAD(&qp->rspwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = init_attr->cap.max_send_wr + 1;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = RVT_S_SIGNAL_REQ_WR;

		err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
				init_attr->qp_type,
				init_attr->port_num, gfp);
		if (err < 0) {
			ret = ERR_PTR(err);
			goto bail_rq_wq;
		}
		qp->ibqp.qp_num = err;
		qp->port_num = init_attr->port_num;
		rvt_reset_qp(rdi, qp, init_attr->qp_type);
		break;

	default:
		/* Don't support raw QPs */
		return ERR_PTR(-EINVAL);
	}

	init_attr->cap.max_inline_data = 0;

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See rvt_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		if (!qp->r_rq.wq) {
			__u64 offset = 0;

			err = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_qpn;
			}
		} else {
			u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;

			qp->ip = rvt_create_mmap_info(rdi, s,
						      ibpd->uobject->context,
						      qp->r_rq.wq);
			if (!qp->ip) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_qpn;
			}

			err = ib_copy_to_udata(udata, &qp->ip->offset,
					       sizeof(qp->ip->offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		}
	}

	spin_lock(&rdi->n_qps_lock);
	if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
		spin_unlock(&rdi->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	rdi->n_qps_allocated++;
	spin_unlock(&rdi->n_qps_lock);

	if (qp->ip) {
		spin_lock_irq(&rdi->pending_lock);
		list_add(&qp->ip->pending_mmaps, &rdi->pending_mmaps);
		spin_unlock_irq(&rdi->pending_lock);
	}

	ret = &qp->ibqp;

	/*
	 * We have our QP and it's good; now keep track of what types of
	 * opcodes can be processed on this QP.  We do this by keeping
	 * track of what the three high-order bits of the opcode are.
	 */
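	/*
	 * Illustrative: IB opcodes carry the transport in their three
	 * high-order bits (RC 0x00, UC 0x20, UD 0x60), so e.g.
	 * IB_OPCODE_RC_SEND_ONLY (0x04) masks to 0x00 while
	 * IB_OPCODE_UD_SEND_ONLY (0x64) masks to 0x60, assuming
	 * RVT_OPCODE_QP_MASK selects those bits (0xe0).
	 */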
	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_UD:
		qp->allowed_ops = IB_OPCODE_UD_SEND_ONLY & RVT_OPCODE_QP_MASK;
		break;
	case IB_QPT_RC:
		qp->allowed_ops = IB_OPCODE_RC_SEND_ONLY & RVT_OPCODE_QP_MASK;
		break;
	case IB_QPT_UC:
		qp->allowed_ops = IB_OPCODE_UC_SEND_ONLY & RVT_OPCODE_QP_MASK;
		break;
	default:
		ret = ERR_PTR(-EINVAL);
		goto bail_ip;
	}

	return ret;

bail_ip:
	kref_put(&qp->ip->ref, rvt_release_mmap_info);

bail_qpn:
	free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

bail_rq_wq:
	vfree(qp->r_rq.wq);

bail_driver_priv:
	rdi->driver_f.qp_priv_free(rdi, qp);

bail_qp:
	kfree(qp);

bail_swq:
	vfree(swq);

	return ret;
}

void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
{
	unsigned n;

	if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
		rvt_put_ss(&qp->s_rdma_read_sge);

	rvt_put_ss(&qp->r_sge);

	if (clr_sends) {
		while (qp->s_last != qp->s_head) {
			struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);
			unsigned i;

			for (i = 0; i < wqe->wr.num_sge; i++) {
				struct rvt_sge *sge = &wqe->sg_list[i];

				rvt_put_mr(sge->mr);
			}
			if (qp->ibqp.qp_type == IB_QPT_UD ||
			    qp->ibqp.qp_type == IB_QPT_SMI ||
			    qp->ibqp.qp_type == IB_QPT_GSI)
				atomic_dec(&ibah_to_rvtah(
						wqe->ud_wr.ah)->refcount);
			if (++qp->s_last >= qp->s_size)
				qp->s_last = 0;
		}
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
	}

	if (qp->ibqp.qp_type != IB_QPT_RC)
		return;

	for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
		struct rvt_ack_entry *e = &qp->s_ack_queue[n];

		if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
		    e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
	}
}
EXPORT_SYMBOL(rvt_clear_mr_refs);
/**
 * rvt_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 * Returns true if last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
{
	struct ib_wc wc;
	int ret = 0;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
		goto bail;

	qp->state = IB_QPS_ERR;

	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
		del_timer(&qp->s_timer);
	}

	if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
		qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;

	rdi->driver_f.notify_error_qp(qp);

	/* Schedule the sending tasklet to drain the send work queue. */
	if (qp->s_last != qp->s_head)
		rdi->driver_f.schedule_send(qp);

	rvt_clear_mr_refs(qp, 0);

	memset(&wc, 0, sizeof(wc));
	wc.qp = &qp->ibqp;
	wc.opcode = IB_WC_RECV;

	if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
		wc.wr_id = qp->r_wr_id;
		wc.status = err;
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	if (qp->r_rq.wq) {
		struct rvt_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		while (tail != head) {
			wc.wr_id = rvt_get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;

		spin_unlock(&qp->r_rq.lock);
	} else if (qp->ibqp.event_handler) {
		ret = 1;
	}

bail:
	return ret;
}
EXPORT_SYMBOL(rvt_error_qp);

/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
	unsigned long flags;

	atomic_inc(&qp->refcount);
	spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);

	if (qp->ibqp.qp_num <= 1) {
		rcu_assign_pointer(rvp->qp[qp->ibqp.qp_num], qp);
	} else {
		u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);

		qp->next = rdi->qp_dev->qp_table[n];
		rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp);
		trace_rvt_qpinsert(qp, n);
	}

	spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
}
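/*
 * Reader side, sketched (the real lookups live in the drivers): entries
 * are traversed under RCU, which is why rvt_remove_qp() below calls
 * synchronize_rcu() before dropping its reference:
 *
 *	rcu_read_lock();
 *	for (qp = rcu_dereference(rdi->qp_dev->qp_table[n]); qp;
 *	     qp = rcu_dereference(qp->next))
 *		if (qp->ibqp.qp_num == qpn)
 *			break;
 *	rcu_read_unlock();
 */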
/*
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive routine.
 */
void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
	u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
	unsigned long flags;
	int removed = 1;

	spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);

	if (rcu_dereference_protected(rvp->qp[0],
			lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(rvp->qp[0], NULL);
	} else if (rcu_dereference_protected(rvp->qp[1],
			lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(rvp->qp[1], NULL);
	} else {
		struct rvt_qp *q;
		struct rvt_qp __rcu **qpp;

		removed = 0;
		qpp = &rdi->qp_dev->qp_table[n];
		for (; (q = rcu_dereference_protected(*qpp,
			lockdep_is_held(&rdi->qp_dev->qpt_lock))) != NULL;
		     qpp = &q->next) {
			if (q == qp) {
				RCU_INIT_POINTER(*qpp,
				     rcu_dereference_protected(qp->next,
				     lockdep_is_held(&rdi->qp_dev->qpt_lock)));
				removed = 1;
				trace_rvt_qpremove(qp, n);
				break;
			}
		}
	}

	spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
	if (removed) {
		synchronize_rcu();
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}
EXPORT_SYMBOL(rvt_remove_qp);
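/*
 * Illustrative summary (hedged; the authoritative check is
 * ib_modify_qp_is_ok() below): the usual RC bring-up a ULP drives
 * through rvt_modify_qp() is
 *	RESET->INIT: IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT |
 *		IB_QP_ACCESS_FLAGS
 *	INIT->RTR:  IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
 *		IB_QP_DEST_QPN | IB_QP_RQ_PSN |
 *		IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER
 *	RTR->RTS:   IB_QP_STATE | IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
 *		IB_QP_RNR_RETRY | IB_QP_SQ_PSN | IB_QP_MAX_QP_RD_ATOMIC
 */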
/**
 * rvt_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_udata *udata)
{
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct ib_event ev;
	int lastwqe = 0;
	int mig = 0;
	int pmtu = 0; /* for gcc warning only */
	enum rdma_link_layer link;

	link = rdma_port_get_link_layer(ibqp->device, qp->port_num);

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_lock);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask, link))
		goto inval;

	if (rdi->driver_f.check_modify_qp &&
	    rdi->driver_f.check_modify_qp(qp, attr, attr_mask, udata))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		if (attr->ah_attr.dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
			goto inval;
		if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
			goto inval;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_ah_attr.dlid >=
		    be16_to_cpu(IB_MULTICAST_LID_BASE))
			goto inval;
		if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
			goto inval;
		if (attr->alt_pkey_index >= rvt_get_npkeys(rdi))
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= rvt_get_npkeys(rdi))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (qp->ibqp.qp_type == IB_QPT_SMI ||
		    qp->ibqp.qp_type == IB_QPT_GSI ||
		    attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	if (attr_mask & IB_QP_DEST_QPN)
		if (attr->dest_qp_num > RVT_QPN_MASK)
			goto inval;

	if (attr_mask & IB_QP_RETRY_CNT)
		if (attr->retry_cnt > 7)
			goto inval;

	if (attr_mask & IB_QP_RNR_RETRY)
		if (attr->rnr_retry > 7)
			goto inval;

	/*
	 * Don't allow invalid path_mtu values.  It is OK to set it greater
	 * than the active mtu (or even the max_cap, if we have tuned that
	 * to a small mtu).  We'll set qp->path_mtu to the lesser of the
	 * requested attribute mtu and the active mtu, for packetizing
	 * messages.
	 * Note that the QP port has to be set in INIT and MTU in RTR.
	 */
	if (attr_mask & IB_QP_PATH_MTU) {
		pmtu = rdi->driver_f.get_pmtu_from_attr(rdi, qp, attr);
		if (pmtu < 0)
			goto inval;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		if (attr->path_mig_state == IB_MIG_REARM) {
			if (qp->s_mig_state == IB_MIG_ARMED)
				goto inval;
			if (new_state != IB_QPS_RTS)
				goto inval;
		} else if (attr->path_mig_state == IB_MIG_MIGRATED) {
			if (qp->s_mig_state == IB_MIG_REARM)
				goto inval;
			if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
				goto inval;
			if (qp->s_mig_state == IB_MIG_ARMED)
				mig = 1;
		} else {
			goto inval;
		}
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > rdi->dparms.max_rdma_atomic)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		if (qp->state != IB_QPS_RESET)
			rvt_reset_qp(rdi, qp, ibqp->qp_type);
		break;

	case IB_QPS_RTR:
		/* Allow event to re-trigger if QP set to RTR more than once */
		qp->r_flags &= ~RVT_R_COMM_EST;
		qp->state = new_state;
		break;

	case IB_QPS_SQD:
		qp->s_draining = qp->s_last != qp->s_cur;
		qp->state = new_state;
		break;

	case IB_QPS_SQE:
		if (qp->ibqp.qp_type == IB_QPT_RC)
			goto inval;
		qp->state = new_state;
		break;

	case IB_QPS_ERR:
		lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		break;

	default:
		qp->state = new_state;
		break;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_PORT)
		qp->port_num = attr->port_num;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_next_psn = attr->sq_psn & rdi->dparms.psn_modify_mask;
		qp->s_psn = qp->s_next_psn;
		qp->s_sending_psn = qp->s_next_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
		qp->s_sending_hpsn = qp->s_last_psn;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn & rdi->dparms.psn_modify_mask;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV) {
		qp->remote_ah_attr = attr->ah_attr;
		qp->s_srate = attr->ah_attr.static_rate;
		qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		qp->alt_ah_attr = attr->alt_ah_attr;
		qp->s_alt_pkey_index = attr->alt_pkey_index;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		qp->s_mig_state = attr->path_mig_state;
		if (mig) {
			qp->remote_ah_attr = qp->alt_ah_attr;
			qp->port_num = qp->alt_ah_attr.port_num;
			qp->s_pkey_index = qp->s_alt_pkey_index;

			/*
			 * Ignored by drivers which do not support it.  Not
			 * really worth creating a callback into the driver
			 * just to set a flag.
			 */
			qp->s_flags |= RVT_S_AHG_CLEAR;
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		qp->pmtu = rdi->driver_f.mtu_from_qp(rdi, qp, pmtu);
		qp->path_mtu = rdi->driver_f.mtu_to_path_mtu(qp->pmtu);
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		qp->s_retry_cnt = attr->retry_cnt;
		qp->s_retry = attr->retry_cnt;
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry_cnt = attr->rnr_retry;
		qp->s_rnr_retry = attr->rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT) {
		qp->timeout = attr->timeout;
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
					 1000UL);
	}

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	if (rdi->driver_f.modify_qp)
		rdi->driver_f.modify_qp(qp, attr, attr_mask, udata);

	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		rvt_insert_qp(rdi, qp);

	if (lastwqe) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	if (mig) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_PATH_MIG;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	return 0;

inval:
	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);
	return -EINVAL;
}

/**
 * rvt_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int rvt_destroy_qp(struct ib_qp *ibqp)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_lock);
	rvt_reset_qp(rdi, qp, ibqp->qp_type);
	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);

	/* qpn is now available for use again */
	rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

	spin_lock(&rdi->n_qps_lock);
	rdi->n_qps_allocated--;
	spin_unlock(&rdi->n_qps_lock);

	if (qp->ip)
		kref_put(&qp->ip->ref, rvt_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	vfree(qp->s_wq);
	rdi->driver_f.qp_priv_free(rdi, qp);
	kfree(qp);
	return 0;
}
int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		 int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	attr->qp_state = qp->state;
	attr->cur_qp_state = attr->qp_state;
	attr->path_mtu = qp->path_mtu;
	attr->path_mig_state = qp->s_mig_state;
	attr->qkey = qp->qkey;
	attr->rq_psn = qp->r_psn & rdi->dparms.psn_mask;
	attr->sq_psn = qp->s_next_psn & rdi->dparms.psn_mask;
	attr->dest_qp_num = qp->remote_qpn;
	attr->qp_access_flags = qp->qp_access_flags;
	attr->cap.max_send_wr = qp->s_size - 1;
	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
	attr->cap.max_send_sge = qp->s_max_sge;
	attr->cap.max_recv_sge = qp->r_rq.max_sge;
	attr->cap.max_inline_data = 0;
	attr->ah_attr = qp->remote_ah_attr;
	attr->alt_ah_attr = qp->alt_ah_attr;
	attr->pkey_index = qp->s_pkey_index;
	attr->alt_pkey_index = qp->s_alt_pkey_index;
	attr->en_sqd_async_notify = 0;
	attr->sq_draining = qp->s_draining;
	attr->max_rd_atomic = qp->s_max_rd_atomic;
	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
	attr->min_rnr_timer = qp->r_min_rnr_timer;
	attr->port_num = qp->port_num;
	attr->timeout = qp->timeout;
	attr->retry_cnt = qp->s_retry_cnt;
	attr->rnr_retry = qp->s_rnr_retry_cnt;
	attr->alt_port_num = qp->alt_ah_attr.port_num;
	attr->alt_timeout = qp->alt_timeout;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->cap = attr->cap;
	if (qp->s_flags & RVT_S_SIGNAL_REQ_WR)
		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	else
		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->port_num = qp->port_num;
	return 0;
}

/**
 * rvt_post_recv - post a receive on a QP
 * @ibqp: the QP to post the receive on
 * @wr: the WR to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 */
int rvt_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		  struct ib_recv_wr **bad_wr)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_rwq *wq = qp->r_rq.wq;
	unsigned long flags;

	/* Check that state is OK to post receive. */
	if (!(ib_rvt_state_ops[qp->state] & RVT_POST_RECV_OK) || !wq) {
		*bad_wr = wr;
		return -EINVAL;
	}

	for (; wr; wr = wr->next) {
		struct rvt_rwqe *wqe;
		u32 next;
		int i;

		if ((unsigned)wr->num_sge > qp->r_rq.max_sge) {
			*bad_wr = wr;
			return -EINVAL;
		}

		spin_lock_irqsave(&qp->r_rq.lock, flags);
		next = wq->head + 1;
		if (next >= qp->r_rq.size)
			next = 0;
		if (next == wq->tail) {
			spin_unlock_irqrestore(&qp->r_rq.lock, flags);
			*bad_wr = wr;
			return -ENOMEM;
		}

		wqe = rvt_get_rwqe_ptr(&qp->r_rq, wq->head);
		wqe->wr_id = wr->wr_id;
		wqe->num_sge = wr->num_sge;
		for (i = 0; i < wr->num_sge; i++)
			wqe->sg_list[i] = wr->sg_list[i];
		/* Make sure queue entry is written before the head index. */
		smp_wmb();
		wq->head = next;
		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
	}
	return 0;
}
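/*
 * A sketch of the ordering contract above: the smp_wmb() publishes the
 * WQE contents before the head index, so a consumer (the driver receive
 * path, hypothetical here) must pair it with a read barrier:
 *
 *	head = wq->head;
 *	smp_rmb();	// pairs with the producer's smp_wmb()
 *	// ... entries up to head are now safe to read ...
 */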
/**
 * rvt_post_one_wr - post one RC, UC, or UD send work request
 * @qp: the QP to post on
 * @wr: the work request to send
 */
static int rvt_post_one_wr(struct rvt_qp *qp, struct ib_send_wr *wr)
{
	struct rvt_swqe *wqe;
	u32 next;
	int i;
	int j;
	int acc;
	struct rvt_lkey_table *rkt;
	struct rvt_pd *pd;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	/* IB spec says that num_sge == 0 is OK. */
	if (unlikely(wr->num_sge > qp->s_max_sge))
		return -EINVAL;

	/*
	 * Don't allow RDMA reads or atomic operations on UC QPs, and
	 * reject undefined opcodes outright.
	 * Make sure the buffer is large enough to hold the result for
	 * atomics.
	 */
	if (qp->ibqp.qp_type == IB_QPT_UC) {
		if ((unsigned)wr->opcode >= IB_WR_RDMA_READ)
			return -EINVAL;
	} else if (qp->ibqp.qp_type != IB_QPT_RC) {
		/* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */
		if (wr->opcode != IB_WR_SEND &&
		    wr->opcode != IB_WR_SEND_WITH_IMM)
			return -EINVAL;
		/* Check UD destination address PD */
		if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
			return -EINVAL;
	} else if ((unsigned)wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) {
		return -EINVAL;
	} else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
		   (wr->num_sge == 0 ||
		    wr->sg_list[0].length < sizeof(u64) ||
		    wr->sg_list[0].addr & (sizeof(u64) - 1))) {
		return -EINVAL;
	} else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic) {
		return -EINVAL;
	}

	next = qp->s_head + 1;
	if (next >= qp->s_size)
		next = 0;
	if (next == qp->s_last)
		return -ENOMEM;

	if (rdi->driver_f.check_send_wr &&
	    rdi->driver_f.check_send_wr(qp, wr))
		return -EINVAL;

	rkt = &rdi->lkey_table;
	pd = ibpd_to_rvtpd(qp->ibqp.pd);
	wqe = rvt_get_swqe_ptr(qp, qp->s_head);

	if (qp->ibqp.qp_type != IB_QPT_UC &&
	    qp->ibqp.qp_type != IB_QPT_RC)
		memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr));
	else if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
		 wr->opcode == IB_WR_RDMA_WRITE ||
		 wr->opcode == IB_WR_RDMA_READ)
		memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr));
	else if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		 wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
		memcpy(&wqe->atomic_wr, atomic_wr(wr), sizeof(wqe->atomic_wr));
	else
		memcpy(&wqe->wr, wr, sizeof(wqe->wr));

	wqe->length = 0;
	j = 0;
	if (wr->num_sge) {
		acc = wr->opcode >= IB_WR_RDMA_READ ?
			IB_ACCESS_LOCAL_WRITE : 0;
		for (i = 0; i < wr->num_sge; i++) {
			u32 length = wr->sg_list[i].length;
			int ok;

			if (length == 0)
				continue;
			ok = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j],
					 &wr->sg_list[i], acc);
			if (!ok)
				goto bail_inval_free;
			wqe->length += length;
			j++;
		}
		wqe->wr.num_sge = j;
	}
	if (qp->ibqp.qp_type == IB_QPT_UC ||
	    qp->ibqp.qp_type == IB_QPT_RC) {
		if (wqe->length > 0x80000000U)
			goto bail_inval_free;
	} else {
		atomic_inc(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount);
	}
	wqe->ssn = qp->s_ssn++;
	qp->s_head = next;

	return 0;

bail_inval_free:
	/* release mr holds */
	while (j) {
		struct rvt_sge *sge = &wqe->sg_list[--j];

		rvt_put_mr(sge->mr);
	}
	return -EINVAL;
}
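/*
 * Worked example of the ring math above (illustrative): with
 * init_attr->cap.max_send_wr = 4, rvt_create_qp() sets s_size = 5, and
 * rvt_post_one_wr() returns -ENOMEM once advancing s_head would equal
 * s_last, i.e. at most 4 WQEs can be outstanding at once.
 */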
/**
 * rvt_post_send - post a send on a QP
 * @ibqp: the QP to post the send on
 * @wr: the list of work requests to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 */
int rvt_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		  struct ib_send_wr **bad_wr)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	unsigned long flags = 0;
	int call_send;
	unsigned nreq = 0;
	int err = 0;

	spin_lock_irqsave(&qp->s_lock, flags);

	/*
	 * Ensure the QP state is such that we can send.  If not, bail out
	 * early; there is no need to redo this check for every WR posted.
	 */
	if (unlikely(!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))) {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		return -EINVAL;
	}

	/*
	 * If the send queue is empty and we only have a single WR, then
	 * just go ahead and kick the send engine into gear.  Otherwise we
	 * will always just schedule the send to happen later.
	 */
	call_send = qp->s_head == ACCESS_ONCE(qp->s_last) && !wr->next;

	for (; wr; wr = wr->next) {
		err = rvt_post_one_wr(qp, wr);
		if (unlikely(err)) {
			*bad_wr = wr;
			goto bail;
		}
		nreq++;
	}
bail:
	if (nreq && !call_send)
		rdi->driver_f.schedule_send(qp);
	spin_unlock_irqrestore(&qp->s_lock, flags);
	if (nreq && call_send)
		rdi->driver_f.do_send(qp);
	return err;
}

/**
 * rvt_post_srq_recv - post a receive on a shared receive queue
 * @ibsrq: the SRQ to post the receive on
 * @wr: the list of work requests to post
 * @bad_wr: A pointer to the first WR to cause a problem is put here
 *
 * This may be called from interrupt context.
 */
int rvt_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
	struct rvt_rwq *wq;
	unsigned long flags;

	for (; wr; wr = wr->next) {
		struct rvt_rwqe *wqe;
		u32 next;
		int i;

		if ((unsigned)wr->num_sge > srq->rq.max_sge) {
			*bad_wr = wr;
			return -EINVAL;
		}

		spin_lock_irqsave(&srq->rq.lock, flags);
		wq = srq->rq.wq;
		next = wq->head + 1;
		if (next >= srq->rq.size)
			next = 0;
		if (next == wq->tail) {
			spin_unlock_irqrestore(&srq->rq.lock, flags);
			*bad_wr = wr;
			return -ENOMEM;
		}

		wqe = rvt_get_rwqe_ptr(&srq->rq, wq->head);
		wqe->wr_id = wr->wr_id;
		wqe->num_sge = wr->num_sge;
		for (i = 0; i < wr->num_sge; i++)
			wqe->sg_list[i] = wr->sg_list[i];
		/* Make sure queue entry is written before the head index. */
		smp_wmb();
		wq->head = next;
		spin_unlock_irqrestore(&srq->rq.lock, flags);
	}
	return 0;
}

void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
	struct rvt_qpn_map *map;

	map = qpt->map + qpn / RVT_BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
}
EXPORT_SYMBOL(rvt_free_qpn);

void rvt_dec_qp_cnt(struct rvt_dev_info *rdi)
{
	spin_lock(&rdi->n_qps_lock);
	rdi->n_qps_allocated--;
	spin_unlock(&rdi->n_qps_lock);
}
EXPORT_SYMBOL(rvt_dec_qp_cnt);
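/*
 * Illustrative driver error path (a sketch; not called from rvt itself):
 * a driver that consumed a QPN and the QP count during its own create
 * path can unwind with the two exports above:
 *
 *	rvt_free_qpn(&rdi->qp_dev->qpn_table, qpn);
 *	rvt_dec_qp_cnt(rdi);
 */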