/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/hash.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>
#include "qp.h"
#include "vt.h"
#include "trace.h"

/*
 * Note that it is OK to post send work requests in the SQE and ERR
 * states; rvt_do_send() will process them and generate error
 * completions as per IB 1.2 C10-96.
 */
const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = 0,
	[IB_QPS_INIT] = RVT_POST_RECV_OK,
	[IB_QPS_RTR] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK,
	[IB_QPS_RTS] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK |
	    RVT_PROCESS_NEXT_SEND_OK,
	[IB_QPS_SQD] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK,
	[IB_QPS_SQE] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_FLUSH_SEND,
	[IB_QPS_ERR] = RVT_POST_RECV_OK | RVT_FLUSH_RECV |
	    RVT_POST_SEND_OK | RVT_FLUSH_SEND,
};
EXPORT_SYMBOL(ib_rvt_state_ops);

static void get_map_page(struct rvt_qpn_table *qpt,
			 struct rvt_qpn_map *map,
			 gfp_t gfp)
{
	unsigned long page = get_zeroed_page(gfp);

	/*
	 * Free the page if someone raced with us installing it.
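	 * The check-and-install below is done under qpt->lock, so only one
	 * of the racing allocators keeps its page; the loser's page is freed.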
	 */
	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}

/**
 * init_qpn_table - initialize the QP number table for a device
 * @rdi: rvt device info structure
 * @qpt: the QPN table
 */
static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
{
	u32 offset, i;
	struct rvt_qpn_map *map;
	int ret = 0;

	if (!(rdi->dparms.qpn_res_end >= rdi->dparms.qpn_res_start))
		return -EINVAL;

	spin_lock_init(&qpt->lock);

	qpt->last = rdi->dparms.qpn_start;
	qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift;

	/*
	 * Drivers may want some QPs beyond what we need for verbs; let them
	 * use our QPN table. No need for two. Let's go ahead and mark the
	 * bitmaps for those. The reserved range must be *after* the range
	 * which verbs will pick from.
	 */

	/* Figure out number of bit maps needed before reserved range */
	qpt->nmaps = rdi->dparms.qpn_res_start / RVT_BITS_PER_PAGE;

	/* This should always be zero */
	offset = rdi->dparms.qpn_res_start & RVT_BITS_PER_PAGE_MASK;

	/* Starting with the first reserved bit map */
	map = &qpt->map[qpt->nmaps];

	rvt_pr_info(rdi, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n",
		    rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
	for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
		if (!map->page) {
			get_map_page(qpt, map, GFP_KERNEL);
			if (!map->page) {
				ret = -ENOMEM;
				break;
			}
		}
		set_bit(offset, map->page);
		offset++;
		if (offset == RVT_BITS_PER_PAGE) {
			/* next page */
			qpt->nmaps++;
			map++;
			offset = 0;
		}
	}
	return ret;
}

/**
 * free_qpn_table - free the QP number table for a device
 * @qpt: the QPN table
 */
static void free_qpn_table(struct rvt_qpn_table *qpt)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
		free_page((unsigned long)qpt->map[i].page);
}

int rvt_driver_qp_init(struct rvt_dev_info *rdi)
{
	int i;
	int ret = -ENOMEM;

	if (rdi->flags & RVT_FLAG_QP_INIT_DRIVER) {
		rvt_pr_info(rdi, "Driver is doing QP init.\n");
		return 0;
	}

	if (!rdi->dparms.qp_table_size)
		return -EINVAL;

	/*
	 * If the driver is not doing any QP allocation then make sure it is
	 * providing the necessary QP functions.
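	 * free_all_qps, qp_priv_alloc, qp_priv_free and notify_qp_reset are
	 * all required callbacks in that case.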
	 */
	if (!rdi->driver_f.free_all_qps ||
	    !rdi->driver_f.qp_priv_alloc ||
	    !rdi->driver_f.qp_priv_free ||
	    !rdi->driver_f.notify_qp_reset)
		return -EINVAL;

	/* allocate parent object */
	rdi->qp_dev = kzalloc_node(sizeof(*rdi->qp_dev), GFP_KERNEL,
				   rdi->dparms.node);
	if (!rdi->qp_dev)
		return -ENOMEM;

	/* allocate hash table */
	rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size;
	rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size);
	rdi->qp_dev->qp_table =
		kmalloc_node(rdi->qp_dev->qp_table_size *
			     sizeof(*rdi->qp_dev->qp_table),
			     GFP_KERNEL, rdi->dparms.node);
	if (!rdi->qp_dev->qp_table)
		goto no_qp_table;

	for (i = 0; i < rdi->qp_dev->qp_table_size; i++)
		RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL);

	spin_lock_init(&rdi->qp_dev->qpt_lock);

	/* initialize qpn map */
	if (init_qpn_table(rdi, &rdi->qp_dev->qpn_table))
		goto fail_table;

	spin_lock_init(&rdi->n_qps_lock);

	return 0;

fail_table:
	kfree(rdi->qp_dev->qp_table);
	free_qpn_table(&rdi->qp_dev->qpn_table);

no_qp_table:
	kfree(rdi->qp_dev);

	return ret;
}

/**
 * rvt_free_all_qps - check for QPs still in use
 * @rdi: rvt device info structure with the QP table to empty
 *
 * There should not be any QPs still in use.
 * Free memory for table.
 */
static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
{
	unsigned long flags;
	struct rvt_qp *qp;
	unsigned n, qp_inuse = 0;
	spinlock_t *ql; /* work around too long line below */

	if (rdi->driver_f.free_all_qps)
		qp_inuse = rdi->driver_f.free_all_qps(rdi);

	qp_inuse += rvt_mcast_tree_empty(rdi);

	if (!rdi->qp_dev)
		return qp_inuse;

	ql = &rdi->qp_dev->qpt_lock;
	spin_lock_irqsave(ql, flags);
	for (n = 0; n < rdi->qp_dev->qp_table_size; n++) {
		qp = rcu_dereference_protected(rdi->qp_dev->qp_table[n],
					       lockdep_is_held(ql));
		RCU_INIT_POINTER(rdi->qp_dev->qp_table[n], NULL);

		for (; qp; qp = rcu_dereference_protected(qp->next,
							  lockdep_is_held(ql)))
			qp_inuse++;
	}
	spin_unlock_irqrestore(ql, flags);
	synchronize_rcu();
	return qp_inuse;
}

void rvt_qp_exit(struct rvt_dev_info *rdi)
{
	u32 qps_inuse = rvt_free_all_qps(rdi);

	if (qps_inuse)
		rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
			   qps_inuse);
	if (!rdi->qp_dev)
		return;

	if (rdi->flags & RVT_FLAG_QP_INIT_DRIVER)
		return; /* driver did the qp init so nothing else to do */

	kfree(rdi->qp_dev->qp_table);
	free_qpn_table(&rdi->qp_dev->qpn_table);
	kfree(rdi->qp_dev);
}

static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
			      struct rvt_qpn_map *map, unsigned off)
{
	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}

/**
 * alloc_qpn - Allocate the next available qpn or zero/one for QP type
 *             IB_QPT_SMI/IB_QPT_GSI
 * @rdi: rvt device info structure
 * @qpt: queue pair number table pointer
 * @type: the QP type
 * @port_num: IB port number, 1 based, comes from core
 * @gfp: allocation flags
 *
 * Return: The queue pair number
 */
static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
		     enum ib_qp_type type, u8 port_num, gfp_t gfp)
{
	u32 i, offset, max_scan, qpn;
	struct rvt_qpn_map *map;
	u32 ret;

	if (rdi->driver_f.alloc_qpn)
		return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num,
					       GFP_KERNEL);

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port_num - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

	qpn = qpt->last + qpt->incr;
	if (qpn >= RVT_QPN_MAX)
		qpn = qpt->incr | ((qpt->last & 1) ^ 1);
	/* offset carries bit 0 */
	offset = qpn & RVT_BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map, gfp);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset += qpt->incr;
			/*
			 * This qpn might be bogus if offset >= BITS_PER_PAGE.
			 * That is OK. It gets re-assigned below.
			 */
			qpn = mk_qpn(qpt, map, offset);
		} while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all existing pages before increasing
		 * the size of the bitmap table.
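		 * Only when every already-allocated page has been scanned do
		 * we bump qpt->nmaps and start using a fresh bitmap page.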
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			/* start at incr with current bit 0 */
			offset = qpt->incr | (offset & 1);
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			/* start at incr with current bit 0 */
			offset = qpt->incr | (offset & 1);
		} else {
			map = &qpt->map[0];
			/* wrap to first map page, invert bit 0 */
			offset = qpt->incr | ((offset & 1) ^ 1);
		}
		/* there can be no bits at shift and below */
		WARN_ON(offset & (rdi->dparms.qos_shift - 1));
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}

static void free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
	struct rvt_qpn_map *map;

	map = qpt->map + qpn / RVT_BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
}

/**
 * rvt_reset_qp - initialize the QP state to the reset state
 * @rdi: rvt device info structure
 * @qp: the QP to reset
 * @type: the QP type
 *
 * r_lock and s_lock are required to be held by the caller.
 */
void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
		  enum ib_qp_type type)
{
	if (qp->state != IB_QPS_RESET) {
		qp->state = IB_QPS_RESET;

		/* Let drivers flush their waitlist */
		rdi->driver_f.flush_qp_waiters(qp);
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
		spin_unlock(&qp->s_lock);
		spin_unlock_irq(&qp->r_lock);

		/* Stop the send queue and the retry timer */
		rdi->driver_f.stop_send_queue(qp);
		del_timer_sync(&qp->s_timer);

		/* Wait for things to stop */
		rdi->driver_f.quiesce_qp(qp);

		/* take qp out of the hash and wait for it to be unused */
		rvt_remove_qp(rdi, qp);
		wait_event(qp->wait, !atomic_read(&qp->refcount));

		/* grab the lock b/c it was locked at call time */
		spin_lock_irq(&qp->r_lock);
		spin_lock(&qp->s_lock);

		rvt_clear_mr_refs(qp, 1);
	}

	/*
	 * Let the driver do any tear down it needs to for a qp
	 * that has been reset
	 */
	rdi->driver_f.notify_qp_reset(qp);

	qp->remote_qpn = 0;
	qp->qkey = 0;
	qp->qp_access_flags = 0;
	qp->s_flags &= RVT_S_SIGNAL_REQ_WR;
	qp->s_hdrwords = 0;
	qp->s_wqe = NULL;
	qp->s_draining = 0;
	qp->s_next_psn = 0;
	qp->s_last_psn = 0;
	qp->s_sending_psn = 0;
	qp->s_sending_hpsn = 0;
	qp->s_psn = 0;
	qp->r_psn = 0;
	qp->r_msn = 0;
	if (type == IB_QPT_RC) {
		qp->s_state = IB_OPCODE_RC_SEND_LAST;
		qp->r_state = IB_OPCODE_RC_SEND_LAST;
	} else {
		qp->s_state = IB_OPCODE_UC_SEND_LAST;
		qp->r_state = IB_OPCODE_UC_SEND_LAST;
	}
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->r_nak_state = 0;
	qp->r_aflags = 0;
	qp->r_flags = 0;
	qp->s_head = 0;
	qp->s_tail = 0;
	qp->s_cur = 0;
	qp->s_acked = 0;
	qp->s_last = 0;
	qp->s_ssn = 1;
	qp->s_lsn = 0;
	qp->s_mig_state = IB_MIG_MIGRATED;
	memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
	qp->r_head_ack_queue = 0;
	qp->s_tail_ack_queue = 0;
	qp->s_num_rd_atomic = 0;
	if (qp->r_rq.wq) {
		qp->r_rq.wq->head = 0;
		qp->r_rq.wq->tail = 0;
	}
	qp->r_sge.num_sge = 0;
}
EXPORT_SYMBOL(rvt_reset_qp);

/**
 * rvt_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Queue pair creation is mostly an rvt issue. However, drivers have their own
 * unique idea of what queue pair numbers mean. For instance there is a
 * reserved range for PSM.
 *
 * Returns the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata)
{
	struct rvt_qp *qp;
	int err;
	struct rvt_swqe *swq = NULL;
	size_t sz;
	size_t sg_list_sz;
	struct ib_qp *ret = ERR_PTR(-ENOMEM);
	struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
	void *priv = NULL;
	gfp_t gfp;

	if (!rdi)
		return ERR_PTR(-EINVAL);

	if (init_attr->cap.max_send_sge > rdi->dparms.props.max_sge ||
	    init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
	    init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO))
		return ERR_PTR(-EINVAL);

	/* GFP_NOIO is applicable to RC QPs only */

	if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
	    init_attr->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);

	gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
		GFP_NOIO : GFP_KERNEL;

	/* Check receive queue parameters if no SRQ is specified. */
	if (!init_attr->srq) {
		if (init_attr->cap.max_recv_sge > rdi->dparms.props.max_sge ||
		    init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
			return ERR_PTR(-EINVAL);

		if (init_attr->cap.max_send_sge +
		    init_attr->cap.max_send_wr +
		    init_attr->cap.max_recv_sge +
		    init_attr->cap.max_recv_wr == 0)
			return ERR_PTR(-EINVAL);
	}

	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (init_attr->port_num == 0 ||
		    init_attr->port_num > ibpd->device->phys_port_cnt)
			return ERR_PTR(-EINVAL);
	case IB_QPT_UC:
	case IB_QPT_RC:
	case IB_QPT_UD:
		sz = sizeof(struct rvt_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct rvt_swqe);
		if (gfp == GFP_NOIO)
			swq = __vmalloc(
				(init_attr->cap.max_send_wr + 1) * sz,
				gfp, PAGE_KERNEL);
		else
			swq = vmalloc_node(
				(init_attr->cap.max_send_wr + 1) * sz,
				rdi->dparms.node);
		if (!swq)
			return ERR_PTR(-ENOMEM);

		sz = sizeof(*qp);
		sg_list_sz = 0;
		if (init_attr->srq) {
			struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);

			if (srq->rq.max_sge > 1)
				sg_list_sz = sizeof(*qp->r_sg_list) *
					(srq->rq.max_sge - 1);
		} else if (init_attr->cap.max_recv_sge > 1)
			sg_list_sz = sizeof(*qp->r_sg_list) *
				(init_attr->cap.max_recv_sge - 1);
		qp = kzalloc_node(sz + sg_list_sz, gfp, rdi->dparms.node);
		if (!qp)
			goto bail_swq;

		RCU_INIT_POINTER(qp->next, NULL);

		/*
		 * Driver needs to set up its private QP structure and do any
		 * initialization that is needed.
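		 * Whatever qp_priv_alloc() hands back is stored in qp->priv
		 * for the driver's later use.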
		 */
		priv = rdi->driver_f.qp_priv_alloc(rdi, qp, gfp);
		if (!priv)
			goto bail_qp;
		qp->priv = priv;
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
					 1000UL);
		if (init_attr->srq) {
			sz = 0;
		} else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct rvt_rwqe);
			if (udata)
				qp->r_rq.wq = vmalloc_user(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz);
			else if (gfp == GFP_NOIO)
				qp->r_rq.wq = __vmalloc(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz,
						gfp, PAGE_KERNEL);
			else
				qp->r_rq.wq = vmalloc_node(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz,
						rdi->dparms.node);
			if (!qp->r_rq.wq)
				goto bail_driver_priv;
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->r_lock);
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		init_waitqueue_head(&qp->wait);
		init_timer(&qp->s_timer);
		qp->s_timer.data = (unsigned long)qp;
		INIT_LIST_HEAD(&qp->rspwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = init_attr->cap.max_send_wr + 1;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = RVT_S_SIGNAL_REQ_WR;

		err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
				init_attr->qp_type,
				init_attr->port_num, gfp);
		if (err < 0) {
			ret = ERR_PTR(err);
			goto bail_rq_wq;
		}
		qp->ibqp.qp_num = err;
		qp->port_num = init_attr->port_num;
		rvt_reset_qp(rdi, qp, init_attr->qp_type);
		break;

	default:
		/* Don't support raw QPs */
		return ERR_PTR(-EINVAL);
	}

	init_attr->cap.max_inline_data = 0;

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See rvt_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		if (!qp->r_rq.wq) {
			__u64 offset = 0;

			err = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_qpn;
			}
		} else {
			u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;

			qp->ip = rvt_create_mmap_info(rdi, s,
						      ibpd->uobject->context,
						      qp->r_rq.wq);
			if (!qp->ip) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_qpn;
			}

			err = ib_copy_to_udata(udata, &qp->ip->offset,
					       sizeof(qp->ip->offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		}
	}

	spin_lock(&rdi->n_qps_lock);
	if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
		spin_unlock(&rdi->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	rdi->n_qps_allocated++;
	spin_unlock(&rdi->n_qps_lock);

	if (qp->ip) {
		spin_lock_irq(&rdi->pending_lock);
		list_add(&qp->ip->pending_mmaps, &rdi->pending_mmaps);
		spin_unlock_irq(&rdi->pending_lock);
	}

	ret = &qp->ibqp;

	/*
	 * We have our QP and it's good, now keep track of what types of
	 * opcodes can be processed on this QP. We do this by keeping track of
	 * what the 3 high order bits of the opcode are.
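	 * Those three bits identify the transport (RC, UC, UD, ...), so
	 * allowed_ops below is just the transport portion of a SEND_ONLY
	 * opcode, kept by masking with RVT_OPCODE_QP_MASK.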
	 */
	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_UD:
		qp->allowed_ops = IB_OPCODE_UD_SEND_ONLY & RVT_OPCODE_QP_MASK;
		break;
	case IB_QPT_RC:
		qp->allowed_ops = IB_OPCODE_RC_SEND_ONLY & RVT_OPCODE_QP_MASK;
		break;
	case IB_QPT_UC:
		qp->allowed_ops = IB_OPCODE_UC_SEND_ONLY & RVT_OPCODE_QP_MASK;
		break;
	default:
		ret = ERR_PTR(-EINVAL);
		goto bail_ip;
	}

	return ret;

bail_ip:
	kref_put(&qp->ip->ref, rvt_release_mmap_info);

bail_qpn:
	free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

bail_rq_wq:
	vfree(qp->r_rq.wq);

bail_driver_priv:
	rdi->driver_f.qp_priv_free(rdi, qp);

bail_qp:
	kfree(qp);

bail_swq:
	vfree(swq);

	return ret;
}

void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
{
	unsigned n;

	if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
		rvt_put_ss(&qp->s_rdma_read_sge);

	rvt_put_ss(&qp->r_sge);

	if (clr_sends) {
		while (qp->s_last != qp->s_head) {
			struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);
			unsigned i;

			for (i = 0; i < wqe->wr.num_sge; i++) {
				struct rvt_sge *sge = &wqe->sg_list[i];

				rvt_put_mr(sge->mr);
			}
			if (qp->ibqp.qp_type == IB_QPT_UD ||
			    qp->ibqp.qp_type == IB_QPT_SMI ||
			    qp->ibqp.qp_type == IB_QPT_GSI)
				atomic_dec(&ibah_to_rvtah(
						wqe->ud_wr.ah)->refcount);
			if (++qp->s_last >= qp->s_size)
				qp->s_last = 0;
		}
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
	}

	if (qp->ibqp.qp_type != IB_QPT_RC)
		return;

	for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
		struct rvt_ack_entry *e = &qp->s_ack_queue[n];

		if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
		    e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
	}
}
EXPORT_SYMBOL(rvt_clear_mr_refs);

/**
 * rvt_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 * Returns true if last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
{
	struct ib_wc wc;
	int ret = 0;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
		goto bail;

	qp->state = IB_QPS_ERR;

	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
		del_timer(&qp->s_timer);
	}

	if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
		qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;

	rdi->driver_f.notify_error_qp(qp);

	/*
	 * Schedule the sending tasklet to drain the send work queue.
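	 * s_last != s_head means WQEs are still posted on the send queue and
	 * need to be flushed out.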
	 */
	if (qp->s_last != qp->s_head)
		rdi->driver_f.schedule_send(qp);

	rvt_clear_mr_refs(qp, 0);

	memset(&wc, 0, sizeof(wc));
	wc.qp = &qp->ibqp;
	wc.opcode = IB_WC_RECV;

	if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
		wc.wr_id = qp->r_wr_id;
		wc.status = err;
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	if (qp->r_rq.wq) {
		struct rvt_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		while (tail != head) {
			wc.wr_id = rvt_get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;

		spin_unlock(&qp->r_rq.lock);
	} else if (qp->ibqp.event_handler) {
		ret = 1;
	}

bail:
	return ret;
}
EXPORT_SYMBOL(rvt_error_qp);

/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
	unsigned long flags;

	atomic_inc(&qp->refcount);
	spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);

	if (qp->ibqp.qp_num <= 1) {
		rcu_assign_pointer(rvp->qp[qp->ibqp.qp_num], qp);
	} else {
		u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);

		qp->next = rdi->qp_dev->qp_table[n];
		rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp);
		trace_rvt_qpinsert(qp, n);
	}

	spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
}

/*
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive routine.
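 * Once the entry is gone, synchronize_rcu() guarantees that no RCU reader
 * can still see the QP through the table before the table's reference is
 * dropped.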
 */
void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
	u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
	unsigned long flags;
	int removed = 1;

	spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);

	if (rcu_dereference_protected(rvp->qp[0],
			lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(rvp->qp[0], NULL);
	} else if (rcu_dereference_protected(rvp->qp[1],
			lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(rvp->qp[1], NULL);
	} else {
		struct rvt_qp *q;
		struct rvt_qp __rcu **qpp;

		removed = 0;
		qpp = &rdi->qp_dev->qp_table[n];
		for (; (q = rcu_dereference_protected(*qpp,
			lockdep_is_held(&rdi->qp_dev->qpt_lock))) != NULL;
			qpp = &q->next) {
			if (q == qp) {
				RCU_INIT_POINTER(*qpp,
				     rcu_dereference_protected(qp->next,
				     lockdep_is_held(&rdi->qp_dev->qpt_lock)));
				removed = 1;
				trace_rvt_qpremove(qp, n);
				break;
			}
		}
	}

	spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
	if (removed) {
		synchronize_rcu();
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}
EXPORT_SYMBOL(rvt_remove_qp);

/**
 * rvt_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_udata *udata)
{
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct ib_event ev;
	int lastwqe = 0;
	int mig = 0;
	int pmtu = 0; /* for gcc warning only */
	enum rdma_link_layer link;

	link = rdma_port_get_link_layer(ibqp->device, qp->port_num);

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_lock);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ?
		attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask, link))
		goto inval;

	if (rdi->driver_f.check_modify_qp &&
	    rdi->driver_f.check_modify_qp(qp, attr, attr_mask, udata))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		if (attr->ah_attr.dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
			goto inval;
		if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
			goto inval;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_ah_attr.dlid >=
		    be16_to_cpu(IB_MULTICAST_LID_BASE))
			goto inval;
		if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
			goto inval;
		if (attr->alt_pkey_index >= rvt_get_npkeys(rdi))
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= rvt_get_npkeys(rdi))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (qp->ibqp.qp_type == IB_QPT_SMI ||
		    qp->ibqp.qp_type == IB_QPT_GSI ||
		    attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	if (attr_mask & IB_QP_DEST_QPN)
		if (attr->dest_qp_num > RVT_QPN_MASK)
			goto inval;

	if (attr_mask & IB_QP_RETRY_CNT)
		if (attr->retry_cnt > 7)
			goto inval;

	if (attr_mask & IB_QP_RNR_RETRY)
		if (attr->rnr_retry > 7)
			goto inval;

	/*
	 * Don't allow invalid path_mtu values. OK to set greater
	 * than the active mtu (or even the max_cap, if we have tuned
	 * that to a small mtu). We'll set qp->path_mtu
	 * to the lesser of requested attribute mtu and active,
	 * for packetizing messages.
	 * Note that the QP port has to be set in INIT and MTU in RTR.
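	 * The driver does the actual validation and translation in its
	 * get_pmtu_from_attr() callback; a negative return below means the
	 * requested value cannot be used.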
	 */
	if (attr_mask & IB_QP_PATH_MTU) {
		pmtu = rdi->driver_f.get_pmtu_from_attr(rdi, qp, attr);
		if (pmtu < 0)
			goto inval;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		if (attr->path_mig_state == IB_MIG_REARM) {
			if (qp->s_mig_state == IB_MIG_ARMED)
				goto inval;
			if (new_state != IB_QPS_RTS)
				goto inval;
		} else if (attr->path_mig_state == IB_MIG_MIGRATED) {
			if (qp->s_mig_state == IB_MIG_REARM)
				goto inval;
			if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
				goto inval;
			if (qp->s_mig_state == IB_MIG_ARMED)
				mig = 1;
		} else {
			goto inval;
		}
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > rdi->dparms.max_rdma_atomic)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		if (qp->state != IB_QPS_RESET)
			rvt_reset_qp(rdi, qp, ibqp->qp_type);
		break;

	case IB_QPS_RTR:
		/* Allow event to re-trigger if QP set to RTR more than once */
		qp->r_flags &= ~RVT_R_COMM_EST;
		qp->state = new_state;
		break;

	case IB_QPS_SQD:
		qp->s_draining = qp->s_last != qp->s_cur;
		qp->state = new_state;
		break;

	case IB_QPS_SQE:
		if (qp->ibqp.qp_type == IB_QPT_RC)
			goto inval;
		qp->state = new_state;
		break;

	case IB_QPS_ERR:
		lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		break;

	default:
		qp->state = new_state;
		break;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_PORT)
		qp->port_num = attr->port_num;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_next_psn = attr->sq_psn & rdi->dparms.psn_modify_mask;
		qp->s_psn = qp->s_next_psn;
		qp->s_sending_psn = qp->s_next_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
		qp->s_sending_hpsn = qp->s_last_psn;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn & rdi->dparms.psn_modify_mask;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV) {
		qp->remote_ah_attr = attr->ah_attr;
		qp->s_srate = attr->ah_attr.static_rate;
		qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		qp->alt_ah_attr = attr->alt_ah_attr;
		qp->s_alt_pkey_index = attr->alt_pkey_index;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		qp->s_mig_state = attr->path_mig_state;
		if (mig) {
			qp->remote_ah_attr = qp->alt_ah_attr;
			qp->port_num = qp->alt_ah_attr.port_num;
			qp->s_pkey_index = qp->s_alt_pkey_index;

			/*
			 * Ignored by drivers which do not support it. Not
			 * really worth creating a call back into the driver
			 * just to set a flag.
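			 * The remote AH, port and pkey index were switched to
			 * the alternate path just above; the flag lets the
			 * driver discard anything it cached for the old path.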
			 */
			qp->s_flags |= RVT_S_AHG_CLEAR;
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		qp->pmtu = rdi->driver_f.mtu_from_qp(rdi, qp, pmtu);
		qp->path_mtu = rdi->driver_f.mtu_to_path_mtu(qp->pmtu);
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		qp->s_retry_cnt = attr->retry_cnt;
		qp->s_retry = attr->retry_cnt;
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry_cnt = attr->rnr_retry;
		qp->s_rnr_retry = attr->rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT) {
		qp->timeout = attr->timeout;
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
					 1000UL);
	}

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	if (rdi->driver_f.modify_qp)
		rdi->driver_f.modify_qp(qp, attr, attr_mask, udata);

	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		rvt_insert_qp(rdi, qp);

	if (lastwqe) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	if (mig) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_PATH_MIG;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	return 0;

inval:
	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);
	return -EINVAL;
}

/**
 * rvt_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int rvt_destroy_qp(struct ib_qp *ibqp)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_lock);
	rvt_reset_qp(rdi, qp, ibqp->qp_type);
	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);

	/* qpn is now available for use again */
	rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

	spin_lock(&rdi->n_qps_lock);
	rdi->n_qps_allocated--;
	spin_unlock(&rdi->n_qps_lock);

	if (qp->ip)
		kref_put(&qp->ip->ref, rvt_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	vfree(qp->s_wq);
	rdi->driver_f.qp_priv_free(rdi, qp);
	kfree(qp);
	return 0;
}

int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		 int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	attr->qp_state = qp->state;
	attr->cur_qp_state = attr->qp_state;
	attr->path_mtu = qp->path_mtu;
	attr->path_mig_state = qp->s_mig_state;
	attr->qkey = qp->qkey;
	attr->rq_psn = qp->r_psn & rdi->dparms.psn_mask;
	attr->sq_psn = qp->s_next_psn & rdi->dparms.psn_mask;
	attr->dest_qp_num = qp->remote_qpn;
	attr->qp_access_flags = qp->qp_access_flags;
	attr->cap.max_send_wr = qp->s_size - 1;
	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
	attr->cap.max_send_sge = qp->s_max_sge;
	attr->cap.max_recv_sge = qp->r_rq.max_sge;
	attr->cap.max_inline_data = 0;
	attr->ah_attr = qp->remote_ah_attr;
	attr->alt_ah_attr = qp->alt_ah_attr;
	attr->pkey_index = qp->s_pkey_index;
	attr->alt_pkey_index = qp->s_alt_pkey_index;
	attr->en_sqd_async_notify = 0;
	attr->sq_draining = qp->s_draining;
	attr->max_rd_atomic = qp->s_max_rd_atomic;
	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
	attr->min_rnr_timer = qp->r_min_rnr_timer;
	attr->port_num = qp->port_num;
	attr->timeout = qp->timeout;
	attr->retry_cnt = qp->s_retry_cnt;
	attr->rnr_retry = qp->s_rnr_retry_cnt;
	attr->alt_port_num = qp->alt_ah_attr.port_num;
	attr->alt_timeout = qp->alt_timeout;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->cap = attr->cap;
	if (qp->s_flags & RVT_S_SIGNAL_REQ_WR)
		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	else
		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->port_num = qp->port_num;
	return 0;
}

/**
 * rvt_post_recv - post a receive on a QP
 * @ibqp: the QP to post the receive on
 * @wr: the WR to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 */
int rvt_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		  struct ib_recv_wr **bad_wr)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_rwq *wq = qp->r_rq.wq;
	unsigned long flags;

	/* Check that state is OK to post receive. */
	if (!(ib_rvt_state_ops[qp->state] & RVT_POST_RECV_OK) || !wq) {
		*bad_wr = wr;
		return -EINVAL;
	}

	for (; wr; wr = wr->next) {
		struct rvt_rwqe *wqe;
		u32 next;
		int i;

		if ((unsigned)wr->num_sge > qp->r_rq.max_sge) {
			*bad_wr = wr;
			return -EINVAL;
		}

		spin_lock_irqsave(&qp->r_rq.lock, flags);
		next = wq->head + 1;
		if (next >= qp->r_rq.size)
			next = 0;
		if (next == wq->tail) {
			spin_unlock_irqrestore(&qp->r_rq.lock, flags);
			*bad_wr = wr;
			return -ENOMEM;
		}

		wqe = rvt_get_rwqe_ptr(&qp->r_rq, wq->head);
		wqe->wr_id = wr->wr_id;
		wqe->num_sge = wr->num_sge;
		for (i = 0; i < wr->num_sge; i++)
			wqe->sg_list[i] = wr->sg_list[i];
		/* Make sure queue entry is written before the head index. */
		smp_wmb();
		wq->head = next;
		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
	}
	return 0;
}

/**
 * rvt_post_one_wr - post one RC, UC, or UD send work request
 * @qp: the QP to post on
 * @wr: the work request to send
 */
static int rvt_post_one_wr(struct rvt_qp *qp, struct ib_send_wr *wr)
{
	struct rvt_swqe *wqe;
	u32 next;
	int i;
	int j;
	int acc;
	struct rvt_lkey_table *rkt;
	struct rvt_pd *pd;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	/* IB spec says that num_sge == 0 is OK. */
	if (unlikely(wr->num_sge > qp->s_max_sge))
		return -EINVAL;

	/*
	 * Don't allow RDMA reads or atomic operations on UC or
	 * undefined operations.
	 * Make sure buffer is large enough to hold the result for atomics.
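	 * For atomics that means the first SGE must be at least 8 bytes long
	 * and 8-byte aligned, since the original 64-bit value is returned
	 * there.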
	 */
	if (qp->ibqp.qp_type == IB_QPT_UC) {
		if ((unsigned)wr->opcode >= IB_WR_RDMA_READ)
			return -EINVAL;
	} else if (qp->ibqp.qp_type != IB_QPT_RC) {
		/* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */
		if (wr->opcode != IB_WR_SEND &&
		    wr->opcode != IB_WR_SEND_WITH_IMM)
			return -EINVAL;
		/* Check UD destination address PD */
		if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
			return -EINVAL;
	} else if ((unsigned)wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) {
		return -EINVAL;
	} else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
		   (wr->num_sge == 0 ||
		    wr->sg_list[0].length < sizeof(u64) ||
		    wr->sg_list[0].addr & (sizeof(u64) - 1))) {
		return -EINVAL;
	} else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic) {
		return -EINVAL;
	}

	next = qp->s_head + 1;
	if (next >= qp->s_size)
		next = 0;
	if (next == qp->s_last)
		return -ENOMEM;

	if (rdi->driver_f.check_send_wr &&
	    rdi->driver_f.check_send_wr(qp, wr))
		return -EINVAL;

	rkt = &rdi->lkey_table;
	pd = ibpd_to_rvtpd(qp->ibqp.pd);
	wqe = rvt_get_swqe_ptr(qp, qp->s_head);

	if (qp->ibqp.qp_type != IB_QPT_UC &&
	    qp->ibqp.qp_type != IB_QPT_RC)
		memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr));
	else if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
		 wr->opcode == IB_WR_RDMA_WRITE ||
		 wr->opcode == IB_WR_RDMA_READ)
		memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr));
	else if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		 wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
		memcpy(&wqe->atomic_wr, atomic_wr(wr), sizeof(wqe->atomic_wr));
	else
		memcpy(&wqe->wr, wr, sizeof(wqe->wr));

	wqe->length = 0;
	j = 0;
	if (wr->num_sge) {
		acc = wr->opcode >= IB_WR_RDMA_READ ?
			IB_ACCESS_LOCAL_WRITE : 0;
		for (i = 0; i < wr->num_sge; i++) {
			u32 length = wr->sg_list[i].length;
			int ok;

			if (length == 0)
				continue;
			ok = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j],
					 &wr->sg_list[i], acc);
			if (!ok)
				goto bail_inval_free;
			wqe->length += length;
			j++;
		}
		wqe->wr.num_sge = j;
	}
	if (qp->ibqp.qp_type == IB_QPT_UC ||
	    qp->ibqp.qp_type == IB_QPT_RC) {
		if (wqe->length > 0x80000000U)
			goto bail_inval_free;
	} else {
		atomic_inc(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount);
	}
	wqe->ssn = qp->s_ssn++;
	qp->s_head = next;

	return 0;

bail_inval_free:
	/* release mr holds */
	while (j) {
		struct rvt_sge *sge = &wqe->sg_list[--j];

		rvt_put_mr(sge->mr);
	}
	return -EINVAL;
}

/**
 * rvt_post_send - post a send on a QP
 * @ibqp: the QP to post the send on
 * @wr: the list of work requests to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 */
int rvt_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		  struct ib_send_wr **bad_wr)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	unsigned long flags = 0;
	int call_send;
	unsigned nreq = 0;
	int err = 0;

	spin_lock_irqsave(&qp->s_lock, flags);

	/*
	 * Ensure QP state is such that we can send. If not bail out early,
	 * there is no need to do this every time we post a send.
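	 * ib_rvt_state_ops at the top of this file encodes, per QP state,
	 * which operations (post/process send/recv, flush) are allowed.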
	 */
	if (unlikely(!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))) {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		return -EINVAL;
	}

	/*
	 * If the send queue is empty and we only have a single WR, just go
	 * ahead and kick the send engine into gear. Otherwise we will always
	 * just schedule the send to happen later.
	 */
	call_send = qp->s_head == ACCESS_ONCE(qp->s_last) && !wr->next;

	for (; wr; wr = wr->next) {
		err = rvt_post_one_wr(qp, wr);
		if (unlikely(err)) {
			*bad_wr = wr;
			goto bail;
		}
		nreq++;
	}
bail:
	if (nreq && !call_send)
		rdi->driver_f.schedule_send(qp);
	spin_unlock_irqrestore(&qp->s_lock, flags);
	if (nreq && call_send)
		rdi->driver_f.do_send(qp);
	return err;
}

/**
 * rvt_post_srq_recv - post a receive on a shared receive queue
 * @ibsrq: the SRQ to post the receive on
 * @wr: the list of work requests to post
 * @bad_wr: A pointer to the first WR to cause a problem is put here
 *
 * This may be called from interrupt context.
 */
int rvt_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
	struct rvt_rwq *wq;
	unsigned long flags;

	for (; wr; wr = wr->next) {
		struct rvt_rwqe *wqe;
		u32 next;
		int i;

		if ((unsigned)wr->num_sge > srq->rq.max_sge) {
			*bad_wr = wr;
			return -EINVAL;
		}

		spin_lock_irqsave(&srq->rq.lock, flags);
		wq = srq->rq.wq;
		next = wq->head + 1;
		if (next >= srq->rq.size)
			next = 0;
		if (next == wq->tail) {
			spin_unlock_irqrestore(&srq->rq.lock, flags);
			*bad_wr = wr;
			return -ENOMEM;
		}

		wqe = rvt_get_rwqe_ptr(&srq->rq, wq->head);
		wqe->wr_id = wr->wr_id;
		wqe->num_sge = wr->num_sge;
		for (i = 0; i < wr->num_sge; i++)
			wqe->sg_list[i] = wr->sg_list[i];
		/* Make sure queue entry is written before the head index. */
		smp_wmb();
		wq->head = next;
		spin_unlock_irqrestore(&srq->rq.lock, flags);
	}
	return 0;
}

void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
	struct rvt_qpn_map *map;

	map = qpt->map + qpn / RVT_BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
}
EXPORT_SYMBOL(rvt_free_qpn);

void rvt_dec_qp_cnt(struct rvt_dev_info *rdi)
{
	spin_lock(&rdi->n_qps_lock);
	rdi->n_qps_allocated--;
	spin_unlock(&rdi->n_qps_lock);
}
EXPORT_SYMBOL(rvt_dec_qp_cnt);