/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/hash.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>
#include "qp.h"
#include "vt.h"
#include "trace.h"

/*
 * Note that it is OK to post send work requests in the SQE and ERR
 * states; rvt_do_send() will process them and generate error
 * completions as per IB 1.2 C10-96.
 */
const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
        [IB_QPS_RESET] = 0,
        [IB_QPS_INIT] = RVT_POST_RECV_OK,
        [IB_QPS_RTR] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK,
        [IB_QPS_RTS] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
            RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK |
            RVT_PROCESS_NEXT_SEND_OK,
        [IB_QPS_SQD] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
            RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK,
        [IB_QPS_SQE] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
            RVT_POST_SEND_OK | RVT_FLUSH_SEND,
        [IB_QPS_ERR] = RVT_POST_RECV_OK | RVT_FLUSH_RECV |
            RVT_POST_SEND_OK | RVT_FLUSH_SEND,
};
EXPORT_SYMBOL(ib_rvt_state_ops);
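
/*
 * Illustrative use of the table above (a sketch; rvt_post_send() and
 * rvt_post_recv() below do exactly this): callers AND the entry for
 * the current state with the operation they are about to perform,
 * e.g.
 *
 *	if (!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))
 *		return -EINVAL;
 */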

static void get_map_page(struct rvt_qpn_table *qpt,
                         struct rvt_qpn_map *map,
                         gfp_t gfp)
{
        unsigned long page = get_zeroed_page(gfp);

        /*
         * Free the page if someone raced with us installing it.
         */

        spin_lock(&qpt->lock);
        if (map->page)
                free_page(page);
        else
                map->page = (void *)page;
        spin_unlock(&qpt->lock);
}
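
/*
 * QPN bitmap layout: the table is a flat bitmap split across
 * individually allocated pages, so a QPN decomposes into a (page, bit)
 * pair as
 *
 *	map    = &qpt->map[qpn / RVT_BITS_PER_PAGE];
 *	offset = qpn & RVT_BITS_PER_PAGE_MASK;
 *
 * (see free_qpn() below); mk_qpn() is the inverse of this
 * decomposition.
 */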

/**
 * init_qpn_table - initialize the QP number table for a device
 * @rdi: rvt device info structure
 * @qpt: the QPN table
 */
static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
{
        u32 offset, i;
        struct rvt_qpn_map *map;
        int ret = 0;

        if (!(rdi->dparms.qpn_res_end >= rdi->dparms.qpn_res_start))
                return -EINVAL;

        spin_lock_init(&qpt->lock);

        qpt->last = rdi->dparms.qpn_start;
        qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift;

        /*
         * Drivers may want some QPs beyond what we need for verbs; let them
         * use our QPN table rather than keeping a second one. Go ahead and
         * mark the bitmaps for those QPNs here. The reserved range must be
         * *after* the range which verbs will pick from.
         */

        /* Figure out number of bit maps needed before reserved range */
        qpt->nmaps = rdi->dparms.qpn_res_start / RVT_BITS_PER_PAGE;

        /* This should always be zero */
        offset = rdi->dparms.qpn_res_start & RVT_BITS_PER_PAGE_MASK;

        /* Starting with the first reserved bit map */
        map = &qpt->map[qpt->nmaps];

        rvt_pr_info(rdi, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n",
                    rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
        for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
                if (!map->page) {
                        get_map_page(qpt, map, GFP_KERNEL);
                        if (!map->page) {
                                ret = -ENOMEM;
                                break;
                        }
                }
                set_bit(offset, map->page);
                offset++;
                if (offset == RVT_BITS_PER_PAGE) {
                        /* next page */
                        qpt->nmaps++;
                        map++;
                        offset = 0;
                }
        }
        return ret;
}

/**
 * free_qpn_table - free the QP number table for a device
 * @qpt: the QPN table
 */
static void free_qpn_table(struct rvt_qpn_table *qpt)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
                free_page((unsigned long)qpt->map[i].page);
}
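
/*
 * Worked example of the reserved-range marking above, with
 * hypothetical, driver-specific numbers: on 4 KiB pages
 * RVT_BITS_PER_PAGE is 32768, so qpn_res_start = 0x10000 gives
 * nmaps = 2 and offset = 0; the loop then sets one bit per reserved
 * QPN starting at map[2] bit 0, stepping to the next map page whenever
 * offset reaches RVT_BITS_PER_PAGE.
 */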

int rvt_driver_qp_init(struct rvt_dev_info *rdi)
{
        int i;
        int ret = -ENOMEM;

        if (rdi->flags & RVT_FLAG_QP_INIT_DRIVER) {
                rvt_pr_info(rdi, "Driver is doing QP init.\n");
                return 0;
        }

        if (!rdi->dparms.qp_table_size)
                return -EINVAL;

        /*
         * If driver is not doing any QP allocation then make sure it is
         * providing the necessary QP functions.
         */
        if (!rdi->driver_f.free_all_qps ||
            !rdi->driver_f.qp_priv_alloc ||
            !rdi->driver_f.qp_priv_free ||
            !rdi->driver_f.notify_qp_reset)
                return -EINVAL;

        /* allocate parent object */
        rdi->qp_dev = kzalloc_node(sizeof(*rdi->qp_dev), GFP_KERNEL,
                                   rdi->dparms.node);
        if (!rdi->qp_dev)
                return -ENOMEM;

        /* allocate hash table */
        rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size;
        rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size);
        rdi->qp_dev->qp_table =
                kmalloc_node(rdi->qp_dev->qp_table_size *
                             sizeof(*rdi->qp_dev->qp_table),
                             GFP_KERNEL, rdi->dparms.node);
        if (!rdi->qp_dev->qp_table)
                goto no_qp_table;

        for (i = 0; i < rdi->qp_dev->qp_table_size; i++)
                RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL);

        spin_lock_init(&rdi->qp_dev->qpt_lock);

        /* initialize qpn map */
        if (init_qpn_table(rdi, &rdi->qp_dev->qpn_table))
                goto fail_table;

        spin_lock_init(&rdi->n_qps_lock);

        return 0;

fail_table:
        kfree(rdi->qp_dev->qp_table);
        free_qpn_table(&rdi->qp_dev->qpn_table);

no_qp_table:
        kfree(rdi->qp_dev);

        return ret;
}

/**
 * rvt_free_all_qps - check for QPs still in use
 * @rdi: rvt device info structure
 *
 * There should not be any QPs still in use.
 * Free memory for table.
 */
static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
{
        unsigned long flags;
        struct rvt_qp *qp;
        unsigned n, qp_inuse = 0;
        spinlock_t *ql; /* work around too long line below */

        if (rdi->driver_f.free_all_qps)
                qp_inuse = rdi->driver_f.free_all_qps(rdi);

        qp_inuse += rvt_mcast_tree_empty(rdi);

        if (!rdi->qp_dev)
                return qp_inuse;

        ql = &rdi->qp_dev->qpt_lock;
        spin_lock_irqsave(ql, flags);
        for (n = 0; n < rdi->qp_dev->qp_table_size; n++) {
                qp = rcu_dereference_protected(rdi->qp_dev->qp_table[n],
                                               lockdep_is_held(ql));
                RCU_INIT_POINTER(rdi->qp_dev->qp_table[n], NULL);

                for (; qp; qp = rcu_dereference_protected(qp->next,
                                                          lockdep_is_held(ql)))
                        qp_inuse++;
        }
        spin_unlock_irqrestore(ql, flags);
        synchronize_rcu();
        return qp_inuse;
}

void rvt_qp_exit(struct rvt_dev_info *rdi)
{
        u32 qps_inuse = rvt_free_all_qps(rdi);

        if (qps_inuse)
                rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
                           qps_inuse);
        if (!rdi->qp_dev)
                return;

        if (rdi->flags & RVT_FLAG_QP_INIT_DRIVER)
                return; /* driver did the qp init so nothing else to do */

        kfree(rdi->qp_dev->qp_table);
        free_qpn_table(&rdi->qp_dev->qpn_table);
        kfree(rdi->qp_dev);
}

static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
                              struct rvt_qpn_map *map, unsigned off)
{
        return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}

/**
 * alloc_qpn - Allocate the next available qpn or zero/one for QP type
 *	       IB_QPT_SMI/IB_QPT_GSI
 * @rdi: rvt device info structure
 * @qpt: queue pair number table pointer
 * @type: the QP type
 * @port_num: IB port number, 1 based, comes from core
 * @gfp: memory allocation flags
 *
 * Return: The queue pair number
 */
static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
                     enum ib_qp_type type, u8 port_num, gfp_t gfp)
{
        u32 i, offset, max_scan, qpn;
        struct rvt_qpn_map *map;
        int ret;

        if (rdi->driver_f.alloc_qpn)
                return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num, gfp);

        if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
                unsigned n;

                ret = type == IB_QPT_GSI;
                n = 1 << (ret + 2 * (port_num - 1));
                spin_lock(&qpt->lock);
                if (qpt->flags & n)
                        ret = -EINVAL;
                else
                        qpt->flags |= n;
                spin_unlock(&qpt->lock);
                goto bail;
        }

        qpn = qpt->last + qpt->incr;
        if (qpn >= RVT_QPN_MAX)
                qpn = qpt->incr | ((qpt->last & 1) ^ 1);
        /* offset carries bit 0 */
        offset = qpn & RVT_BITS_PER_PAGE_MASK;
        map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
        max_scan = qpt->nmaps - !offset;
        for (i = 0;;) {
                if (unlikely(!map->page)) {
                        get_map_page(qpt, map, gfp);
                        if (unlikely(!map->page))
                                break;
                }
                do {
                        if (!test_and_set_bit(offset, map->page)) {
                                qpt->last = qpn;
                                ret = qpn;
                                goto bail;
                        }
                        offset += qpt->incr;
                        /*
                         * This qpn might be bogus if offset >=
                         * RVT_BITS_PER_PAGE.  That is OK; it gets
                         * re-assigned below.
                         */
                        qpn = mk_qpn(qpt, map, offset);
                } while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
                /*
                 * In order to keep the number of pages allocated to a
                 * minimum, we scan all existing pages before increasing
                 * the size of the bitmap table.
                 */
                if (++i > max_scan) {
                        if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
                                break;
                        map = &qpt->map[qpt->nmaps++];
                        /* start at incr with current bit 0 */
                        offset = qpt->incr | (offset & 1);
                } else if (map < &qpt->map[qpt->nmaps]) {
                        ++map;
                        /* start at incr with current bit 0 */
                        offset = qpt->incr | (offset & 1);
                } else {
                        map = &qpt->map[0];
                        /* wrap to first map page, invert bit 0 */
                        offset = qpt->incr | ((offset & 1) ^ 1);
                }
                /* there can be no bits at shift and below */
                WARN_ON(offset & (rdi->dparms.qos_shift - 1));
                qpn = mk_qpn(qpt, map, offset);
        }

        ret = -ENOMEM;

bail:
        return ret;
}
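
/*
 * A note on the bit-0 arithmetic in alloc_qpn() above (as read from
 * the code): with a non-zero qos_shift the increment is even, so a
 * scan only ever visits QPNs of one parity.  The restart expressions
 * of the form
 *
 *	offset = qpt->incr | ((offset & 1) ^ 1);
 *
 * flip that low bit when the scan wraps, so both the even and the odd
 * QPN sequences are eventually searched before giving up.
 */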

static void free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
        struct rvt_qpn_map *map;

        map = qpt->map + qpn / RVT_BITS_PER_PAGE;
        if (map->page)
                clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
}

/**
 * rvt_reset_qp - initialize the QP state to the reset state
 * @rdi: rvt device info structure
 * @qp: the QP to reset
 * @type: the QP type
 *
 * The r_lock and s_lock are required to be held by the caller.
 */
void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
                  enum ib_qp_type type)
{
        if (qp->state != IB_QPS_RESET) {
                qp->state = IB_QPS_RESET;

                /* Let drivers flush their waitlist */
                rdi->driver_f.flush_qp_waiters(qp);
                qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
                spin_unlock(&qp->s_lock);
                spin_unlock_irq(&qp->r_lock);

                /* Stop the send queue and the retry timer */
                rdi->driver_f.stop_send_queue(qp);

                /* Wait for things to stop */
                rdi->driver_f.quiesce_qp(qp);

                /* take qp out of the hash and wait for it to be unused */
                rvt_remove_qp(rdi, qp);
                wait_event(qp->wait, !atomic_read(&qp->refcount));

                /* grab the lock b/c it was locked at call time */
                spin_lock_irq(&qp->r_lock);
                spin_lock(&qp->s_lock);

                rvt_clear_mr_refs(qp, 1);
        }

        /*
         * Let the driver do any tear down it needs to for a qp
         * that has been reset
         */
        rdi->driver_f.notify_qp_reset(qp);

        qp->remote_qpn = 0;
        qp->qkey = 0;
        qp->qp_access_flags = 0;
        qp->s_flags &= RVT_S_SIGNAL_REQ_WR;
        qp->s_hdrwords = 0;
        qp->s_wqe = NULL;
        qp->s_draining = 0;
        qp->s_next_psn = 0;
        qp->s_last_psn = 0;
        qp->s_sending_psn = 0;
        qp->s_sending_hpsn = 0;
        qp->s_psn = 0;
        qp->r_psn = 0;
        qp->r_msn = 0;
        if (type == IB_QPT_RC) {
                qp->s_state = IB_OPCODE_RC_SEND_LAST;
                qp->r_state = IB_OPCODE_RC_SEND_LAST;
        } else {
                qp->s_state = IB_OPCODE_UC_SEND_LAST;
                qp->r_state = IB_OPCODE_UC_SEND_LAST;
        }
        qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
        qp->r_nak_state = 0;
        qp->r_aflags = 0;
        qp->r_flags = 0;
        qp->s_head = 0;
        qp->s_tail = 0;
        qp->s_cur = 0;
        qp->s_acked = 0;
        qp->s_last = 0;
        qp->s_ssn = 1;
        qp->s_lsn = 0;
        qp->s_mig_state = IB_MIG_MIGRATED;
        memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
        qp->r_head_ack_queue = 0;
        qp->s_tail_ack_queue = 0;
        qp->s_num_rd_atomic = 0;
        if (qp->r_rq.wq) {
                qp->r_rq.wq->head = 0;
                qp->r_rq.wq->tail = 0;
        }
        qp->r_sge.num_sge = 0;
}
EXPORT_SYMBOL(rvt_reset_qp);
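
/*
 * A note on IB_QP_CREATE_USE_GFP_NOIO in rvt_create_qp() below: a
 * caller on the block I/O path (storage over RC is the presumed user)
 * can request GFP_NOIO so that the allocations made during QP creation
 * cannot recurse into the I/O path under memory pressure.  The code
 * below only honors the flag for RC QPs.
 */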

/**
 * rvt_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Queue pair creation is mostly an rvt issue. However, drivers have their own
 * unique idea of what queue pair numbers mean. For instance there is a reserved
 * range for PSM.
 *
 * Returns the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
                            struct ib_qp_init_attr *init_attr,
                            struct ib_udata *udata)
{
        struct rvt_qp *qp;
        int err;
        struct rvt_swqe *swq = NULL;
        size_t sz;
        size_t sg_list_sz;
        struct ib_qp *ret = ERR_PTR(-ENOMEM);
        struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
        void *priv = NULL;
        gfp_t gfp;

        if (!rdi)
                return ERR_PTR(-EINVAL);

        if (init_attr->cap.max_send_sge > rdi->dparms.props.max_sge ||
            init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
            init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO))
                return ERR_PTR(-EINVAL);

        /* GFP_NOIO is applicable to RC QPs only */

        if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
            init_attr->qp_type != IB_QPT_RC)
                return ERR_PTR(-EINVAL);

        gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
                                                GFP_NOIO : GFP_KERNEL;

        /* Check receive queue parameters if no SRQ is specified. */
        if (!init_attr->srq) {
                if (init_attr->cap.max_recv_sge > rdi->dparms.props.max_sge ||
                    init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
                        return ERR_PTR(-EINVAL);

                if (init_attr->cap.max_send_sge +
                    init_attr->cap.max_send_wr +
                    init_attr->cap.max_recv_sge +
                    init_attr->cap.max_recv_wr == 0)
                        return ERR_PTR(-EINVAL);
        }

        switch (init_attr->qp_type) {
        case IB_QPT_SMI:
        case IB_QPT_GSI:
                if (init_attr->port_num == 0 ||
                    init_attr->port_num > ibpd->device->phys_port_cnt)
                        return ERR_PTR(-EINVAL);
                /* fall through */
        case IB_QPT_UC:
        case IB_QPT_RC:
        case IB_QPT_UD:
                sz = sizeof(struct rvt_sge) *
                        init_attr->cap.max_send_sge +
                        sizeof(struct rvt_swqe);
                if (gfp == GFP_NOIO)
                        swq = __vmalloc(
                                (init_attr->cap.max_send_wr + 1) * sz,
                                gfp, PAGE_KERNEL);
                else
                        swq = vmalloc_node(
                                (init_attr->cap.max_send_wr + 1) * sz,
                                rdi->dparms.node);
                if (!swq)
                        return ERR_PTR(-ENOMEM);

                sz = sizeof(*qp);
                sg_list_sz = 0;
                if (init_attr->srq) {
                        struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);

                        if (srq->rq.max_sge > 1)
                                sg_list_sz = sizeof(*qp->r_sg_list) *
                                        (srq->rq.max_sge - 1);
                } else if (init_attr->cap.max_recv_sge > 1)
                        sg_list_sz = sizeof(*qp->r_sg_list) *
                                (init_attr->cap.max_recv_sge - 1);
                qp = kzalloc_node(sz + sg_list_sz, gfp, rdi->dparms.node);
                if (!qp)
                        goto bail_swq;

                RCU_INIT_POINTER(qp->next, NULL);

                /*
                 * Driver needs to set up its private QP structure and do any
                 * initialization that is needed.
                 */
                priv = rdi->driver_f.qp_priv_alloc(rdi, qp, gfp);
                if (!priv)
                        goto bail_qp;
                qp->priv = priv;
                qp->timeout_jiffies =
                        usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
                                         1000UL);
                if (init_attr->srq) {
                        sz = 0;
                } else {
                        qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
                        qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
                        sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
                                sizeof(struct rvt_rwqe);
                        if (udata)
                                qp->r_rq.wq = vmalloc_user(
                                                sizeof(struct rvt_rwq) +
                                                qp->r_rq.size * sz);
                        else if (gfp == GFP_NOIO)
                                qp->r_rq.wq = __vmalloc(
                                                sizeof(struct rvt_rwq) +
                                                qp->r_rq.size * sz,
                                                gfp, PAGE_KERNEL);
                        else
                                qp->r_rq.wq = vmalloc_node(
                                                sizeof(struct rvt_rwq) +
                                                qp->r_rq.size * sz,
                                                rdi->dparms.node);
                        if (!qp->r_rq.wq)
                                goto bail_driver_priv;
                }

                /*
                 * ib_create_qp() will initialize qp->ibqp
                 * except for qp->ibqp.qp_num.
                 */
                spin_lock_init(&qp->r_lock);
                spin_lock_init(&qp->s_lock);
                spin_lock_init(&qp->r_rq.lock);
                atomic_set(&qp->refcount, 0);
                init_waitqueue_head(&qp->wait);
                init_timer(&qp->s_timer);
                qp->s_timer.data = (unsigned long)qp;
                INIT_LIST_HEAD(&qp->rspwait);
                qp->state = IB_QPS_RESET;
                qp->s_wq = swq;
                qp->s_size = init_attr->cap.max_send_wr + 1;
                qp->s_max_sge = init_attr->cap.max_send_sge;
                if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
                        qp->s_flags = RVT_S_SIGNAL_REQ_WR;

                err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
                                init_attr->qp_type,
                                init_attr->port_num, gfp);
                if (err < 0) {
                        ret = ERR_PTR(err);
                        goto bail_rq_wq;
                }
                qp->ibqp.qp_num = err;
                qp->port_num = init_attr->port_num;
                rvt_reset_qp(rdi, qp, init_attr->qp_type);
                break;

        default:
                /* Don't support raw QPs */
                return ERR_PTR(-EINVAL);
        }

        init_attr->cap.max_inline_data = 0;

        /*
         * Return the address of the RWQ as the offset to mmap.
         * See rvt_mmap() for details.
         */
        if (udata && udata->outlen >= sizeof(__u64)) {
                if (!qp->r_rq.wq) {
                        __u64 offset = 0;

                        err = ib_copy_to_udata(udata, &offset,
                                               sizeof(offset));
                        if (err) {
                                ret = ERR_PTR(err);
                                goto bail_qpn;
                        }
                } else {
                        u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;

                        qp->ip = rvt_create_mmap_info(rdi, s,
                                                      ibpd->uobject->context,
                                                      qp->r_rq.wq);
                        if (!qp->ip) {
                                ret = ERR_PTR(-ENOMEM);
                                goto bail_qpn;
                        }

                        err = ib_copy_to_udata(udata, &qp->ip->offset,
                                               sizeof(qp->ip->offset));
                        if (err) {
                                ret = ERR_PTR(err);
                                goto bail_ip;
                        }
                }
        }

        spin_lock(&rdi->n_qps_lock);
        if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
                spin_unlock(&rdi->n_qps_lock);
                ret = ERR_PTR(-ENOMEM);
                goto bail_ip;
        }

        rdi->n_qps_allocated++;
        spin_unlock(&rdi->n_qps_lock);

        if (qp->ip) {
                spin_lock_irq(&rdi->pending_lock);
                list_add(&qp->ip->pending_mmaps, &rdi->pending_mmaps);
                spin_unlock_irq(&rdi->pending_lock);
        }

        ret = &qp->ibqp;

        /*
         * We have our QP and it's good; now keep track of what types of
         * opcodes can be processed on this QP. We do this by keeping track of
         * what the 3 high order bits of the opcode are.
         */
        switch (init_attr->qp_type) {
        case IB_QPT_SMI:
        case IB_QPT_GSI:
        case IB_QPT_UD:
                qp->allowed_ops = IB_OPCODE_UD_SEND_ONLY & RVT_OPCODE_QP_MASK;
                break;
        case IB_QPT_RC:
                qp->allowed_ops = IB_OPCODE_RC_SEND_ONLY & RVT_OPCODE_QP_MASK;
                break;
        case IB_QPT_UC:
                qp->allowed_ops = IB_OPCODE_UC_SEND_ONLY & RVT_OPCODE_QP_MASK;
                break;
        default:
                ret = ERR_PTR(-EINVAL);
                goto bail_ip;
        }

        return ret;

bail_ip:
        if (qp->ip) {
                kref_put(&qp->ip->ref, rvt_release_mmap_info);
                /* rvt_release_mmap_info() freed the RWQ via ip->obj */
                qp->r_rq.wq = NULL;
        }

bail_qpn:
        free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

bail_rq_wq:
        vfree(qp->r_rq.wq);

bail_driver_priv:
        rdi->driver_f.qp_priv_free(rdi, qp);

bail_qp:
        kfree(qp);

bail_swq:
        vfree(swq);

        return ret;
}
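
/*
 * The bail_* labels above unwind in the reverse order of allocation:
 * mmap info (whose release also frees the receive queue buffer it
 * owns), then the QPN, the receive queue, the driver private data, the
 * QP itself, and finally the send work queue.  Each goto enters the
 * chain at the deepest stage that had been set up.
 */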

void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
{
        unsigned n;

        if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
                rvt_put_ss(&qp->s_rdma_read_sge);

        rvt_put_ss(&qp->r_sge);

        if (clr_sends) {
                while (qp->s_last != qp->s_head) {
                        struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);
                        unsigned i;

                        for (i = 0; i < wqe->wr.num_sge; i++) {
                                struct rvt_sge *sge = &wqe->sg_list[i];

                                rvt_put_mr(sge->mr);
                        }
                        if (qp->ibqp.qp_type == IB_QPT_UD ||
                            qp->ibqp.qp_type == IB_QPT_SMI ||
                            qp->ibqp.qp_type == IB_QPT_GSI)
                                atomic_dec(&ibah_to_rvtah(
                                                wqe->ud_wr.ah)->refcount);
                        if (++qp->s_last >= qp->s_size)
                                qp->s_last = 0;
                }
                if (qp->s_rdma_mr) {
                        rvt_put_mr(qp->s_rdma_mr);
                        qp->s_rdma_mr = NULL;
                }
        }

        if (qp->ibqp.qp_type != IB_QPT_RC)
                return;

        for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
                struct rvt_ack_entry *e = &qp->s_ack_queue[n];

                if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
                    e->rdma_sge.mr) {
                        rvt_put_mr(e->rdma_sge.mr);
                        e->rdma_sge.mr = NULL;
                }
        }
}
EXPORT_SYMBOL(rvt_clear_mr_refs);

/**
 * rvt_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 * Returns true if last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
{
        struct ib_wc wc;
        int ret = 0;
        struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

        if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
                goto bail;

        qp->state = IB_QPS_ERR;

        if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
                qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
                del_timer(&qp->s_timer);
        }

        if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
                qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;

        rdi->driver_f.notify_error_qp(qp);

        /* Schedule the sending tasklet to drain the send work queue. */
        if (qp->s_last != qp->s_head)
                rdi->driver_f.schedule_send(qp);

        rvt_clear_mr_refs(qp, 0);

        memset(&wc, 0, sizeof(wc));
        wc.qp = &qp->ibqp;
        wc.opcode = IB_WC_RECV;

        if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
                wc.wr_id = qp->r_wr_id;
                wc.status = err;
                rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
        }
        wc.status = IB_WC_WR_FLUSH_ERR;

        if (qp->r_rq.wq) {
                struct rvt_rwq *wq;
                u32 head;
                u32 tail;

                spin_lock(&qp->r_rq.lock);

                /* sanity check pointers before trusting them */
                wq = qp->r_rq.wq;
                head = wq->head;
                if (head >= qp->r_rq.size)
                        head = 0;
                tail = wq->tail;
                if (tail >= qp->r_rq.size)
                        tail = 0;
                while (tail != head) {
                        wc.wr_id = rvt_get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
                        if (++tail >= qp->r_rq.size)
                                tail = 0;
                        rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
                }
                wq->tail = tail;

                spin_unlock(&qp->r_rq.lock);
        } else if (qp->ibqp.event_handler) {
                ret = 1;
        }

bail:
        return ret;
}
EXPORT_SYMBOL(rvt_error_qp);

/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
        struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
        unsigned long flags;

        atomic_inc(&qp->refcount);
        spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);

        if (qp->ibqp.qp_num <= 1) {
                rcu_assign_pointer(rvp->qp[qp->ibqp.qp_num], qp);
        } else {
                u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);

                qp->next = rdi->qp_dev->qp_table[n];
                rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp);
                trace_rvt_qpinsert(qp, n);
        }

        spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
}
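
/*
 * The QP hash table follows the usual RCU pattern: rvt_insert_qp()
 * above publishes with rcu_assign_pointer() under qpt_lock, lookups
 * (in the rvt headers, not this file) are expected to walk the chains
 * under rcu_read_lock(), and rvt_remove_qp() below unlinks under the
 * lock and then calls synchronize_rcu() before dropping its reference.
 */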

/*
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive routine.
 */
void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
        struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
        u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
        unsigned long flags;
        int removed = 1;

        spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);

        if (rcu_dereference_protected(rvp->qp[0],
                        lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
                RCU_INIT_POINTER(rvp->qp[0], NULL);
        } else if (rcu_dereference_protected(rvp->qp[1],
                        lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
                RCU_INIT_POINTER(rvp->qp[1], NULL);
        } else {
                struct rvt_qp *q;
                struct rvt_qp __rcu **qpp;

                removed = 0;
                qpp = &rdi->qp_dev->qp_table[n];
                for (; (q = rcu_dereference_protected(*qpp,
                        lockdep_is_held(&rdi->qp_dev->qpt_lock))) != NULL;
                        qpp = &q->next) {
                        if (q == qp) {
                                RCU_INIT_POINTER(*qpp,
                                     rcu_dereference_protected(qp->next,
                                     lockdep_is_held(&rdi->qp_dev->qpt_lock)));
                                removed = 1;
                                trace_rvt_qpremove(qp, n);
                                break;
                        }
                }
        }

        spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
        if (removed) {
                synchronize_rcu();
                if (atomic_dec_and_test(&qp->refcount))
                        wake_up(&qp->wait);
        }
}
EXPORT_SYMBOL(rvt_remove_qp);

/**
 * rvt_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                  int attr_mask, struct ib_udata *udata)
{
        struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
        struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
        enum ib_qp_state cur_state, new_state;
        struct ib_event ev;
        int lastwqe = 0;
        int mig = 0;
        int pmtu = 0; /* for gcc warning only */
        enum rdma_link_layer link;

        link = rdma_port_get_link_layer(ibqp->device, qp->port_num);

        spin_lock_irq(&qp->r_lock);
        spin_lock(&qp->s_lock);

        cur_state = attr_mask & IB_QP_CUR_STATE ?
                attr->cur_qp_state : qp->state;
        new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

        if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
                                attr_mask, link))
                goto inval;

        if (rdi->driver_f.check_modify_qp &&
            rdi->driver_f.check_modify_qp(qp, attr, attr_mask, udata))
                goto inval;

        if (attr_mask & IB_QP_AV) {
                if (attr->ah_attr.dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
                        goto inval;
                if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
                        goto inval;
        }

        if (attr_mask & IB_QP_ALT_PATH) {
                if (attr->alt_ah_attr.dlid >=
                    be16_to_cpu(IB_MULTICAST_LID_BASE))
                        goto inval;
                if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
                        goto inval;
                if (attr->alt_pkey_index >= rvt_get_npkeys(rdi))
                        goto inval;
        }

        if (attr_mask & IB_QP_PKEY_INDEX)
                if (attr->pkey_index >= rvt_get_npkeys(rdi))
                        goto inval;

        if (attr_mask & IB_QP_MIN_RNR_TIMER)
                if (attr->min_rnr_timer > 31)
                        goto inval;

        if (attr_mask & IB_QP_PORT)
                if (qp->ibqp.qp_type == IB_QPT_SMI ||
                    qp->ibqp.qp_type == IB_QPT_GSI ||
                    attr->port_num == 0 ||
                    attr->port_num > ibqp->device->phys_port_cnt)
                        goto inval;

        if (attr_mask & IB_QP_DEST_QPN)
                if (attr->dest_qp_num > RVT_QPN_MASK)
                        goto inval;

        if (attr_mask & IB_QP_RETRY_CNT)
                if (attr->retry_cnt > 7)
                        goto inval;

        if (attr_mask & IB_QP_RNR_RETRY)
                if (attr->rnr_retry > 7)
                        goto inval;

        /*
         * Don't allow invalid path_mtu values. OK to set greater
         * than the active mtu (or even the max_cap, if we have tuned
         * that to a small mtu). We'll set qp->path_mtu
         * to the lesser of requested attribute mtu and active,
         * for packetizing messages.
         * Note that the QP port has to be set in INIT and MTU in RTR.
         */
        if (attr_mask & IB_QP_PATH_MTU) {
                pmtu = rdi->driver_f.get_pmtu_from_attr(rdi, qp, attr);
                if (pmtu < 0)
                        goto inval;
        }

        if (attr_mask & IB_QP_PATH_MIG_STATE) {
                if (attr->path_mig_state == IB_MIG_REARM) {
                        if (qp->s_mig_state == IB_MIG_ARMED)
                                goto inval;
                        if (new_state != IB_QPS_RTS)
                                goto inval;
                } else if (attr->path_mig_state == IB_MIG_MIGRATED) {
                        if (qp->s_mig_state == IB_MIG_REARM)
                                goto inval;
                        if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
                                goto inval;
                        if (qp->s_mig_state == IB_MIG_ARMED)
                                mig = 1;
                } else {
                        goto inval;
                }
        }

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                if (attr->max_dest_rd_atomic > rdi->dparms.max_rdma_atomic)
                        goto inval;

        switch (new_state) {
        case IB_QPS_RESET:
                if (qp->state != IB_QPS_RESET)
                        rvt_reset_qp(rdi, qp, ibqp->qp_type);
                break;

        case IB_QPS_RTR:
                /* Allow event to re-trigger if QP set to RTR more than once */
                qp->r_flags &= ~RVT_R_COMM_EST;
                qp->state = new_state;
                break;

        case IB_QPS_SQD:
                qp->s_draining = qp->s_last != qp->s_cur;
                qp->state = new_state;
                break;

        case IB_QPS_SQE:
                if (qp->ibqp.qp_type == IB_QPT_RC)
                        goto inval;
                qp->state = new_state;
                break;

        case IB_QPS_ERR:
                lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
                break;

        default:
                qp->state = new_state;
                break;
        }

        if (attr_mask & IB_QP_PKEY_INDEX)
                qp->s_pkey_index = attr->pkey_index;

        if (attr_mask & IB_QP_PORT)
                qp->port_num = attr->port_num;

        if (attr_mask & IB_QP_DEST_QPN)
                qp->remote_qpn = attr->dest_qp_num;

        if (attr_mask & IB_QP_SQ_PSN) {
                qp->s_next_psn = attr->sq_psn & rdi->dparms.psn_modify_mask;
                qp->s_psn = qp->s_next_psn;
                qp->s_sending_psn = qp->s_next_psn;
                qp->s_last_psn = qp->s_next_psn - 1;
                qp->s_sending_hpsn = qp->s_last_psn;
        }

        if (attr_mask & IB_QP_RQ_PSN)
                qp->r_psn = attr->rq_psn & rdi->dparms.psn_modify_mask;

        if (attr_mask & IB_QP_ACCESS_FLAGS)
                qp->qp_access_flags = attr->qp_access_flags;

        if (attr_mask & IB_QP_AV) {
                qp->remote_ah_attr = attr->ah_attr;
                qp->s_srate = attr->ah_attr.static_rate;
                qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
        }

        if (attr_mask & IB_QP_ALT_PATH) {
                qp->alt_ah_attr = attr->alt_ah_attr;
                qp->s_alt_pkey_index = attr->alt_pkey_index;
        }

        if (attr_mask & IB_QP_PATH_MIG_STATE) {
                qp->s_mig_state = attr->path_mig_state;
                if (mig) {
                        qp->remote_ah_attr = qp->alt_ah_attr;
                        qp->port_num = qp->alt_ah_attr.port_num;
                        qp->s_pkey_index = qp->s_alt_pkey_index;
                }
        }

        if (attr_mask & IB_QP_PATH_MTU) {
                qp->pmtu = rdi->driver_f.mtu_from_qp(rdi, qp, pmtu);
                qp->path_mtu = rdi->driver_f.mtu_to_path_mtu(qp->pmtu);
        }

        if (attr_mask & IB_QP_RETRY_CNT) {
                qp->s_retry_cnt = attr->retry_cnt;
                qp->s_retry = attr->retry_cnt;
        }

        if (attr_mask & IB_QP_RNR_RETRY) {
                qp->s_rnr_retry_cnt = attr->rnr_retry;
                qp->s_rnr_retry = attr->rnr_retry;
        }

        if (attr_mask & IB_QP_MIN_RNR_TIMER)
                qp->r_min_rnr_timer = attr->min_rnr_timer;

        if (attr_mask & IB_QP_TIMEOUT) {
                qp->timeout = attr->timeout;
                qp->timeout_jiffies =
                        usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
                                         1000UL);
        }

        if (attr_mask & IB_QP_QKEY)
                qp->qkey = attr->qkey;

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
                qp->s_max_rd_atomic = attr->max_rd_atomic;

        if (rdi->driver_f.modify_qp)
                rdi->driver_f.modify_qp(qp, attr, attr_mask, udata);

        spin_unlock(&qp->s_lock);
        spin_unlock_irq(&qp->r_lock);

        if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
                rvt_insert_qp(rdi, qp);

        if (lastwqe) {
                ev.device = qp->ibqp.device;
                ev.element.qp = &qp->ibqp;
                ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
                qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
        }
        if (mig) {
                ev.device = qp->ibqp.device;
                ev.element.qp = &qp->ibqp;
                ev.event = IB_EVENT_PATH_MIG;
                qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
        }
        return 0;

inval:
        spin_unlock(&qp->s_lock);
        spin_unlock_irq(&qp->r_lock);
        return -EINVAL;
}
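
/*
 * rvt_modify_qp() above is deliberately validate-then-apply: every
 * attribute in attr_mask is checked while both r_lock and s_lock are
 * held, the attributes are applied, and only after the locks are
 * dropped are the LAST_WQE and PATH_MIG events delivered, so the event
 * handlers run without any rvt locks held.
 */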

/**
 * rvt_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int rvt_destroy_qp(struct ib_qp *ibqp)
{
        struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
        struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

        spin_lock_irq(&qp->r_lock);
        spin_lock(&qp->s_lock);
        rvt_reset_qp(rdi, qp, ibqp->qp_type);
        spin_unlock(&qp->s_lock);
        spin_unlock_irq(&qp->r_lock);

        /* qpn is now available for use again */
        rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

        spin_lock(&rdi->n_qps_lock);
        rdi->n_qps_allocated--;
        spin_unlock(&rdi->n_qps_lock);

        if (qp->ip)
                kref_put(&qp->ip->ref, rvt_release_mmap_info);
        else
                vfree(qp->r_rq.wq);
        vfree(qp->s_wq);
        rdi->driver_f.qp_priv_free(rdi, qp);
        kfree(qp);
        return 0;
}

int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                 int attr_mask, struct ib_qp_init_attr *init_attr)
{
        struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
        struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

        attr->qp_state = qp->state;
        attr->cur_qp_state = attr->qp_state;
        attr->path_mtu = qp->path_mtu;
        attr->path_mig_state = qp->s_mig_state;
        attr->qkey = qp->qkey;
        attr->rq_psn = qp->r_psn & rdi->dparms.psn_mask;
        attr->sq_psn = qp->s_next_psn & rdi->dparms.psn_mask;
        attr->dest_qp_num = qp->remote_qpn;
        attr->qp_access_flags = qp->qp_access_flags;
        attr->cap.max_send_wr = qp->s_size - 1;
        attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
        attr->cap.max_send_sge = qp->s_max_sge;
        attr->cap.max_recv_sge = qp->r_rq.max_sge;
        attr->cap.max_inline_data = 0;
        attr->ah_attr = qp->remote_ah_attr;
        attr->alt_ah_attr = qp->alt_ah_attr;
        attr->pkey_index = qp->s_pkey_index;
        attr->alt_pkey_index = qp->s_alt_pkey_index;
        attr->en_sqd_async_notify = 0;
        attr->sq_draining = qp->s_draining;
        attr->max_rd_atomic = qp->s_max_rd_atomic;
        attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
        attr->min_rnr_timer = qp->r_min_rnr_timer;
        attr->port_num = qp->port_num;
        attr->timeout = qp->timeout;
        attr->retry_cnt = qp->s_retry_cnt;
        attr->rnr_retry = qp->s_rnr_retry_cnt;
        attr->alt_port_num = qp->alt_ah_attr.port_num;
        attr->alt_timeout = qp->alt_timeout;

        init_attr->event_handler = qp->ibqp.event_handler;
        init_attr->qp_context = qp->ibqp.qp_context;
        init_attr->send_cq = qp->ibqp.send_cq;
        init_attr->recv_cq = qp->ibqp.recv_cq;
        init_attr->srq = qp->ibqp.srq;
        init_attr->cap = attr->cap;
        if (qp->s_flags & RVT_S_SIGNAL_REQ_WR)
                init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
        else
                init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
        init_attr->qp_type = qp->ibqp.qp_type;
        init_attr->port_num = qp->port_num;
        return 0;
}
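
/*
 * The send and receive rings below use the classic one-empty-slot
 * convention: head == tail means empty and (head + 1) % size == tail
 * means full, so one entry is always sacrificed to tell the two states
 * apart without a separate count.
 */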

/**
 * rvt_post_recv - post a receive on a QP
 * @ibqp: the QP to post the receive on
 * @wr: the WR to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 */
int rvt_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                  struct ib_recv_wr **bad_wr)
{
        struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
        struct rvt_rwq *wq = qp->r_rq.wq;
        unsigned long flags;

        /* Check that state is OK to post receive. */
        if (!(ib_rvt_state_ops[qp->state] & RVT_POST_RECV_OK) || !wq) {
                *bad_wr = wr;
                return -EINVAL;
        }

        for (; wr; wr = wr->next) {
                struct rvt_rwqe *wqe;
                u32 next;
                int i;

                if ((unsigned)wr->num_sge > qp->r_rq.max_sge) {
                        *bad_wr = wr;
                        return -EINVAL;
                }

                spin_lock_irqsave(&qp->r_rq.lock, flags);
                next = wq->head + 1;
                if (next >= qp->r_rq.size)
                        next = 0;
                if (next == wq->tail) {
                        spin_unlock_irqrestore(&qp->r_rq.lock, flags);
                        *bad_wr = wr;
                        return -ENOMEM;
                }

                wqe = rvt_get_rwqe_ptr(&qp->r_rq, wq->head);
                wqe->wr_id = wr->wr_id;
                wqe->num_sge = wr->num_sge;
                for (i = 0; i < wr->num_sge; i++)
                        wqe->sg_list[i] = wr->sg_list[i];
                /* Make sure queue entry is written before the head index. */
                smp_wmb();
                wq->head = next;
                spin_unlock_irqrestore(&qp->r_rq.lock, flags);
        }
        return 0;
}
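
/*
 * The smp_wmb() in rvt_post_recv() above orders the WQE writes before
 * the head update; the consumer (the driver's receive path, not in
 * this file) is expected to pair it with a read barrier before
 * trusting the entry it sees at the new head.
 */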

/**
 * rvt_post_one_wr - post one RC, UC, or UD send work request
 * @qp: the QP to post on
 * @wr: the work request to send
 */
static int rvt_post_one_wr(struct rvt_qp *qp, struct ib_send_wr *wr)
{
        struct rvt_swqe *wqe;
        u32 next;
        int i;
        int j;
        int acc;
        struct rvt_lkey_table *rkt;
        struct rvt_pd *pd;
        struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

        /* IB spec says that num_sge == 0 is OK. */
        if (unlikely(wr->num_sge > qp->s_max_sge))
                return -EINVAL;

        /*
         * Don't allow RDMA reads or atomic operations on UC, and don't
         * allow undefined operations.
         * Make sure the buffer is large enough to hold the result for
         * atomics.
         */
        if (qp->ibqp.qp_type == IB_QPT_UC) {
                if ((unsigned)wr->opcode >= IB_WR_RDMA_READ)
                        return -EINVAL;
        } else if (qp->ibqp.qp_type != IB_QPT_RC) {
                /* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */
                if (wr->opcode != IB_WR_SEND &&
                    wr->opcode != IB_WR_SEND_WITH_IMM)
                        return -EINVAL;
                /* Check UD destination address PD */
                if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
                        return -EINVAL;
        } else if ((unsigned)wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) {
                return -EINVAL;
        } else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
                   (wr->num_sge == 0 ||
                    wr->sg_list[0].length < sizeof(u64) ||
                    wr->sg_list[0].addr & (sizeof(u64) - 1))) {
                return -EINVAL;
        } else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic) {
                return -EINVAL;
        }

        next = qp->s_head + 1;
        if (next >= qp->s_size)
                next = 0;
        if (next == qp->s_last)
                return -ENOMEM;

        if (rdi->driver_f.check_send_wr &&
            rdi->driver_f.check_send_wr(qp, wr))
                return -EINVAL;

        rkt = &rdi->lkey_table;
        pd = ibpd_to_rvtpd(qp->ibqp.pd);
        wqe = rvt_get_swqe_ptr(qp, qp->s_head);

        if (qp->ibqp.qp_type != IB_QPT_UC &&
            qp->ibqp.qp_type != IB_QPT_RC)
                memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr));
        else if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
                 wr->opcode == IB_WR_RDMA_WRITE ||
                 wr->opcode == IB_WR_RDMA_READ)
                memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr));
        else if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
                 wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
                memcpy(&wqe->atomic_wr, atomic_wr(wr), sizeof(wqe->atomic_wr));
        else
                memcpy(&wqe->wr, wr, sizeof(wqe->wr));

        wqe->length = 0;
        j = 0;
        if (wr->num_sge) {
                acc = wr->opcode >= IB_WR_RDMA_READ ?
                        IB_ACCESS_LOCAL_WRITE : 0;
                for (i = 0; i < wr->num_sge; i++) {
                        u32 length = wr->sg_list[i].length;
                        int ok;

                        if (length == 0)
                                continue;
                        ok = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j],
                                         &wr->sg_list[i], acc);
                        if (!ok)
                                goto bail_inval_free;
                        wqe->length += length;
                        j++;
                }
                wqe->wr.num_sge = j;
        }
        if (qp->ibqp.qp_type == IB_QPT_UC ||
            qp->ibqp.qp_type == IB_QPT_RC) {
                if (wqe->length > 0x80000000U)
                        goto bail_inval_free;
        } else {
                atomic_inc(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount);
        }
        wqe->ssn = qp->s_ssn++;
        qp->s_head = next;

        return 0;

bail_inval_free:
        /* release mr holds */
        while (j) {
                struct rvt_sge *sge = &wqe->sg_list[--j];

                rvt_put_mr(sge->mr);
        }
        return -EINVAL;
}
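
/*
 * Example of the atomic checks in rvt_post_one_wr() above: an
 * IB_WR_ATOMIC_CMP_AND_SWP or IB_WR_ATOMIC_FETCH_AND_ADD returns a
 * 64-bit result, so the first SGE must be at least sizeof(u64) bytes
 * long and 8-byte aligned; anything else is rejected with -EINVAL.
 */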

/**
 * rvt_post_send - post a send on a QP
 * @ibqp: the QP to post the send on
 * @wr: the list of work requests to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 */
int rvt_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                  struct ib_send_wr **bad_wr)
{
        struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
        struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
        unsigned long flags = 0;
        int call_send;
        unsigned nreq = 0;
        int err = 0;

        spin_lock_irqsave(&qp->s_lock, flags);

        /*
         * Ensure the QP state allows posting sends. Checking once here
         * means we do not have to recheck for every work request.
         */
        if (unlikely(!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))) {
                spin_unlock_irqrestore(&qp->s_lock, flags);
                return -EINVAL;
        }

        /*
         * If the send queue is empty and there is only a single WR, just go
         * ahead and kick the send engine into gear. Otherwise we will always
         * just schedule the send to happen later.
         */
        call_send = qp->s_head == ACCESS_ONCE(qp->s_last) && !wr->next;

        for (; wr; wr = wr->next) {
                err = rvt_post_one_wr(qp, wr);
                if (unlikely(err)) {
                        *bad_wr = wr;
                        goto bail;
                }
                nreq++;
        }
bail:
        if (nreq && !call_send)
                rdi->driver_f.schedule_send(qp);
        spin_unlock_irqrestore(&qp->s_lock, flags);
        if (nreq && call_send)
                rdi->driver_f.do_send(qp);
        return err;
}

/**
 * rvt_post_srq_recv - post a receive on a shared receive queue
 * @ibsrq: the SRQ to post the receive on
 * @wr: the list of work requests to post
 * @bad_wr: A pointer to the first WR to cause a problem is put here
 *
 * This may be called from interrupt context.
 */
int rvt_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
                      struct ib_recv_wr **bad_wr)
{
        struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
        struct rvt_rwq *wq;
        unsigned long flags;

        for (; wr; wr = wr->next) {
                struct rvt_rwqe *wqe;
                u32 next;
                int i;

                if ((unsigned)wr->num_sge > srq->rq.max_sge) {
                        *bad_wr = wr;
                        return -EINVAL;
                }

                spin_lock_irqsave(&srq->rq.lock, flags);
                wq = srq->rq.wq;
                next = wq->head + 1;
                if (next >= srq->rq.size)
                        next = 0;
                if (next == wq->tail) {
                        spin_unlock_irqrestore(&srq->rq.lock, flags);
                        *bad_wr = wr;
                        return -ENOMEM;
                }

                wqe = rvt_get_rwqe_ptr(&srq->rq, wq->head);
                wqe->wr_id = wr->wr_id;
                wqe->num_sge = wr->num_sge;
                for (i = 0; i < wr->num_sge; i++)
                        wqe->sg_list[i] = wr->sg_list[i];
                /* Make sure queue entry is written before the head index. */
                smp_wmb();
                wq->head = next;
                spin_unlock_irqrestore(&srq->rq.lock, flags);
        }
        return 0;
}

void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
        struct rvt_qpn_map *map;

        map = qpt->map + qpn / RVT_BITS_PER_PAGE;
        if (map->page)
                clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
}
EXPORT_SYMBOL(rvt_free_qpn);

void rvt_dec_qp_cnt(struct rvt_dev_info *rdi)
{
        spin_lock(&rdi->n_qps_lock);
        rdi->n_qps_allocated--;
        spin_unlock(&rdi->n_qps_lock);
}
EXPORT_SYMBOL(rvt_dec_qp_cnt);