1 /* 2 * Copyright(c) 2016 - 2018 Intel Corporation. 3 * 4 * This file is provided under a dual BSD/GPLv2 license. When using or 5 * redistributing this file, you may do so under either license. 6 * 7 * GPL LICENSE SUMMARY 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of version 2 of the GNU General Public License as 11 * published by the Free Software Foundation. 12 * 13 * This program is distributed in the hope that it will be useful, but 14 * WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16 * General Public License for more details. 17 * 18 * BSD LICENSE 19 * 20 * Redistribution and use in source and binary forms, with or without 21 * modification, are permitted provided that the following conditions 22 * are met: 23 * 24 * - Redistributions of source code must retain the above copyright 25 * notice, this list of conditions and the following disclaimer. 26 * - Redistributions in binary form must reproduce the above copyright 27 * notice, this list of conditions and the following disclaimer in 28 * the documentation and/or other materials provided with the 29 * distribution. 30 * - Neither the name of Intel Corporation nor the names of its 31 * contributors may be used to endorse or promote products derived 32 * from this software without specific prior written permission. 33 * 34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 45 * 46 */ 47 48 #include <linux/hash.h> 49 #include <linux/bitops.h> 50 #include <linux/lockdep.h> 51 #include <linux/vmalloc.h> 52 #include <linux/slab.h> 53 #include <rdma/ib_verbs.h> 54 #include <rdma/ib_hdrs.h> 55 #include <rdma/opa_addr.h> 56 #include <rdma/uverbs_ioctl.h> 57 #include "qp.h" 58 #include "vt.h" 59 #include "trace.h" 60 61 static void rvt_rc_timeout(struct timer_list *t); 62 63 /* 64 * Convert the AETH RNR timeout code into the number of microseconds. 
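 * For example, code 0x01 maps to 10 (0.01 ms) and code 0x14 to 10240 (10.24 ms); code 0x00 is the largest value, 655360 (655.36 ms).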
65 */ 66 static const u32 ib_rvt_rnr_table[32] = { 67 655360, /* 00: 655.36 */ 68 10, /* 01: .01 */ 69 20, /* 02 .02 */ 70 30, /* 03: .03 */ 71 40, /* 04: .04 */ 72 60, /* 05: .06 */ 73 80, /* 06: .08 */ 74 120, /* 07: .12 */ 75 160, /* 08: .16 */ 76 240, /* 09: .24 */ 77 320, /* 0A: .32 */ 78 480, /* 0B: .48 */ 79 640, /* 0C: .64 */ 80 960, /* 0D: .96 */ 81 1280, /* 0E: 1.28 */ 82 1920, /* 0F: 1.92 */ 83 2560, /* 10: 2.56 */ 84 3840, /* 11: 3.84 */ 85 5120, /* 12: 5.12 */ 86 7680, /* 13: 7.68 */ 87 10240, /* 14: 10.24 */ 88 15360, /* 15: 15.36 */ 89 20480, /* 16: 20.48 */ 90 30720, /* 17: 30.72 */ 91 40960, /* 18: 40.96 */ 92 61440, /* 19: 61.44 */ 93 81920, /* 1A: 81.92 */ 94 122880, /* 1B: 122.88 */ 95 163840, /* 1C: 163.84 */ 96 245760, /* 1D: 245.76 */ 97 327680, /* 1E: 327.68 */ 98 491520 /* 1F: 491.52 */ 99 }; 100 101 /* 102 * Note that it is OK to post send work requests in the SQE and ERR 103 * states; rvt_do_send() will process them and generate error 104 * completions as per IB 1.2 C10-96. 105 */ 106 const int ib_rvt_state_ops[IB_QPS_ERR + 1] = { 107 [IB_QPS_RESET] = 0, 108 [IB_QPS_INIT] = RVT_POST_RECV_OK, 109 [IB_QPS_RTR] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK, 110 [IB_QPS_RTS] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK | 111 RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK | 112 RVT_PROCESS_NEXT_SEND_OK, 113 [IB_QPS_SQD] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK | 114 RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK, 115 [IB_QPS_SQE] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK | 116 RVT_POST_SEND_OK | RVT_FLUSH_SEND, 117 [IB_QPS_ERR] = RVT_POST_RECV_OK | RVT_FLUSH_RECV | 118 RVT_POST_SEND_OK | RVT_FLUSH_SEND, 119 }; 120 EXPORT_SYMBOL(ib_rvt_state_ops); 121 122 /* platform specific: return the last level cache (llc) size, in KiB */ 123 static int rvt_wss_llc_size(void) 124 { 125 /* assume that the boot CPU value is universal for all CPUs */ 126 return boot_cpu_data.x86_cache_size; 127 } 128 129 /* platform specific: cacheless copy */ 130 static void cacheless_memcpy(void *dst, void *src, size_t n) 131 { 132 /* 133 * Use the only available X64 cacheless copy. Add a __user cast 134 * to quiet sparse. The src agument is already in the kernel so 135 * there are no security issues. The extra fault recovery machinery 136 * is not invoked. 
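 * __copy_user_nocache() performs the copy with non-temporal stores, so the data being copied does not displace what is already resident in the CPU caches.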
137 */ 138 __copy_user_nocache(dst, (void __user *)src, n, 0); 139 } 140 141 void rvt_wss_exit(struct rvt_dev_info *rdi) 142 { 143 struct rvt_wss *wss = rdi->wss; 144 145 if (!wss) 146 return; 147 148 /* coded to handle partially initialized and repeat callers */ 149 kfree(wss->entries); 150 wss->entries = NULL; 151 kfree(rdi->wss); 152 rdi->wss = NULL; 153 } 154 155 /** 156 * rvt_wss_init - Init wss data structures 157 * 158 * Return: 0 on success 159 */ 160 int rvt_wss_init(struct rvt_dev_info *rdi) 161 { 162 unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode; 163 unsigned int wss_threshold = rdi->dparms.wss_threshold; 164 unsigned int wss_clean_period = rdi->dparms.wss_clean_period; 165 long llc_size; 166 long llc_bits; 167 long table_size; 168 long table_bits; 169 struct rvt_wss *wss; 170 int node = rdi->dparms.node; 171 172 if (sge_copy_mode != RVT_SGE_COPY_ADAPTIVE) { 173 rdi->wss = NULL; 174 return 0; 175 } 176 177 rdi->wss = kzalloc_node(sizeof(*rdi->wss), GFP_KERNEL, node); 178 if (!rdi->wss) 179 return -ENOMEM; 180 wss = rdi->wss; 181 182 /* check for a valid percent range - default to 80 if none or invalid */ 183 if (wss_threshold < 1 || wss_threshold > 100) 184 wss_threshold = 80; 185 186 /* reject a wildly large period */ 187 if (wss_clean_period > 1000000) 188 wss_clean_period = 256; 189 190 /* reject a zero period */ 191 if (wss_clean_period == 0) 192 wss_clean_period = 1; 193 194 /* 195 * Calculate the table size - the next power of 2 larger than the 196 * LLC size. LLC size is in KiB. 197 */ 198 llc_size = rvt_wss_llc_size() * 1024; 199 table_size = roundup_pow_of_two(llc_size); 200 201 /* one bit per page in rounded up table */ 202 llc_bits = llc_size / PAGE_SIZE; 203 table_bits = table_size / PAGE_SIZE; 204 wss->pages_mask = table_bits - 1; 205 wss->num_entries = table_bits / BITS_PER_LONG; 206 207 wss->threshold = (llc_bits * wss_threshold) / 100; 208 if (wss->threshold == 0) 209 wss->threshold = 1; 210 211 wss->clean_period = wss_clean_period; 212 atomic_set(&wss->clean_counter, wss_clean_period); 213 214 wss->entries = kcalloc_node(wss->num_entries, sizeof(*wss->entries), 215 GFP_KERNEL, node); 216 if (!wss->entries) { 217 rvt_wss_exit(rdi); 218 return -ENOMEM; 219 } 220 221 return 0; 222 } 223 224 /* 225 * Advance the clean counter. When the clean period has expired, 226 * clean an entry. 227 * 228 * This is implemented in atomics to avoid locking. Because multiple 229 * variables are involved, it can be racy which can lead to slightly 230 * inaccurate information. Since this is only a heuristic, this is 231 * OK. Any innaccuracies will clean themselves out as the counter 232 * advances. That said, it is unlikely the entry clean operation will 233 * race - the next possible racer will not start until the next clean 234 * period. 235 * 236 * The clean counter is implemented as a decrement to zero. When zero 237 * is reached an entry is cleaned. 238 */ 239 static void wss_advance_clean_counter(struct rvt_wss *wss) 240 { 241 int entry; 242 int weight; 243 unsigned long bits; 244 245 /* become the cleaner if we decrement the counter to zero */ 246 if (atomic_dec_and_test(&wss->clean_counter)) { 247 /* 248 * Set, not add, the clean period. This avoids an issue 249 * where the counter could decrement below the clean period. 250 * Doing a set can result in lost decrements, slowing the 251 * clean advance. Since this a heuristic, this possible 252 * slowdown is OK. 
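 * (A lost decrement only means a few extra wss_insert() calls before the next entry gets cleaned.)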
253 * 254 * An alternative is to loop, advancing the counter by a 255 * clean period until the result is > 0. However, this could 256 * lead to several threads keeping another in the clean loop. 257 * This could be mitigated by limiting the number of times 258 * we stay in the loop. 259 */ 260 atomic_set(&wss->clean_counter, wss->clean_period); 261 262 /* 263 * Uniquely grab the entry to clean and move to next. 264 * The current entry is always the lower bits of 265 * wss.clean_entry. The table size, wss.num_entries, 266 * is always a power-of-2. 267 */ 268 entry = (atomic_inc_return(&wss->clean_entry) - 1) 269 & (wss->num_entries - 1); 270 271 /* clear the entry and count the bits */ 272 bits = xchg(&wss->entries[entry], 0); 273 weight = hweight64((u64)bits); 274 /* only adjust the contended total count if needed */ 275 if (weight) 276 atomic_sub(weight, &wss->total_count); 277 } 278 } 279 280 /* 281 * Insert the given address into the working set array. 282 */ 283 static void wss_insert(struct rvt_wss *wss, void *address) 284 { 285 u32 page = ((unsigned long)address >> PAGE_SHIFT) & wss->pages_mask; 286 u32 entry = page / BITS_PER_LONG; /* assumes this ends up a shift */ 287 u32 nr = page & (BITS_PER_LONG - 1); 288 289 if (!test_and_set_bit(nr, &wss->entries[entry])) 290 atomic_inc(&wss->total_count); 291 292 wss_advance_clean_counter(wss); 293 } 294 295 /* 296 * Is the working set larger than the threshold? 297 */ 298 static inline bool wss_exceeds_threshold(struct rvt_wss *wss) 299 { 300 return atomic_read(&wss->total_count) >= wss->threshold; 301 } 302 303 static void get_map_page(struct rvt_qpn_table *qpt, 304 struct rvt_qpn_map *map) 305 { 306 unsigned long page = get_zeroed_page(GFP_KERNEL); 307 308 /* 309 * Free the page if someone raced with us installing it. 310 */ 311 312 spin_lock(&qpt->lock); 313 if (map->page) 314 free_page(page); 315 else 316 map->page = (void *)page; 317 spin_unlock(&qpt->lock); 318 } 319 320 /** 321 * init_qpn_table - initialize the QP number table for a device 322 * @qpt: the QPN table 323 */ 324 static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt) 325 { 326 u32 offset, i; 327 struct rvt_qpn_map *map; 328 int ret = 0; 329 330 if (!(rdi->dparms.qpn_res_end >= rdi->dparms.qpn_res_start)) 331 return -EINVAL; 332 333 spin_lock_init(&qpt->lock); 334 335 qpt->last = rdi->dparms.qpn_start; 336 qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift; 337 338 /* 339 * Drivers may want some QPs beyond what we need for verbs let them use 340 * our qpn table. No need for two. Lets go ahead and mark the bitmaps 341 * for those. The reserved range must be *after* the range which verbs 342 * will pick from. 
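 * Every QPN in [qpn_res_start, qpn_res_end] is marked busy in the bitmap below, so alloc_qpn() will never hand one of them out for a verbs QP.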
343 */ 344 345 /* Figure out number of bit maps needed before reserved range */ 346 qpt->nmaps = rdi->dparms.qpn_res_start / RVT_BITS_PER_PAGE; 347 348 /* This should always be zero */ 349 offset = rdi->dparms.qpn_res_start & RVT_BITS_PER_PAGE_MASK; 350 351 /* Starting with the first reserved bit map */ 352 map = &qpt->map[qpt->nmaps]; 353 354 rvt_pr_info(rdi, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n", 355 rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end); 356 for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) { 357 if (!map->page) { 358 get_map_page(qpt, map); 359 if (!map->page) { 360 ret = -ENOMEM; 361 break; 362 } 363 } 364 set_bit(offset, map->page); 365 offset++; 366 if (offset == RVT_BITS_PER_PAGE) { 367 /* next page */ 368 qpt->nmaps++; 369 map++; 370 offset = 0; 371 } 372 } 373 return ret; 374 } 375 376 /** 377 * free_qpn_table - free the QP number table for a device 378 * @qpt: the QPN table 379 */ 380 static void free_qpn_table(struct rvt_qpn_table *qpt) 381 { 382 int i; 383 384 for (i = 0; i < ARRAY_SIZE(qpt->map); i++) 385 free_page((unsigned long)qpt->map[i].page); 386 } 387 388 /** 389 * rvt_driver_qp_init - Init driver qp resources 390 * @rdi: rvt dev strucutre 391 * 392 * Return: 0 on success 393 */ 394 int rvt_driver_qp_init(struct rvt_dev_info *rdi) 395 { 396 int i; 397 int ret = -ENOMEM; 398 399 if (!rdi->dparms.qp_table_size) 400 return -EINVAL; 401 402 /* 403 * If driver is not doing any QP allocation then make sure it is 404 * providing the necessary QP functions. 405 */ 406 if (!rdi->driver_f.free_all_qps || 407 !rdi->driver_f.qp_priv_alloc || 408 !rdi->driver_f.qp_priv_free || 409 !rdi->driver_f.notify_qp_reset || 410 !rdi->driver_f.notify_restart_rc) 411 return -EINVAL; 412 413 /* allocate parent object */ 414 rdi->qp_dev = kzalloc_node(sizeof(*rdi->qp_dev), GFP_KERNEL, 415 rdi->dparms.node); 416 if (!rdi->qp_dev) 417 return -ENOMEM; 418 419 /* allocate hash table */ 420 rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size; 421 rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size); 422 rdi->qp_dev->qp_table = 423 kmalloc_array_node(rdi->qp_dev->qp_table_size, 424 sizeof(*rdi->qp_dev->qp_table), 425 GFP_KERNEL, rdi->dparms.node); 426 if (!rdi->qp_dev->qp_table) 427 goto no_qp_table; 428 429 for (i = 0; i < rdi->qp_dev->qp_table_size; i++) 430 RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL); 431 432 spin_lock_init(&rdi->qp_dev->qpt_lock); 433 434 /* initialize qpn map */ 435 if (init_qpn_table(rdi, &rdi->qp_dev->qpn_table)) 436 goto fail_table; 437 438 spin_lock_init(&rdi->n_qps_lock); 439 440 return 0; 441 442 fail_table: 443 kfree(rdi->qp_dev->qp_table); 444 free_qpn_table(&rdi->qp_dev->qpn_table); 445 446 no_qp_table: 447 kfree(rdi->qp_dev); 448 449 return ret; 450 } 451 452 /** 453 * free_all_qps - check for QPs still in use 454 * @rdi: rvt device info structure 455 * 456 * There should not be any QPs still in use. 457 * Free memory for table. 
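 * Return: the number of QPs still in use, including any reported by the driver's free_all_qps callback and the multicast tree check.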
458 */ 459 static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi) 460 { 461 unsigned long flags; 462 struct rvt_qp *qp; 463 unsigned n, qp_inuse = 0; 464 spinlock_t *ql; /* work around too long line below */ 465 466 if (rdi->driver_f.free_all_qps) 467 qp_inuse = rdi->driver_f.free_all_qps(rdi); 468 469 qp_inuse += rvt_mcast_tree_empty(rdi); 470 471 if (!rdi->qp_dev) 472 return qp_inuse; 473 474 ql = &rdi->qp_dev->qpt_lock; 475 spin_lock_irqsave(ql, flags); 476 for (n = 0; n < rdi->qp_dev->qp_table_size; n++) { 477 qp = rcu_dereference_protected(rdi->qp_dev->qp_table[n], 478 lockdep_is_held(ql)); 479 RCU_INIT_POINTER(rdi->qp_dev->qp_table[n], NULL); 480 481 for (; qp; qp = rcu_dereference_protected(qp->next, 482 lockdep_is_held(ql))) 483 qp_inuse++; 484 } 485 spin_unlock_irqrestore(ql, flags); 486 synchronize_rcu(); 487 return qp_inuse; 488 } 489 490 /** 491 * rvt_qp_exit - clean up qps on device exit 492 * @rdi: rvt dev structure 493 * 494 * Check for qp leaks and free resources. 495 */ 496 void rvt_qp_exit(struct rvt_dev_info *rdi) 497 { 498 u32 qps_inuse = rvt_free_all_qps(rdi); 499 500 if (qps_inuse) 501 rvt_pr_err(rdi, "QP memory leak! %u still in use\n", 502 qps_inuse); 503 if (!rdi->qp_dev) 504 return; 505 506 kfree(rdi->qp_dev->qp_table); 507 free_qpn_table(&rdi->qp_dev->qpn_table); 508 kfree(rdi->qp_dev); 509 } 510 511 static inline unsigned mk_qpn(struct rvt_qpn_table *qpt, 512 struct rvt_qpn_map *map, unsigned off) 513 { 514 return (map - qpt->map) * RVT_BITS_PER_PAGE + off; 515 } 516 517 /** 518 * alloc_qpn - Allocate the next available qpn or zero/one for QP type 519 * IB_QPT_SMI/IB_QPT_GSI 520 * @rdi: rvt device info structure 521 * @qpt: queue pair number table pointer 522 * @port_num: IB port number, 1 based, comes from core 523 * 524 * Return: The queue pair number 525 */ 526 static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt, 527 enum ib_qp_type type, u8 port_num) 528 { 529 u32 i, offset, max_scan, qpn; 530 struct rvt_qpn_map *map; 531 u32 ret; 532 533 if (rdi->driver_f.alloc_qpn) 534 return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num); 535 536 if (type == IB_QPT_SMI || type == IB_QPT_GSI) { 537 unsigned n; 538 539 ret = type == IB_QPT_GSI; 540 n = 1 << (ret + 2 * (port_num - 1)); 541 spin_lock(&qpt->lock); 542 if (qpt->flags & n) 543 ret = -EINVAL; 544 else 545 qpt->flags |= n; 546 spin_unlock(&qpt->lock); 547 goto bail; 548 } 549 550 qpn = qpt->last + qpt->incr; 551 if (qpn >= RVT_QPN_MAX) 552 qpn = qpt->incr | ((qpt->last & 1) ^ 1); 553 /* offset carries bit 0 */ 554 offset = qpn & RVT_BITS_PER_PAGE_MASK; 555 map = &qpt->map[qpn / RVT_BITS_PER_PAGE]; 556 max_scan = qpt->nmaps - !offset; 557 for (i = 0;;) { 558 if (unlikely(!map->page)) { 559 get_map_page(qpt, map); 560 if (unlikely(!map->page)) 561 break; 562 } 563 do { 564 if (!test_and_set_bit(offset, map->page)) { 565 qpt->last = qpn; 566 ret = qpn; 567 goto bail; 568 } 569 offset += qpt->incr; 570 /* 571 * This qpn might be bogus if offset >= BITS_PER_PAGE. 572 * That is OK. It gets re-assigned below 573 */ 574 qpn = mk_qpn(qpt, map, offset); 575 } while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX); 576 /* 577 * In order to keep the number of pages allocated to a 578 * minimum, we scan the all existing pages before increasing 579 * the size of the bitmap table. 
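 * If every map has been scanned and the table cannot grow any further, the search is abandoned and -ENOMEM is returned.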
580 */ 581 if (++i > max_scan) { 582 if (qpt->nmaps == RVT_QPNMAP_ENTRIES) 583 break; 584 map = &qpt->map[qpt->nmaps++]; 585 /* start at incr with current bit 0 */ 586 offset = qpt->incr | (offset & 1); 587 } else if (map < &qpt->map[qpt->nmaps]) { 588 ++map; 589 /* start at incr with current bit 0 */ 590 offset = qpt->incr | (offset & 1); 591 } else { 592 map = &qpt->map[0]; 593 /* wrap to first map page, invert bit 0 */ 594 offset = qpt->incr | ((offset & 1) ^ 1); 595 } 596 /* there can be no set bits in low-order QoS bits */ 597 WARN_ON(offset & (BIT(rdi->dparms.qos_shift) - 1)); 598 qpn = mk_qpn(qpt, map, offset); 599 } 600 601 ret = -ENOMEM; 602 603 bail: 604 return ret; 605 } 606 607 /** 608 * rvt_clear_mr_refs - Drop help mr refs 609 * @qp: rvt qp data structure 610 * @clr_sends: If shoudl clear send side or not 611 */ 612 static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends) 613 { 614 unsigned n; 615 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device); 616 617 if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags)) 618 rvt_put_ss(&qp->s_rdma_read_sge); 619 620 rvt_put_ss(&qp->r_sge); 621 622 if (clr_sends) { 623 while (qp->s_last != qp->s_head) { 624 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last); 625 626 rvt_put_swqe(wqe); 627 628 if (qp->ibqp.qp_type == IB_QPT_UD || 629 qp->ibqp.qp_type == IB_QPT_SMI || 630 qp->ibqp.qp_type == IB_QPT_GSI) 631 atomic_dec(&ibah_to_rvtah( 632 wqe->ud_wr.ah)->refcount); 633 if (++qp->s_last >= qp->s_size) 634 qp->s_last = 0; 635 smp_wmb(); /* see qp_set_savail */ 636 } 637 if (qp->s_rdma_mr) { 638 rvt_put_mr(qp->s_rdma_mr); 639 qp->s_rdma_mr = NULL; 640 } 641 } 642 643 for (n = 0; qp->s_ack_queue && n < rvt_max_atomic(rdi); n++) { 644 struct rvt_ack_entry *e = &qp->s_ack_queue[n]; 645 646 if (e->rdma_sge.mr) { 647 rvt_put_mr(e->rdma_sge.mr); 648 e->rdma_sge.mr = NULL; 649 } 650 } 651 } 652 653 /** 654 * rvt_swqe_has_lkey - return true if lkey is used by swqe 655 * @wqe - the send wqe 656 * @lkey - the lkey 657 * 658 * Test the swqe for using lkey 659 */ 660 static bool rvt_swqe_has_lkey(struct rvt_swqe *wqe, u32 lkey) 661 { 662 int i; 663 664 for (i = 0; i < wqe->wr.num_sge; i++) { 665 struct rvt_sge *sge = &wqe->sg_list[i]; 666 667 if (rvt_mr_has_lkey(sge->mr, lkey)) 668 return true; 669 } 670 return false; 671 } 672 673 /** 674 * rvt_qp_sends_has_lkey - return true is qp sends use lkey 675 * @qp - the rvt_qp 676 * @lkey - the lkey 677 */ 678 static bool rvt_qp_sends_has_lkey(struct rvt_qp *qp, u32 lkey) 679 { 680 u32 s_last = qp->s_last; 681 682 while (s_last != qp->s_head) { 683 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, s_last); 684 685 if (rvt_swqe_has_lkey(wqe, lkey)) 686 return true; 687 688 if (++s_last >= qp->s_size) 689 s_last = 0; 690 } 691 if (qp->s_rdma_mr) 692 if (rvt_mr_has_lkey(qp->s_rdma_mr, lkey)) 693 return true; 694 return false; 695 } 696 697 /** 698 * rvt_qp_acks_has_lkey - return true if acks have lkey 699 * @qp - the qp 700 * @lkey - the lkey 701 */ 702 static bool rvt_qp_acks_has_lkey(struct rvt_qp *qp, u32 lkey) 703 { 704 int i; 705 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device); 706 707 for (i = 0; qp->s_ack_queue && i < rvt_max_atomic(rdi); i++) { 708 struct rvt_ack_entry *e = &qp->s_ack_queue[i]; 709 710 if (rvt_mr_has_lkey(e->rdma_sge.mr, lkey)) 711 return true; 712 } 713 return false; 714 } 715 716 /* 717 * rvt_qp_mr_clean - clean up remote ops for lkey 718 * @qp - the qp 719 * @lkey - the lkey that is being de-registered 720 * 721 * This routine checks if the lkey is being used by 722 * 
the qp. 723 * 724 * If so, the qp is put into an error state to eliminate 725 * any references from the qp. 726 */ 727 void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey) 728 { 729 bool lastwqe = false; 730 731 if (qp->ibqp.qp_type == IB_QPT_SMI || 732 qp->ibqp.qp_type == IB_QPT_GSI) 733 /* avoid special QPs */ 734 return; 735 spin_lock_irq(&qp->r_lock); 736 spin_lock(&qp->s_hlock); 737 spin_lock(&qp->s_lock); 738 739 if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET) 740 goto check_lwqe; 741 742 if (rvt_ss_has_lkey(&qp->r_sge, lkey) || 743 rvt_qp_sends_has_lkey(qp, lkey) || 744 rvt_qp_acks_has_lkey(qp, lkey)) 745 lastwqe = rvt_error_qp(qp, IB_WC_LOC_PROT_ERR); 746 check_lwqe: 747 spin_unlock(&qp->s_lock); 748 spin_unlock(&qp->s_hlock); 749 spin_unlock_irq(&qp->r_lock); 750 if (lastwqe) { 751 struct ib_event ev; 752 753 ev.device = qp->ibqp.device; 754 ev.element.qp = &qp->ibqp; 755 ev.event = IB_EVENT_QP_LAST_WQE_REACHED; 756 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); 757 } 758 } 759 760 /** 761 * rvt_remove_qp - remove qp from table 762 * @rdi: rvt dev struct 763 * @qp: qp to remove 764 * 765 * Remove the QP from the table so it can't be found asynchronously by 766 * the receive routine. 767 */ 768 static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp) 769 { 770 struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1]; 771 u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits); 772 unsigned long flags; 773 int removed = 1; 774 775 spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags); 776 777 if (rcu_dereference_protected(rvp->qp[0], 778 lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) { 779 RCU_INIT_POINTER(rvp->qp[0], NULL); 780 } else if (rcu_dereference_protected(rvp->qp[1], 781 lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) { 782 RCU_INIT_POINTER(rvp->qp[1], NULL); 783 } else { 784 struct rvt_qp *q; 785 struct rvt_qp __rcu **qpp; 786 787 removed = 0; 788 qpp = &rdi->qp_dev->qp_table[n]; 789 for (; (q = rcu_dereference_protected(*qpp, 790 lockdep_is_held(&rdi->qp_dev->qpt_lock))) != NULL; 791 qpp = &q->next) { 792 if (q == qp) { 793 RCU_INIT_POINTER(*qpp, 794 rcu_dereference_protected(qp->next, 795 lockdep_is_held(&rdi->qp_dev->qpt_lock))); 796 removed = 1; 797 trace_rvt_qpremove(qp, n); 798 break; 799 } 800 } 801 } 802 803 spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags); 804 if (removed) { 805 synchronize_rcu(); 806 rvt_put_qp(qp); 807 } 808 } 809 810 /** 811 * rvt_init_qp - initialize the QP state to the reset state 812 * @qp: the QP to init or reinit 813 * @type: the QP type 814 * 815 * This function is called from both rvt_create_qp() and 816 * rvt_reset_qp(). The difference is that the reset 817 * path holds the necessary locks to protect against concurrent 818 * access.
819 */ 820 static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, 821 enum ib_qp_type type) 822 { 823 qp->remote_qpn = 0; 824 qp->qkey = 0; 825 qp->qp_access_flags = 0; 826 qp->s_flags &= RVT_S_SIGNAL_REQ_WR; 827 qp->s_hdrwords = 0; 828 qp->s_wqe = NULL; 829 qp->s_draining = 0; 830 qp->s_next_psn = 0; 831 qp->s_last_psn = 0; 832 qp->s_sending_psn = 0; 833 qp->s_sending_hpsn = 0; 834 qp->s_psn = 0; 835 qp->r_psn = 0; 836 qp->r_msn = 0; 837 if (type == IB_QPT_RC) { 838 qp->s_state = IB_OPCODE_RC_SEND_LAST; 839 qp->r_state = IB_OPCODE_RC_SEND_LAST; 840 } else { 841 qp->s_state = IB_OPCODE_UC_SEND_LAST; 842 qp->r_state = IB_OPCODE_UC_SEND_LAST; 843 } 844 qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE; 845 qp->r_nak_state = 0; 846 qp->r_aflags = 0; 847 qp->r_flags = 0; 848 qp->s_head = 0; 849 qp->s_tail = 0; 850 qp->s_cur = 0; 851 qp->s_acked = 0; 852 qp->s_last = 0; 853 qp->s_ssn = 1; 854 qp->s_lsn = 0; 855 qp->s_mig_state = IB_MIG_MIGRATED; 856 qp->r_head_ack_queue = 0; 857 qp->s_tail_ack_queue = 0; 858 qp->s_acked_ack_queue = 0; 859 qp->s_num_rd_atomic = 0; 860 if (qp->r_rq.wq) { 861 qp->r_rq.wq->head = 0; 862 qp->r_rq.wq->tail = 0; 863 } 864 qp->r_sge.num_sge = 0; 865 atomic_set(&qp->s_reserved_used, 0); 866 } 867 868 /** 869 * rvt_reset_qp - initialize the QP state to the reset state 870 * @qp: the QP to reset 871 * @type: the QP type 872 * 873 * r_lock, s_hlock, and s_lock are required to be held by the caller 874 */ 875 static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, 876 enum ib_qp_type type) 877 __must_hold(&qp->s_lock) 878 __must_hold(&qp->s_hlock) 879 __must_hold(&qp->r_lock) 880 { 881 lockdep_assert_held(&qp->r_lock); 882 lockdep_assert_held(&qp->s_hlock); 883 lockdep_assert_held(&qp->s_lock); 884 if (qp->state != IB_QPS_RESET) { 885 qp->state = IB_QPS_RESET; 886 887 /* Let drivers flush their waitlist */ 888 rdi->driver_f.flush_qp_waiters(qp); 889 rvt_stop_rc_timers(qp); 890 qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT); 891 spin_unlock(&qp->s_lock); 892 spin_unlock(&qp->s_hlock); 893 spin_unlock_irq(&qp->r_lock); 894 895 /* Stop the send queue and the retry timer */ 896 rdi->driver_f.stop_send_queue(qp); 897 rvt_del_timers_sync(qp); 898 /* Wait for things to stop */ 899 rdi->driver_f.quiesce_qp(qp); 900 901 /* take qp out the hash and wait for it to be unused */ 902 rvt_remove_qp(rdi, qp); 903 904 /* grab the lock b/c it was locked at call time */ 905 spin_lock_irq(&qp->r_lock); 906 spin_lock(&qp->s_hlock); 907 spin_lock(&qp->s_lock); 908 909 rvt_clear_mr_refs(qp, 1); 910 /* 911 * Let the driver do any tear down or re-init it needs to for 912 * a qp that has been reset 913 */ 914 rdi->driver_f.notify_qp_reset(qp); 915 } 916 rvt_init_qp(rdi, qp, type); 917 lockdep_assert_held(&qp->r_lock); 918 lockdep_assert_held(&qp->s_hlock); 919 lockdep_assert_held(&qp->s_lock); 920 } 921 922 /** rvt_free_qpn - Free a qpn from the bit map 923 * @qpt: QP table 924 * @qpn: queue pair number to free 925 */ 926 static void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn) 927 { 928 struct rvt_qpn_map *map; 929 930 map = qpt->map + (qpn & RVT_QPN_MASK) / RVT_BITS_PER_PAGE; 931 if (map->page) 932 clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page); 933 } 934 935 /** 936 * rvt_create_qp - create a queue pair for a device 937 * @ibpd: the protection domain who's device we create the queue pair for 938 * @init_attr: the attributes of the queue pair 939 * @udata: user data for libibverbs.so 940 * 941 * Queue pair creation is mostly an rvt issue. 
However, drivers have their own 942 * unique idea of what queue pair numbers mean. For instance there is a reserved 943 * range for PSM. 944 * 945 * Return: the queue pair on success, otherwise returns an errno. 946 * 947 * Called by the ib_create_qp() core verbs function. 948 */ 949 struct ib_qp *rvt_create_qp(struct ib_pd *ibpd, 950 struct ib_qp_init_attr *init_attr, 951 struct ib_udata *udata) 952 { 953 struct rvt_qp *qp; 954 int err; 955 struct rvt_swqe *swq = NULL; 956 size_t sz; 957 size_t sg_list_sz; 958 struct ib_qp *ret = ERR_PTR(-ENOMEM); 959 struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device); 960 struct rvt_ucontext *ucontext = rdma_udata_to_drv_context( 961 udata, struct rvt_ucontext, ibucontext); 962 void *priv = NULL; 963 size_t sqsize; 964 965 if (!rdi) 966 return ERR_PTR(-EINVAL); 967 968 if (init_attr->cap.max_send_sge > rdi->dparms.props.max_send_sge || 969 init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr || 970 init_attr->create_flags) 971 return ERR_PTR(-EINVAL); 972 973 /* Check receive queue parameters if no SRQ is specified. */ 974 if (!init_attr->srq) { 975 if (init_attr->cap.max_recv_sge > 976 rdi->dparms.props.max_recv_sge || 977 init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr) 978 return ERR_PTR(-EINVAL); 979 980 if (init_attr->cap.max_send_sge + 981 init_attr->cap.max_send_wr + 982 init_attr->cap.max_recv_sge + 983 init_attr->cap.max_recv_wr == 0) 984 return ERR_PTR(-EINVAL); 985 } 986 sqsize = 987 init_attr->cap.max_send_wr + 1 + 988 rdi->dparms.reserved_operations; 989 switch (init_attr->qp_type) { 990 case IB_QPT_SMI: 991 case IB_QPT_GSI: 992 if (init_attr->port_num == 0 || 993 init_attr->port_num > ibpd->device->phys_port_cnt) 994 return ERR_PTR(-EINVAL); 995 /* fall through */ 996 case IB_QPT_UC: 997 case IB_QPT_RC: 998 case IB_QPT_UD: 999 sz = sizeof(struct rvt_sge) * 1000 init_attr->cap.max_send_sge + 1001 sizeof(struct rvt_swqe); 1002 swq = vzalloc_node(array_size(sz, sqsize), rdi->dparms.node); 1003 if (!swq) 1004 return ERR_PTR(-ENOMEM); 1005 1006 sz = sizeof(*qp); 1007 sg_list_sz = 0; 1008 if (init_attr->srq) { 1009 struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq); 1010 1011 if (srq->rq.max_sge > 1) 1012 sg_list_sz = sizeof(*qp->r_sg_list) * 1013 (srq->rq.max_sge - 1); 1014 } else if (init_attr->cap.max_recv_sge > 1) 1015 sg_list_sz = sizeof(*qp->r_sg_list) * 1016 (init_attr->cap.max_recv_sge - 1); 1017 qp = kzalloc_node(sz + sg_list_sz, GFP_KERNEL, 1018 rdi->dparms.node); 1019 if (!qp) 1020 goto bail_swq; 1021 1022 RCU_INIT_POINTER(qp->next, NULL); 1023 if (init_attr->qp_type == IB_QPT_RC) { 1024 qp->s_ack_queue = 1025 kcalloc_node(rvt_max_atomic(rdi), 1026 sizeof(*qp->s_ack_queue), 1027 GFP_KERNEL, 1028 rdi->dparms.node); 1029 if (!qp->s_ack_queue) 1030 goto bail_qp; 1031 } 1032 /* initialize timers needed for rc qp */ 1033 timer_setup(&qp->s_timer, rvt_rc_timeout, 0); 1034 hrtimer_init(&qp->s_rnr_timer, CLOCK_MONOTONIC, 1035 HRTIMER_MODE_REL); 1036 qp->s_rnr_timer.function = rvt_rc_rnr_retry; 1037 1038 /* 1039 * Driver needs to set up it's private QP structure and do any 1040 * initialization that is needed. 
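 * qp_priv_alloc() returns an ERR_PTR() on failure, which is propagated straight back to the caller of rvt_create_qp().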
1041 */ 1042 priv = rdi->driver_f.qp_priv_alloc(rdi, qp); 1043 if (IS_ERR(priv)) { 1044 ret = priv; 1045 goto bail_qp; 1046 } 1047 qp->priv = priv; 1048 qp->timeout_jiffies = 1049 usecs_to_jiffies((4096UL * (1UL << qp->timeout)) / 1050 1000UL); 1051 if (init_attr->srq) { 1052 sz = 0; 1053 } else { 1054 qp->r_rq.size = init_attr->cap.max_recv_wr + 1; 1055 qp->r_rq.max_sge = init_attr->cap.max_recv_sge; 1056 sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) + 1057 sizeof(struct rvt_rwqe); 1058 if (udata) 1059 qp->r_rq.wq = vmalloc_user( 1060 sizeof(struct rvt_rwq) + 1061 qp->r_rq.size * sz); 1062 else 1063 qp->r_rq.wq = vzalloc_node( 1064 sizeof(struct rvt_rwq) + 1065 qp->r_rq.size * sz, 1066 rdi->dparms.node); 1067 if (!qp->r_rq.wq) 1068 goto bail_driver_priv; 1069 } 1070 1071 /* 1072 * ib_create_qp() will initialize qp->ibqp 1073 * except for qp->ibqp.qp_num. 1074 */ 1075 spin_lock_init(&qp->r_lock); 1076 spin_lock_init(&qp->s_hlock); 1077 spin_lock_init(&qp->s_lock); 1078 spin_lock_init(&qp->r_rq.lock); 1079 atomic_set(&qp->refcount, 0); 1080 atomic_set(&qp->local_ops_pending, 0); 1081 init_waitqueue_head(&qp->wait); 1082 INIT_LIST_HEAD(&qp->rspwait); 1083 qp->state = IB_QPS_RESET; 1084 qp->s_wq = swq; 1085 qp->s_size = sqsize; 1086 qp->s_avail = init_attr->cap.max_send_wr; 1087 qp->s_max_sge = init_attr->cap.max_send_sge; 1088 if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR) 1089 qp->s_flags = RVT_S_SIGNAL_REQ_WR; 1090 1091 err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table, 1092 init_attr->qp_type, 1093 init_attr->port_num); 1094 if (err < 0) { 1095 ret = ERR_PTR(err); 1096 goto bail_rq_wq; 1097 } 1098 qp->ibqp.qp_num = err; 1099 qp->port_num = init_attr->port_num; 1100 rvt_init_qp(rdi, qp, init_attr->qp_type); 1101 if (rdi->driver_f.qp_priv_init) { 1102 err = rdi->driver_f.qp_priv_init(rdi, qp, init_attr); 1103 if (err) { 1104 ret = ERR_PTR(err); 1105 goto bail_rq_wq; 1106 } 1107 } 1108 break; 1109 1110 default: 1111 /* Don't support raw QPs */ 1112 return ERR_PTR(-EINVAL); 1113 } 1114 1115 init_attr->cap.max_inline_data = 0; 1116 1117 /* 1118 * Return the address of the RWQ as the offset to mmap. 1119 * See rvt_mmap() for details. 1120 */ 1121 if (udata && udata->outlen >= sizeof(__u64)) { 1122 if (!qp->r_rq.wq) { 1123 __u64 offset = 0; 1124 1125 err = ib_copy_to_udata(udata, &offset, 1126 sizeof(offset)); 1127 if (err) { 1128 ret = ERR_PTR(err); 1129 goto bail_qpn; 1130 } 1131 } else { 1132 u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz; 1133 1134 qp->ip = rvt_create_mmap_info(rdi, s, 1135 &ucontext->ibucontext, 1136 qp->r_rq.wq); 1137 if (!qp->ip) { 1138 ret = ERR_PTR(-ENOMEM); 1139 goto bail_qpn; 1140 } 1141 1142 err = ib_copy_to_udata(udata, &qp->ip->offset, 1143 sizeof(qp->ip->offset)); 1144 if (err) { 1145 ret = ERR_PTR(err); 1146 goto bail_ip; 1147 } 1148 } 1149 qp->pid = current->pid; 1150 } 1151 1152 spin_lock(&rdi->n_qps_lock); 1153 if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) { 1154 spin_unlock(&rdi->n_qps_lock); 1155 ret = ERR_PTR(-ENOMEM); 1156 goto bail_ip; 1157 } 1158 1159 rdi->n_qps_allocated++; 1160 /* 1161 * Maintain a busy_jiffies variable that will be added to the timeout 1162 * period in mod_retry_timer and add_retry_timer. This busy jiffies 1163 * is scaled by the number of rc qps created for the device to reduce 1164 * the number of timeouts occurring when there is a large number of 1165 * qps. busy_jiffies is incremented every rc qp scaling interval. 
1166 * The scaling interval is selected based on extensive performance 1167 * evaluation of targeted workloads. 1168 */ 1169 if (init_attr->qp_type == IB_QPT_RC) { 1170 rdi->n_rc_qps++; 1171 rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL; 1172 } 1173 spin_unlock(&rdi->n_qps_lock); 1174 1175 if (qp->ip) { 1176 spin_lock_irq(&rdi->pending_lock); 1177 list_add(&qp->ip->pending_mmaps, &rdi->pending_mmaps); 1178 spin_unlock_irq(&rdi->pending_lock); 1179 } 1180 1181 ret = &qp->ibqp; 1182 1183 /* 1184 * We have our QP and its good, now keep track of what types of opcodes 1185 * can be processed on this QP. We do this by keeping track of what the 1186 * 3 high order bits of the opcode are. 1187 */ 1188 switch (init_attr->qp_type) { 1189 case IB_QPT_SMI: 1190 case IB_QPT_GSI: 1191 case IB_QPT_UD: 1192 qp->allowed_ops = IB_OPCODE_UD; 1193 break; 1194 case IB_QPT_RC: 1195 qp->allowed_ops = IB_OPCODE_RC; 1196 break; 1197 case IB_QPT_UC: 1198 qp->allowed_ops = IB_OPCODE_UC; 1199 break; 1200 default: 1201 ret = ERR_PTR(-EINVAL); 1202 goto bail_ip; 1203 } 1204 1205 return ret; 1206 1207 bail_ip: 1208 if (qp->ip) 1209 kref_put(&qp->ip->ref, rvt_release_mmap_info); 1210 1211 bail_qpn: 1212 rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num); 1213 1214 bail_rq_wq: 1215 if (!qp->ip) 1216 vfree(qp->r_rq.wq); 1217 1218 bail_driver_priv: 1219 rdi->driver_f.qp_priv_free(rdi, qp); 1220 1221 bail_qp: 1222 kfree(qp->s_ack_queue); 1223 kfree(qp); 1224 1225 bail_swq: 1226 vfree(swq); 1227 1228 return ret; 1229 } 1230 1231 /** 1232 * rvt_error_qp - put a QP into the error state 1233 * @qp: the QP to put into the error state 1234 * @err: the receive completion error to signal if a RWQE is active 1235 * 1236 * Flushes both send and receive work queues. 1237 * 1238 * Return: true if last WQE event should be generated. 1239 * The QP r_lock and s_lock should be held and interrupts disabled. 1240 * If we are already in error state, just return. 1241 */ 1242 int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err) 1243 { 1244 struct ib_wc wc; 1245 int ret = 0; 1246 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device); 1247 1248 lockdep_assert_held(&qp->r_lock); 1249 lockdep_assert_held(&qp->s_lock); 1250 if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET) 1251 goto bail; 1252 1253 qp->state = IB_QPS_ERR; 1254 1255 if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) { 1256 qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR); 1257 del_timer(&qp->s_timer); 1258 } 1259 1260 if (qp->s_flags & RVT_S_ANY_WAIT_SEND) 1261 qp->s_flags &= ~RVT_S_ANY_WAIT_SEND; 1262 1263 rdi->driver_f.notify_error_qp(qp); 1264 1265 /* Schedule the sending tasklet to drain the send work queue. 
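 * (there is nothing to drain when s_last == s_head, i.e. the queue is empty)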
*/ 1266 if (READ_ONCE(qp->s_last) != qp->s_head) 1267 rdi->driver_f.schedule_send(qp); 1268 1269 rvt_clear_mr_refs(qp, 0); 1270 1271 memset(&wc, 0, sizeof(wc)); 1272 wc.qp = &qp->ibqp; 1273 wc.opcode = IB_WC_RECV; 1274 1275 if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) { 1276 wc.wr_id = qp->r_wr_id; 1277 wc.status = err; 1278 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1); 1279 } 1280 wc.status = IB_WC_WR_FLUSH_ERR; 1281 1282 if (qp->r_rq.wq) { 1283 struct rvt_rwq *wq; 1284 u32 head; 1285 u32 tail; 1286 1287 spin_lock(&qp->r_rq.lock); 1288 1289 /* sanity check pointers before trusting them */ 1290 wq = qp->r_rq.wq; 1291 head = wq->head; 1292 if (head >= qp->r_rq.size) 1293 head = 0; 1294 tail = wq->tail; 1295 if (tail >= qp->r_rq.size) 1296 tail = 0; 1297 while (tail != head) { 1298 wc.wr_id = rvt_get_rwqe_ptr(&qp->r_rq, tail)->wr_id; 1299 if (++tail >= qp->r_rq.size) 1300 tail = 0; 1301 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1); 1302 } 1303 wq->tail = tail; 1304 1305 spin_unlock(&qp->r_rq.lock); 1306 } else if (qp->ibqp.event_handler) { 1307 ret = 1; 1308 } 1309 1310 bail: 1311 return ret; 1312 } 1313 EXPORT_SYMBOL(rvt_error_qp); 1314 1315 /* 1316 * Put the QP into the hash table. 1317 * The hash table holds a reference to the QP. 1318 */ 1319 static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp) 1320 { 1321 struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1]; 1322 unsigned long flags; 1323 1324 rvt_get_qp(qp); 1325 spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags); 1326 1327 if (qp->ibqp.qp_num <= 1) { 1328 rcu_assign_pointer(rvp->qp[qp->ibqp.qp_num], qp); 1329 } else { 1330 u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits); 1331 1332 qp->next = rdi->qp_dev->qp_table[n]; 1333 rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp); 1334 trace_rvt_qpinsert(qp, n); 1335 } 1336 1337 spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags); 1338 } 1339 1340 /** 1341 * rvt_modify_qp - modify the attributes of a queue pair 1342 * @ibqp: the queue pair who's attributes we're modifying 1343 * @attr: the new attributes 1344 * @attr_mask: the mask of attributes to modify 1345 * @udata: user data for libibverbs.so 1346 * 1347 * Return: 0 on success, otherwise returns an errno. 1348 */ 1349 int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, 1350 int attr_mask, struct ib_udata *udata) 1351 { 1352 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device); 1353 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp); 1354 enum ib_qp_state cur_state, new_state; 1355 struct ib_event ev; 1356 int lastwqe = 0; 1357 int mig = 0; 1358 int pmtu = 0; /* for gcc warning only */ 1359 int opa_ah; 1360 1361 spin_lock_irq(&qp->r_lock); 1362 spin_lock(&qp->s_hlock); 1363 spin_lock(&qp->s_lock); 1364 1365 cur_state = attr_mask & IB_QP_CUR_STATE ? 1366 attr->cur_qp_state : qp->state; 1367 new_state = attr_mask & IB_QP_STATE ? 
attr->qp_state : cur_state; 1368 opa_ah = rdma_cap_opa_ah(ibqp->device, qp->port_num); 1369 1370 if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, 1371 attr_mask)) 1372 goto inval; 1373 1374 if (rdi->driver_f.check_modify_qp && 1375 rdi->driver_f.check_modify_qp(qp, attr, attr_mask, udata)) 1376 goto inval; 1377 1378 if (attr_mask & IB_QP_AV) { 1379 if (opa_ah) { 1380 if (rdma_ah_get_dlid(&attr->ah_attr) >= 1381 opa_get_mcast_base(OPA_MCAST_NR)) 1382 goto inval; 1383 } else { 1384 if (rdma_ah_get_dlid(&attr->ah_attr) >= 1385 be16_to_cpu(IB_MULTICAST_LID_BASE)) 1386 goto inval; 1387 } 1388 1389 if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr)) 1390 goto inval; 1391 } 1392 1393 if (attr_mask & IB_QP_ALT_PATH) { 1394 if (opa_ah) { 1395 if (rdma_ah_get_dlid(&attr->alt_ah_attr) >= 1396 opa_get_mcast_base(OPA_MCAST_NR)) 1397 goto inval; 1398 } else { 1399 if (rdma_ah_get_dlid(&attr->alt_ah_attr) >= 1400 be16_to_cpu(IB_MULTICAST_LID_BASE)) 1401 goto inval; 1402 } 1403 1404 if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr)) 1405 goto inval; 1406 if (attr->alt_pkey_index >= rvt_get_npkeys(rdi)) 1407 goto inval; 1408 } 1409 1410 if (attr_mask & IB_QP_PKEY_INDEX) 1411 if (attr->pkey_index >= rvt_get_npkeys(rdi)) 1412 goto inval; 1413 1414 if (attr_mask & IB_QP_MIN_RNR_TIMER) 1415 if (attr->min_rnr_timer > 31) 1416 goto inval; 1417 1418 if (attr_mask & IB_QP_PORT) 1419 if (qp->ibqp.qp_type == IB_QPT_SMI || 1420 qp->ibqp.qp_type == IB_QPT_GSI || 1421 attr->port_num == 0 || 1422 attr->port_num > ibqp->device->phys_port_cnt) 1423 goto inval; 1424 1425 if (attr_mask & IB_QP_DEST_QPN) 1426 if (attr->dest_qp_num > RVT_QPN_MASK) 1427 goto inval; 1428 1429 if (attr_mask & IB_QP_RETRY_CNT) 1430 if (attr->retry_cnt > 7) 1431 goto inval; 1432 1433 if (attr_mask & IB_QP_RNR_RETRY) 1434 if (attr->rnr_retry > 7) 1435 goto inval; 1436 1437 /* 1438 * Don't allow invalid path_mtu values. OK to set greater 1439 * than the active mtu (or even the max_cap, if we have tuned 1440 * that to a small mtu. We'll set qp->path_mtu 1441 * to the lesser of requested attribute mtu and active, 1442 * for packetizing messages. 1443 * Note that the QP port has to be set in INIT and MTU in RTR. 
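 * get_pmtu_from_attr() returns a negative value for an MTU the driver cannot support, which rejects the modify below.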
1444 */ 1445 if (attr_mask & IB_QP_PATH_MTU) { 1446 pmtu = rdi->driver_f.get_pmtu_from_attr(rdi, qp, attr); 1447 if (pmtu < 0) 1448 goto inval; 1449 } 1450 1451 if (attr_mask & IB_QP_PATH_MIG_STATE) { 1452 if (attr->path_mig_state == IB_MIG_REARM) { 1453 if (qp->s_mig_state == IB_MIG_ARMED) 1454 goto inval; 1455 if (new_state != IB_QPS_RTS) 1456 goto inval; 1457 } else if (attr->path_mig_state == IB_MIG_MIGRATED) { 1458 if (qp->s_mig_state == IB_MIG_REARM) 1459 goto inval; 1460 if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD) 1461 goto inval; 1462 if (qp->s_mig_state == IB_MIG_ARMED) 1463 mig = 1; 1464 } else { 1465 goto inval; 1466 } 1467 } 1468 1469 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) 1470 if (attr->max_dest_rd_atomic > rdi->dparms.max_rdma_atomic) 1471 goto inval; 1472 1473 switch (new_state) { 1474 case IB_QPS_RESET: 1475 if (qp->state != IB_QPS_RESET) 1476 rvt_reset_qp(rdi, qp, ibqp->qp_type); 1477 break; 1478 1479 case IB_QPS_RTR: 1480 /* Allow event to re-trigger if QP set to RTR more than once */ 1481 qp->r_flags &= ~RVT_R_COMM_EST; 1482 qp->state = new_state; 1483 break; 1484 1485 case IB_QPS_SQD: 1486 qp->s_draining = qp->s_last != qp->s_cur; 1487 qp->state = new_state; 1488 break; 1489 1490 case IB_QPS_SQE: 1491 if (qp->ibqp.qp_type == IB_QPT_RC) 1492 goto inval; 1493 qp->state = new_state; 1494 break; 1495 1496 case IB_QPS_ERR: 1497 lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR); 1498 break; 1499 1500 default: 1501 qp->state = new_state; 1502 break; 1503 } 1504 1505 if (attr_mask & IB_QP_PKEY_INDEX) 1506 qp->s_pkey_index = attr->pkey_index; 1507 1508 if (attr_mask & IB_QP_PORT) 1509 qp->port_num = attr->port_num; 1510 1511 if (attr_mask & IB_QP_DEST_QPN) 1512 qp->remote_qpn = attr->dest_qp_num; 1513 1514 if (attr_mask & IB_QP_SQ_PSN) { 1515 qp->s_next_psn = attr->sq_psn & rdi->dparms.psn_modify_mask; 1516 qp->s_psn = qp->s_next_psn; 1517 qp->s_sending_psn = qp->s_next_psn; 1518 qp->s_last_psn = qp->s_next_psn - 1; 1519 qp->s_sending_hpsn = qp->s_last_psn; 1520 } 1521 1522 if (attr_mask & IB_QP_RQ_PSN) 1523 qp->r_psn = attr->rq_psn & rdi->dparms.psn_modify_mask; 1524 1525 if (attr_mask & IB_QP_ACCESS_FLAGS) 1526 qp->qp_access_flags = attr->qp_access_flags; 1527 1528 if (attr_mask & IB_QP_AV) { 1529 rdma_replace_ah_attr(&qp->remote_ah_attr, &attr->ah_attr); 1530 qp->s_srate = rdma_ah_get_static_rate(&attr->ah_attr); 1531 qp->srate_mbps = ib_rate_to_mbps(qp->s_srate); 1532 } 1533 1534 if (attr_mask & IB_QP_ALT_PATH) { 1535 rdma_replace_ah_attr(&qp->alt_ah_attr, &attr->alt_ah_attr); 1536 qp->s_alt_pkey_index = attr->alt_pkey_index; 1537 } 1538 1539 if (attr_mask & IB_QP_PATH_MIG_STATE) { 1540 qp->s_mig_state = attr->path_mig_state; 1541 if (mig) { 1542 qp->remote_ah_attr = qp->alt_ah_attr; 1543 qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr); 1544 qp->s_pkey_index = qp->s_alt_pkey_index; 1545 } 1546 } 1547 1548 if (attr_mask & IB_QP_PATH_MTU) { 1549 qp->pmtu = rdi->driver_f.mtu_from_qp(rdi, qp, pmtu); 1550 qp->log_pmtu = ilog2(qp->pmtu); 1551 } 1552 1553 if (attr_mask & IB_QP_RETRY_CNT) { 1554 qp->s_retry_cnt = attr->retry_cnt; 1555 qp->s_retry = attr->retry_cnt; 1556 } 1557 1558 if (attr_mask & IB_QP_RNR_RETRY) { 1559 qp->s_rnr_retry_cnt = attr->rnr_retry; 1560 qp->s_rnr_retry = attr->rnr_retry; 1561 } 1562 1563 if (attr_mask & IB_QP_MIN_RNR_TIMER) 1564 qp->r_min_rnr_timer = attr->min_rnr_timer; 1565 1566 if (attr_mask & IB_QP_TIMEOUT) { 1567 qp->timeout = attr->timeout; 1568 qp->timeout_jiffies = rvt_timeout_to_jiffies(qp->timeout); 1569 } 1570 1571 if 
(attr_mask & IB_QP_QKEY) 1572 qp->qkey = attr->qkey; 1573 1574 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) 1575 qp->r_max_rd_atomic = attr->max_dest_rd_atomic; 1576 1577 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) 1578 qp->s_max_rd_atomic = attr->max_rd_atomic; 1579 1580 if (rdi->driver_f.modify_qp) 1581 rdi->driver_f.modify_qp(qp, attr, attr_mask, udata); 1582 1583 spin_unlock(&qp->s_lock); 1584 spin_unlock(&qp->s_hlock); 1585 spin_unlock_irq(&qp->r_lock); 1586 1587 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) 1588 rvt_insert_qp(rdi, qp); 1589 1590 if (lastwqe) { 1591 ev.device = qp->ibqp.device; 1592 ev.element.qp = &qp->ibqp; 1593 ev.event = IB_EVENT_QP_LAST_WQE_REACHED; 1594 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); 1595 } 1596 if (mig) { 1597 ev.device = qp->ibqp.device; 1598 ev.element.qp = &qp->ibqp; 1599 ev.event = IB_EVENT_PATH_MIG; 1600 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); 1601 } 1602 return 0; 1603 1604 inval: 1605 spin_unlock(&qp->s_lock); 1606 spin_unlock(&qp->s_hlock); 1607 spin_unlock_irq(&qp->r_lock); 1608 return -EINVAL; 1609 } 1610 1611 /** 1612 * rvt_destroy_qp - destroy a queue pair 1613 * @ibqp: the queue pair to destroy 1614 * 1615 * Note that this can be called while the QP is actively sending or 1616 * receiving! 1617 * 1618 * Return: 0 on success. 1619 */ 1620 int rvt_destroy_qp(struct ib_qp *ibqp) 1621 { 1622 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp); 1623 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device); 1624 1625 spin_lock_irq(&qp->r_lock); 1626 spin_lock(&qp->s_hlock); 1627 spin_lock(&qp->s_lock); 1628 rvt_reset_qp(rdi, qp, ibqp->qp_type); 1629 spin_unlock(&qp->s_lock); 1630 spin_unlock(&qp->s_hlock); 1631 spin_unlock_irq(&qp->r_lock); 1632 1633 wait_event(qp->wait, !atomic_read(&qp->refcount)); 1634 /* qpn is now available for use again */ 1635 rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num); 1636 1637 spin_lock(&rdi->n_qps_lock); 1638 rdi->n_qps_allocated--; 1639 if (qp->ibqp.qp_type == IB_QPT_RC) { 1640 rdi->n_rc_qps--; 1641 rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL; 1642 } 1643 spin_unlock(&rdi->n_qps_lock); 1644 1645 if (qp->ip) 1646 kref_put(&qp->ip->ref, rvt_release_mmap_info); 1647 else 1648 vfree(qp->r_rq.wq); 1649 rdi->driver_f.qp_priv_free(rdi, qp); 1650 kfree(qp->s_ack_queue); 1651 rdma_destroy_ah_attr(&qp->remote_ah_attr); 1652 rdma_destroy_ah_attr(&qp->alt_ah_attr); 1653 vfree(qp->s_wq); 1654 kfree(qp); 1655 return 0; 1656 } 1657 1658 /** 1659 * rvt_query_qp - query an ipbq 1660 * @ibqp: IB qp to query 1661 * @attr: attr struct to fill in 1662 * @attr_mask: attr mask ignored 1663 * @init_attr: struct to fill in 1664 * 1665 * Return: always 0 1666 */ 1667 int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, 1668 int attr_mask, struct ib_qp_init_attr *init_attr) 1669 { 1670 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp); 1671 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device); 1672 1673 attr->qp_state = qp->state; 1674 attr->cur_qp_state = attr->qp_state; 1675 attr->path_mtu = rdi->driver_f.mtu_to_path_mtu(qp->pmtu); 1676 attr->path_mig_state = qp->s_mig_state; 1677 attr->qkey = qp->qkey; 1678 attr->rq_psn = qp->r_psn & rdi->dparms.psn_mask; 1679 attr->sq_psn = qp->s_next_psn & rdi->dparms.psn_mask; 1680 attr->dest_qp_num = qp->remote_qpn; 1681 attr->qp_access_flags = qp->qp_access_flags; 1682 attr->cap.max_send_wr = qp->s_size - 1 - 1683 rdi->dparms.reserved_operations; 1684 attr->cap.max_recv_wr = qp->ibqp.srq ? 
0 : qp->r_rq.size - 1; 1685 attr->cap.max_send_sge = qp->s_max_sge; 1686 attr->cap.max_recv_sge = qp->r_rq.max_sge; 1687 attr->cap.max_inline_data = 0; 1688 attr->ah_attr = qp->remote_ah_attr; 1689 attr->alt_ah_attr = qp->alt_ah_attr; 1690 attr->pkey_index = qp->s_pkey_index; 1691 attr->alt_pkey_index = qp->s_alt_pkey_index; 1692 attr->en_sqd_async_notify = 0; 1693 attr->sq_draining = qp->s_draining; 1694 attr->max_rd_atomic = qp->s_max_rd_atomic; 1695 attr->max_dest_rd_atomic = qp->r_max_rd_atomic; 1696 attr->min_rnr_timer = qp->r_min_rnr_timer; 1697 attr->port_num = qp->port_num; 1698 attr->timeout = qp->timeout; 1699 attr->retry_cnt = qp->s_retry_cnt; 1700 attr->rnr_retry = qp->s_rnr_retry_cnt; 1701 attr->alt_port_num = 1702 rdma_ah_get_port_num(&qp->alt_ah_attr); 1703 attr->alt_timeout = qp->alt_timeout; 1704 1705 init_attr->event_handler = qp->ibqp.event_handler; 1706 init_attr->qp_context = qp->ibqp.qp_context; 1707 init_attr->send_cq = qp->ibqp.send_cq; 1708 init_attr->recv_cq = qp->ibqp.recv_cq; 1709 init_attr->srq = qp->ibqp.srq; 1710 init_attr->cap = attr->cap; 1711 if (qp->s_flags & RVT_S_SIGNAL_REQ_WR) 1712 init_attr->sq_sig_type = IB_SIGNAL_REQ_WR; 1713 else 1714 init_attr->sq_sig_type = IB_SIGNAL_ALL_WR; 1715 init_attr->qp_type = qp->ibqp.qp_type; 1716 init_attr->port_num = qp->port_num; 1717 return 0; 1718 } 1719 1720 /** 1721 * rvt_post_receive - post a receive on a QP 1722 * @ibqp: the QP to post the receive on 1723 * @wr: the WR to post 1724 * @bad_wr: the first bad WR is put here 1725 * 1726 * This may be called from interrupt context. 1727 * 1728 * Return: 0 on success otherwise errno 1729 */ 1730 int rvt_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, 1731 const struct ib_recv_wr **bad_wr) 1732 { 1733 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp); 1734 struct rvt_rwq *wq = qp->r_rq.wq; 1735 unsigned long flags; 1736 int qp_err_flush = (ib_rvt_state_ops[qp->state] & RVT_FLUSH_RECV) && 1737 !qp->ibqp.srq; 1738 1739 /* Check that state is OK to post receive. */ 1740 if (!(ib_rvt_state_ops[qp->state] & RVT_POST_RECV_OK) || !wq) { 1741 *bad_wr = wr; 1742 return -EINVAL; 1743 } 1744 1745 for (; wr; wr = wr->next) { 1746 struct rvt_rwqe *wqe; 1747 u32 next; 1748 int i; 1749 1750 if ((unsigned)wr->num_sge > qp->r_rq.max_sge) { 1751 *bad_wr = wr; 1752 return -EINVAL; 1753 } 1754 1755 spin_lock_irqsave(&qp->r_rq.lock, flags); 1756 next = wq->head + 1; 1757 if (next >= qp->r_rq.size) 1758 next = 0; 1759 if (next == wq->tail) { 1760 spin_unlock_irqrestore(&qp->r_rq.lock, flags); 1761 *bad_wr = wr; 1762 return -ENOMEM; 1763 } 1764 if (unlikely(qp_err_flush)) { 1765 struct ib_wc wc; 1766 1767 memset(&wc, 0, sizeof(wc)); 1768 wc.qp = &qp->ibqp; 1769 wc.opcode = IB_WC_RECV; 1770 wc.wr_id = wr->wr_id; 1771 wc.status = IB_WC_WR_FLUSH_ERR; 1772 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1); 1773 } else { 1774 wqe = rvt_get_rwqe_ptr(&qp->r_rq, wq->head); 1775 wqe->wr_id = wr->wr_id; 1776 wqe->num_sge = wr->num_sge; 1777 for (i = 0; i < wr->num_sge; i++) 1778 wqe->sg_list[i] = wr->sg_list[i]; 1779 /* 1780 * Make sure queue entry is written 1781 * before the head index. 
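 * (a consumer that observes the updated head is then guaranteed to see the fully written queue entry)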
1782 */ 1783 smp_wmb(); 1784 wq->head = next; 1785 } 1786 spin_unlock_irqrestore(&qp->r_rq.lock, flags); 1787 } 1788 return 0; 1789 } 1790 1791 /** 1792 * rvt_qp_valid_operation - validate post send wr request 1793 * @qp - the qp 1794 * @post_parms - the post send table for the driver 1795 * @wr - the work request 1796 * 1797 * The routine validates the operation based on the 1798 * validation table and returns the length of the operation 1799 * which can extend beyond the ib_send_wr. Operation 1800 * dependent flags key atomic operation validation. 1801 * 1802 * There is an exception for UD qps that validates the pd and 1803 * overrides the length to include the additional UD specific 1804 * length. 1805 * 1806 * Returns a negative error or the length of the work request 1807 * for building the swqe. 1808 */ 1809 static inline int rvt_qp_valid_operation( 1810 struct rvt_qp *qp, 1811 const struct rvt_operation_params *post_parms, 1812 const struct ib_send_wr *wr) 1813 { 1814 int len; 1815 1816 if (wr->opcode >= RVT_OPERATION_MAX || !post_parms[wr->opcode].length) 1817 return -EINVAL; 1818 if (!(post_parms[wr->opcode].qpt_support & BIT(qp->ibqp.qp_type))) 1819 return -EINVAL; 1820 if ((post_parms[wr->opcode].flags & RVT_OPERATION_PRIV) && 1821 ibpd_to_rvtpd(qp->ibqp.pd)->user) 1822 return -EINVAL; 1823 if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC_SGE && 1824 (wr->num_sge == 0 || 1825 wr->sg_list[0].length < sizeof(u64) || 1826 wr->sg_list[0].addr & (sizeof(u64) - 1))) 1827 return -EINVAL; 1828 if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC && 1829 !qp->s_max_rd_atomic) 1830 return -EINVAL; 1831 len = post_parms[wr->opcode].length; 1832 /* UD specific */ 1833 if (qp->ibqp.qp_type != IB_QPT_UC && 1834 qp->ibqp.qp_type != IB_QPT_RC) { 1835 if (qp->ibqp.pd != ud_wr(wr)->ah->pd) 1836 return -EINVAL; 1837 len = sizeof(struct ib_ud_wr); 1838 } 1839 return len; 1840 } 1841 1842 /** 1843 * rvt_qp_is_avail - determine queue capacity 1844 * @qp: the qp 1845 * @rdi: the rdmavt device 1846 * @reserved_op: is reserved operation 1847 * 1848 * This assumes the s_hlock is held but the s_last 1849 * qp variable is uncontrolled. 1850 * 1851 * For non reserved operations, the qp->s_avail 1852 * may be changed. 1853 * 1854 * The return value is zero or a -ENOMEM.
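 * A worked example with illustrative values: s_size = 32, s_head = 10,
 * s_last = 4, two reserved operations configured and none in use gives
 * avail = 32 - (10 - 4) = 26, so s_avail is set to 26 - 1 - 2 = 23.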
1855 */ 1856 static inline int rvt_qp_is_avail( 1857 struct rvt_qp *qp, 1858 struct rvt_dev_info *rdi, 1859 bool reserved_op) 1860 { 1861 u32 slast; 1862 u32 avail; 1863 u32 reserved_used; 1864 1865 /* see rvt_qp_wqe_unreserve() */ 1866 smp_mb__before_atomic(); 1867 reserved_used = atomic_read(&qp->s_reserved_used); 1868 if (unlikely(reserved_op)) { 1869 /* see rvt_qp_wqe_unreserve() */ 1870 smp_mb__before_atomic(); 1871 if (reserved_used >= rdi->dparms.reserved_operations) 1872 return -ENOMEM; 1873 return 0; 1874 } 1875 /* non-reserved operations */ 1876 if (likely(qp->s_avail)) 1877 return 0; 1878 slast = READ_ONCE(qp->s_last); 1879 if (qp->s_head >= slast) 1880 avail = qp->s_size - (qp->s_head - slast); 1881 else 1882 avail = slast - qp->s_head; 1883 1884 /* see rvt_qp_wqe_unreserve() */ 1885 smp_mb__before_atomic(); 1886 reserved_used = atomic_read(&qp->s_reserved_used); 1887 avail = avail - 1 - 1888 (rdi->dparms.reserved_operations - reserved_used); 1889 /* insure we don't assign a negative s_avail */ 1890 if ((s32)avail <= 0) 1891 return -ENOMEM; 1892 qp->s_avail = avail; 1893 if (WARN_ON(qp->s_avail > 1894 (qp->s_size - 1 - rdi->dparms.reserved_operations))) 1895 rvt_pr_err(rdi, 1896 "More avail entries than QP RB size.\nQP: %u, size: %u, avail: %u\nhead: %u, tail: %u, cur: %u, acked: %u, last: %u", 1897 qp->ibqp.qp_num, qp->s_size, qp->s_avail, 1898 qp->s_head, qp->s_tail, qp->s_cur, 1899 qp->s_acked, qp->s_last); 1900 return 0; 1901 } 1902 1903 /** 1904 * rvt_post_one_wr - post one RC, UC, or UD send work request 1905 * @qp: the QP to post on 1906 * @wr: the work request to send 1907 */ 1908 static int rvt_post_one_wr(struct rvt_qp *qp, 1909 const struct ib_send_wr *wr, 1910 bool *call_send) 1911 { 1912 struct rvt_swqe *wqe; 1913 u32 next; 1914 int i; 1915 int j; 1916 int acc; 1917 struct rvt_lkey_table *rkt; 1918 struct rvt_pd *pd; 1919 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device); 1920 u8 log_pmtu; 1921 int ret; 1922 size_t cplen; 1923 bool reserved_op; 1924 int local_ops_delayed = 0; 1925 1926 BUILD_BUG_ON(IB_QPT_MAX >= (sizeof(u32) * BITS_PER_BYTE)); 1927 1928 /* IB spec says that num_sge == 0 is OK. */ 1929 if (unlikely(wr->num_sge > qp->s_max_sge)) 1930 return -EINVAL; 1931 1932 ret = rvt_qp_valid_operation(qp, rdi->post_parms, wr); 1933 if (ret < 0) 1934 return ret; 1935 cplen = ret; 1936 1937 /* 1938 * Local operations include fast register and local invalidate. 1939 * Fast register needs to be processed immediately because the 1940 * registered lkey may be used by following work requests and the 1941 * lkey needs to be valid at the time those requests are posted. 1942 * Local invalidate can be processed immediately if fencing is 1943 * not required and no previous local invalidate ops are pending. 1944 * Signaled local operations that have been processed immediately 1945 * need to have requests with "completion only" flags set posted 1946 * to the send queue in order to generate completions. 
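 * (which is why such requests fall through below and are queued with RVT_SEND_COMPLETION_ONLY set rather than returning early)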
1947 */ 1948 if ((rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL)) { 1949 switch (wr->opcode) { 1950 case IB_WR_REG_MR: 1951 ret = rvt_fast_reg_mr(qp, 1952 reg_wr(wr)->mr, 1953 reg_wr(wr)->key, 1954 reg_wr(wr)->access); 1955 if (ret || !(wr->send_flags & IB_SEND_SIGNALED)) 1956 return ret; 1957 break; 1958 case IB_WR_LOCAL_INV: 1959 if ((wr->send_flags & IB_SEND_FENCE) || 1960 atomic_read(&qp->local_ops_pending)) { 1961 local_ops_delayed = 1; 1962 } else { 1963 ret = rvt_invalidate_rkey( 1964 qp, wr->ex.invalidate_rkey); 1965 if (ret || !(wr->send_flags & IB_SEND_SIGNALED)) 1966 return ret; 1967 } 1968 break; 1969 default: 1970 return -EINVAL; 1971 } 1972 } 1973 1974 reserved_op = rdi->post_parms[wr->opcode].flags & 1975 RVT_OPERATION_USE_RESERVE; 1976 /* check for avail */ 1977 ret = rvt_qp_is_avail(qp, rdi, reserved_op); 1978 if (ret) 1979 return ret; 1980 next = qp->s_head + 1; 1981 if (next >= qp->s_size) 1982 next = 0; 1983 1984 rkt = &rdi->lkey_table; 1985 pd = ibpd_to_rvtpd(qp->ibqp.pd); 1986 wqe = rvt_get_swqe_ptr(qp, qp->s_head); 1987 1988 /* cplen has length from above */ 1989 memcpy(&wqe->wr, wr, cplen); 1990 1991 wqe->length = 0; 1992 j = 0; 1993 if (wr->num_sge) { 1994 struct rvt_sge *last_sge = NULL; 1995 1996 acc = wr->opcode >= IB_WR_RDMA_READ ? 1997 IB_ACCESS_LOCAL_WRITE : 0; 1998 for (i = 0; i < wr->num_sge; i++) { 1999 u32 length = wr->sg_list[i].length; 2000 2001 if (length == 0) 2002 continue; 2003 ret = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j], last_sge, 2004 &wr->sg_list[i], acc); 2005 if (unlikely(ret < 0)) 2006 goto bail_inval_free; 2007 wqe->length += length; 2008 if (ret) 2009 last_sge = &wqe->sg_list[j]; 2010 j += ret; 2011 } 2012 wqe->wr.num_sge = j; 2013 } 2014 2015 /* 2016 * Calculate and set SWQE PSN values prior to handing it off 2017 * to the driver's check routine. This give the driver the 2018 * opportunity to adjust PSN values based on internal checks. 2019 */ 2020 log_pmtu = qp->log_pmtu; 2021 if (qp->ibqp.qp_type != IB_QPT_UC && 2022 qp->ibqp.qp_type != IB_QPT_RC) { 2023 struct rvt_ah *ah = ibah_to_rvtah(wqe->ud_wr.ah); 2024 2025 log_pmtu = ah->log_pmtu; 2026 atomic_inc(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount); 2027 } 2028 2029 if (rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL) { 2030 if (local_ops_delayed) 2031 atomic_inc(&qp->local_ops_pending); 2032 else 2033 wqe->wr.send_flags |= RVT_SEND_COMPLETION_ONLY; 2034 wqe->ssn = 0; 2035 wqe->psn = 0; 2036 wqe->lpsn = 0; 2037 } else { 2038 wqe->ssn = qp->s_ssn++; 2039 wqe->psn = qp->s_next_psn; 2040 wqe->lpsn = wqe->psn + 2041 (wqe->length ? 
2042 ((wqe->length - 1) >> log_pmtu) : 2043 0); 2044 } 2045 2046 /* general part of wqe valid - allow for driver checks */ 2047 if (rdi->driver_f.setup_wqe) { 2048 ret = rdi->driver_f.setup_wqe(qp, wqe, call_send); 2049 if (ret < 0) 2050 goto bail_inval_free_ref; 2051 } 2052 2053 if (!(rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL)) 2054 qp->s_next_psn = wqe->lpsn + 1; 2055 2056 if (unlikely(reserved_op)) { 2057 wqe->wr.send_flags |= RVT_SEND_RESERVE_USED; 2058 rvt_qp_wqe_reserve(qp, wqe); 2059 } else { 2060 wqe->wr.send_flags &= ~RVT_SEND_RESERVE_USED; 2061 qp->s_avail--; 2062 } 2063 trace_rvt_post_one_wr(qp, wqe, wr->num_sge); 2064 smp_wmb(); /* see request builders */ 2065 qp->s_head = next; 2066 2067 return 0; 2068 2069 bail_inval_free_ref: 2070 if (qp->ibqp.qp_type != IB_QPT_UC && 2071 qp->ibqp.qp_type != IB_QPT_RC) 2072 atomic_dec(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount); 2073 bail_inval_free: 2074 /* release mr holds */ 2075 while (j) { 2076 struct rvt_sge *sge = &wqe->sg_list[--j]; 2077 2078 rvt_put_mr(sge->mr); 2079 } 2080 return ret; 2081 } 2082 2083 /** 2084 * rvt_post_send - post a send on a QP 2085 * @ibqp: the QP to post the send on 2086 * @wr: the list of work requests to post 2087 * @bad_wr: the first bad WR is put here 2088 * 2089 * This may be called from interrupt context. 2090 * 2091 * Return: 0 on success else errno 2092 */ 2093 int rvt_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, 2094 const struct ib_send_wr **bad_wr) 2095 { 2096 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp); 2097 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device); 2098 unsigned long flags = 0; 2099 bool call_send; 2100 unsigned nreq = 0; 2101 int err = 0; 2102 2103 spin_lock_irqsave(&qp->s_hlock, flags); 2104 2105 /* 2106 * Ensure QP state is such that we can send. If not bail out early, 2107 * there is no need to do this every time we post a send. 2108 */ 2109 if (unlikely(!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))) { 2110 spin_unlock_irqrestore(&qp->s_hlock, flags); 2111 return -EINVAL; 2112 } 2113 2114 /* 2115 * If the send queue is empty, and we only have a single WR then just go 2116 * ahead and kick the send engine into gear. Otherwise we will always 2117 * just schedule the send to happen later. 2118 */ 2119 call_send = qp->s_head == READ_ONCE(qp->s_last) && !wr->next; 2120 2121 for (; wr; wr = wr->next) { 2122 err = rvt_post_one_wr(qp, wr, &call_send); 2123 if (unlikely(err)) { 2124 *bad_wr = wr; 2125 goto bail; 2126 } 2127 nreq++; 2128 } 2129 bail: 2130 spin_unlock_irqrestore(&qp->s_hlock, flags); 2131 if (nreq) { 2132 /* 2133 * Only call do_send if there is exactly one packet, and the 2134 * driver said it was ok. 2135 */ 2136 if (nreq == 1 && call_send) 2137 rdi->driver_f.do_send(qp); 2138 else 2139 rdi->driver_f.schedule_send_no_lock(qp); 2140 } 2141 return err; 2142 } 2143 2144 /** 2145 * rvt_post_srq_receive - post a receive on a shared receive queue 2146 * @ibsrq: the SRQ to post the receive on 2147 * @wr: the list of work requests to post 2148 * @bad_wr: A pointer to the first WR to cause a problem is put here 2149 * 2150 * This may be called from interrupt context. 
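 *
 * Note (summary of the code below): the SRQ ring keeps one entry
 * unused, so the post fails with -ENOMEM and *bad_wr set as soon
 * as advancing wq->head would make it equal to wq->tail.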
2151 * 2152 * Return: 0 on success else errno 2153 */ 2154 int rvt_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, 2155 const struct ib_recv_wr **bad_wr) 2156 { 2157 struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq); 2158 struct rvt_rwq *wq; 2159 unsigned long flags; 2160 2161 for (; wr; wr = wr->next) { 2162 struct rvt_rwqe *wqe; 2163 u32 next; 2164 int i; 2165 2166 if ((unsigned)wr->num_sge > srq->rq.max_sge) { 2167 *bad_wr = wr; 2168 return -EINVAL; 2169 } 2170 2171 spin_lock_irqsave(&srq->rq.lock, flags); 2172 wq = srq->rq.wq; 2173 next = wq->head + 1; 2174 if (next >= srq->rq.size) 2175 next = 0; 2176 if (next == wq->tail) { 2177 spin_unlock_irqrestore(&srq->rq.lock, flags); 2178 *bad_wr = wr; 2179 return -ENOMEM; 2180 } 2181 2182 wqe = rvt_get_rwqe_ptr(&srq->rq, wq->head); 2183 wqe->wr_id = wr->wr_id; 2184 wqe->num_sge = wr->num_sge; 2185 for (i = 0; i < wr->num_sge; i++) 2186 wqe->sg_list[i] = wr->sg_list[i]; 2187 /* Make sure queue entry is written before the head index. */ 2188 smp_wmb(); 2189 wq->head = next; 2190 spin_unlock_irqrestore(&srq->rq.lock, flags); 2191 } 2192 return 0; 2193 } 2194 2195 /* 2196 * Validate a RWQE and fill in the SGE state. 2197 * Return 1 if OK. 2198 */ 2199 static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe) 2200 { 2201 int i, j, ret; 2202 struct ib_wc wc; 2203 struct rvt_lkey_table *rkt; 2204 struct rvt_pd *pd; 2205 struct rvt_sge_state *ss; 2206 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device); 2207 2208 rkt = &rdi->lkey_table; 2209 pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd); 2210 ss = &qp->r_sge; 2211 ss->sg_list = qp->r_sg_list; 2212 qp->r_len = 0; 2213 for (i = j = 0; i < wqe->num_sge; i++) { 2214 if (wqe->sg_list[i].length == 0) 2215 continue; 2216 /* Check LKEY */ 2217 ret = rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge, 2218 NULL, &wqe->sg_list[i], 2219 IB_ACCESS_LOCAL_WRITE); 2220 if (unlikely(ret <= 0)) 2221 goto bad_lkey; 2222 qp->r_len += wqe->sg_list[i].length; 2223 j++; 2224 } 2225 ss->num_sge = j; 2226 ss->total_len = qp->r_len; 2227 return 1; 2228 2229 bad_lkey: 2230 while (j) { 2231 struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge; 2232 2233 rvt_put_mr(sge->mr); 2234 } 2235 ss->num_sge = 0; 2236 memset(&wc, 0, sizeof(wc)); 2237 wc.wr_id = wqe->wr_id; 2238 wc.status = IB_WC_LOC_PROT_ERR; 2239 wc.opcode = IB_WC_RECV; 2240 wc.qp = &qp->ibqp; 2241 /* Signal solicited completion event. */ 2242 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1); 2243 return 0; 2244 } 2245 2246 /** 2247 * rvt_get_rwqe - copy the next RWQE into the QP's RWQE 2248 * @qp: the QP 2249 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge 2250 * 2251 * Return -1 if there is a local error, 0 if no RWQE is available, 2252 * otherwise return 1. 2253 * 2254 * Can be called from interrupt level. 
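 *
 * Typical caller pattern (sketch, mirroring rvt_ruc_loopback() below):
 *
 *	ret = rvt_get_rwqe(qp, false);
 *	if (ret < 0)
 *		goto op_err;
 *	if (!ret)
 *		goto rnr_nak;
 *
 * On a return of 1, qp->r_wr_id (and, unless wr_id_only was set,
 * qp->r_sge) describe the consumed RWQE.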
2255 */ 2256 int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only) 2257 { 2258 unsigned long flags; 2259 struct rvt_rq *rq; 2260 struct rvt_rwq *wq; 2261 struct rvt_srq *srq; 2262 struct rvt_rwqe *wqe; 2263 void (*handler)(struct ib_event *, void *); 2264 u32 tail; 2265 int ret; 2266 2267 if (qp->ibqp.srq) { 2268 srq = ibsrq_to_rvtsrq(qp->ibqp.srq); 2269 handler = srq->ibsrq.event_handler; 2270 rq = &srq->rq; 2271 } else { 2272 srq = NULL; 2273 handler = NULL; 2274 rq = &qp->r_rq; 2275 } 2276 2277 spin_lock_irqsave(&rq->lock, flags); 2278 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) { 2279 ret = 0; 2280 goto unlock; 2281 } 2282 2283 wq = rq->wq; 2284 tail = wq->tail; 2285 /* Validate tail before using it since it is user writable. */ 2286 if (tail >= rq->size) 2287 tail = 0; 2288 if (unlikely(tail == wq->head)) { 2289 ret = 0; 2290 goto unlock; 2291 } 2292 /* Make sure entry is read after head index is read. */ 2293 smp_rmb(); 2294 wqe = rvt_get_rwqe_ptr(rq, tail); 2295 /* 2296 * Even though we update the tail index in memory, the verbs 2297 * consumer is not supposed to post more entries until a 2298 * completion is generated. 2299 */ 2300 if (++tail >= rq->size) 2301 tail = 0; 2302 wq->tail = tail; 2303 if (!wr_id_only && !init_sge(qp, wqe)) { 2304 ret = -1; 2305 goto unlock; 2306 } 2307 qp->r_wr_id = wqe->wr_id; 2308 2309 ret = 1; 2310 set_bit(RVT_R_WRID_VALID, &qp->r_aflags); 2311 if (handler) { 2312 u32 n; 2313 2314 /* 2315 * Validate head pointer value and compute 2316 * the number of remaining WQEs. 2317 */ 2318 n = wq->head; 2319 if (n >= rq->size) 2320 n = 0; 2321 if (n < tail) 2322 n += rq->size - tail; 2323 else 2324 n -= tail; 2325 if (n < srq->limit) { 2326 struct ib_event ev; 2327 2328 srq->limit = 0; 2329 spin_unlock_irqrestore(&rq->lock, flags); 2330 ev.device = qp->ibqp.device; 2331 ev.element.srq = qp->ibqp.srq; 2332 ev.event = IB_EVENT_SRQ_LIMIT_REACHED; 2333 handler(&ev, srq->ibsrq.srq_context); 2334 goto bail; 2335 } 2336 } 2337 unlock: 2338 spin_unlock_irqrestore(&rq->lock, flags); 2339 bail: 2340 return ret; 2341 } 2342 EXPORT_SYMBOL(rvt_get_rwqe); 2343 2344 /** 2345 * qp_comm_est - handle trap with QP established 2346 * @qp: the QP 2347 */ 2348 void rvt_comm_est(struct rvt_qp *qp) 2349 { 2350 qp->r_flags |= RVT_R_COMM_EST; 2351 if (qp->ibqp.event_handler) { 2352 struct ib_event ev; 2353 2354 ev.device = qp->ibqp.device; 2355 ev.element.qp = &qp->ibqp; 2356 ev.event = IB_EVENT_COMM_EST; 2357 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); 2358 } 2359 } 2360 EXPORT_SYMBOL(rvt_comm_est); 2361 2362 void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err) 2363 { 2364 unsigned long flags; 2365 int lastwqe; 2366 2367 spin_lock_irqsave(&qp->s_lock, flags); 2368 lastwqe = rvt_error_qp(qp, err); 2369 spin_unlock_irqrestore(&qp->s_lock, flags); 2370 2371 if (lastwqe) { 2372 struct ib_event ev; 2373 2374 ev.device = qp->ibqp.device; 2375 ev.element.qp = &qp->ibqp; 2376 ev.event = IB_EVENT_QP_LAST_WQE_REACHED; 2377 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); 2378 } 2379 } 2380 EXPORT_SYMBOL(rvt_rc_error); 2381 2382 /* 2383 * rvt_rnr_tbl_to_usec - return index into ib_rvt_rnr_table 2384 * @index - the index 2385 * return usec from an index into ib_rvt_rnr_table 2386 */ 2387 unsigned long rvt_rnr_tbl_to_usec(u32 index) 2388 { 2389 return ib_rvt_rnr_table[(index & IB_AETH_CREDIT_MASK)]; 2390 } 2391 EXPORT_SYMBOL(rvt_rnr_tbl_to_usec); 2392 2393 static inline unsigned long rvt_aeth_to_usec(u32 aeth) 2394 { 2395 return ib_rvt_rnr_table[(aeth >> 
IB_AETH_CREDIT_SHIFT) & 2396 IB_AETH_CREDIT_MASK]; 2397 } 2398 2399 /* 2400 * rvt_add_retry_timer_ext - add/start a retry timer 2401 * @qp - the QP 2402 * @shift - timeout shift to wait for multiple packets 2403 * add a retry timer on the QP 2404 */ 2405 void rvt_add_retry_timer_ext(struct rvt_qp *qp, u8 shift) 2406 { 2407 struct ib_qp *ibqp = &qp->ibqp; 2408 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device); 2409 2410 lockdep_assert_held(&qp->s_lock); 2411 qp->s_flags |= RVT_S_TIMER; 2412 /* 4.096 usec. * (1 << qp->timeout) */ 2413 qp->s_timer.expires = jiffies + rdi->busy_jiffies + 2414 (qp->timeout_jiffies << shift); 2415 add_timer(&qp->s_timer); 2416 } 2417 EXPORT_SYMBOL(rvt_add_retry_timer_ext); 2418 2419 /** 2420 * rvt_add_rnr_timer - add/start an rnr timer 2421 * @qp - the QP 2422 * @aeth - aeth of RNR timeout, simulated aeth for loopback 2423 * add an rnr timer on the QP 2424 */ 2425 void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth) 2426 { 2427 u32 to; 2428 2429 lockdep_assert_held(&qp->s_lock); 2430 qp->s_flags |= RVT_S_WAIT_RNR; 2431 to = rvt_aeth_to_usec(aeth); 2432 trace_rvt_rnrnak_add(qp, to); 2433 hrtimer_start(&qp->s_rnr_timer, 2434 ns_to_ktime(1000 * to), HRTIMER_MODE_REL_PINNED); 2435 } 2436 EXPORT_SYMBOL(rvt_add_rnr_timer); 2437 2438 /** 2439 * rvt_stop_rc_timers - stop all timers 2440 * @qp - the QP 2441 * stop any pending timers 2442 */ 2443 void rvt_stop_rc_timers(struct rvt_qp *qp) 2444 { 2445 lockdep_assert_held(&qp->s_lock); 2446 /* Remove QP from all timers */ 2447 if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) { 2448 qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR); 2449 del_timer(&qp->s_timer); 2450 hrtimer_try_to_cancel(&qp->s_rnr_timer); 2451 } 2452 } 2453 EXPORT_SYMBOL(rvt_stop_rc_timers); 2454 2455 /** 2456 * rvt_stop_rnr_timer - stop an rnr timer 2457 * @qp - the QP 2458 * 2459 * stop an rnr timer and return if the timer 2460 * had been pending. 2461 */ 2462 static void rvt_stop_rnr_timer(struct rvt_qp *qp) 2463 { 2464 lockdep_assert_held(&qp->s_lock); 2465 /* Remove QP from rnr timer */ 2466 if (qp->s_flags & RVT_S_WAIT_RNR) { 2467 qp->s_flags &= ~RVT_S_WAIT_RNR; 2468 trace_rvt_rnrnak_stop(qp, 0); 2469 } 2470 } 2471 2472 /** 2473 * rvt_del_timers_sync - wait for any timeout routines to exit 2474 * @qp - the QP 2475 */ 2476 void rvt_del_timers_sync(struct rvt_qp *qp) 2477 { 2478 del_timer_sync(&qp->s_timer); 2479 hrtimer_cancel(&qp->s_rnr_timer); 2480 } 2481 EXPORT_SYMBOL(rvt_del_timers_sync); 2482 2483 /** 2484 * This is called from s_timer for missing responses. 2485 */ 2486 static void rvt_rc_timeout(struct timer_list *t) 2487 { 2488 struct rvt_qp *qp = from_timer(qp, t, s_timer); 2489 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device); 2490 unsigned long flags; 2491 2492 spin_lock_irqsave(&qp->r_lock, flags); 2493 spin_lock(&qp->s_lock); 2494 if (qp->s_flags & RVT_S_TIMER) { 2495 struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1]; 2496 2497 qp->s_flags &= ~RVT_S_TIMER; 2498 rvp->n_rc_timeouts++; 2499 del_timer(&qp->s_timer); 2500 trace_rvt_rc_timeout(qp, qp->s_last_psn + 1); 2501 if (rdi->driver_f.notify_restart_rc) 2502 rdi->driver_f.notify_restart_rc(qp, 2503 qp->s_last_psn + 1, 2504 1); 2505 rdi->driver_f.schedule_send(qp); 2506 } 2507 spin_unlock(&qp->s_lock); 2508 spin_unlock_irqrestore(&qp->r_lock, flags); 2509 } 2510 2511 /* 2512 * This is called from s_timer for RNR timeouts. 
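 * This is the s_rnr_timer hrtimer handler; it returns
 * HRTIMER_NORESTART, so the timer is one-shot and is re-armed only
 * by a later rvt_add_rnr_timer() call.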
2513 */ 2514 enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t) 2515 { 2516 struct rvt_qp *qp = container_of(t, struct rvt_qp, s_rnr_timer); 2517 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device); 2518 unsigned long flags; 2519 2520 spin_lock_irqsave(&qp->s_lock, flags); 2521 rvt_stop_rnr_timer(qp); 2522 trace_rvt_rnrnak_timeout(qp, 0); 2523 rdi->driver_f.schedule_send(qp); 2524 spin_unlock_irqrestore(&qp->s_lock, flags); 2525 return HRTIMER_NORESTART; 2526 } 2527 EXPORT_SYMBOL(rvt_rc_rnr_retry); 2528 2529 /** 2530 * rvt_qp_iter_init - initial for QP iteration 2531 * @rdi: rvt devinfo 2532 * @v: u64 value 2533 * 2534 * This returns an iterator suitable for iterating QPs 2535 * in the system. 2536 * 2537 * The @cb is a user defined callback and @v is a 64 2538 * bit value passed to and relevant for processing in the 2539 * @cb. An example use case would be to alter QP processing 2540 * based on criteria not part of the rvt_qp. 2541 * 2542 * Use cases that require memory allocation to succeed 2543 * must preallocate appropriately. 2544 * 2545 * Return: a pointer to an rvt_qp_iter or NULL 2546 */ 2547 struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi, 2548 u64 v, 2549 void (*cb)(struct rvt_qp *qp, u64 v)) 2550 { 2551 struct rvt_qp_iter *i; 2552 2553 i = kzalloc(sizeof(*i), GFP_KERNEL); 2554 if (!i) 2555 return NULL; 2556 2557 i->rdi = rdi; 2558 /* number of special QPs (SMI/GSI) for device */ 2559 i->specials = rdi->ibdev.phys_port_cnt * 2; 2560 i->v = v; 2561 i->cb = cb; 2562 2563 return i; 2564 } 2565 EXPORT_SYMBOL(rvt_qp_iter_init); 2566 2567 /** 2568 * rvt_qp_iter_next - return the next QP in iter 2569 * @iter - the iterator 2570 * 2571 * Fine grained QP iterator suitable for use 2572 * with debugfs seq_file mechanisms. 2573 * 2574 * Updates iter->qp with the current QP when the return 2575 * value is 0. 2576 * 2577 * Return: 0 - iter->qp is valid 1 - no more QPs 2578 */ 2579 int rvt_qp_iter_next(struct rvt_qp_iter *iter) 2580 __must_hold(RCU) 2581 { 2582 int n = iter->n; 2583 int ret = 1; 2584 struct rvt_qp *pqp = iter->qp; 2585 struct rvt_qp *qp; 2586 struct rvt_dev_info *rdi = iter->rdi; 2587 2588 /* 2589 * The approach is to consider the special qps 2590 * as additional table entries before the 2591 * real hash table. Since the qp code sets 2592 * the qp->next hash link to NULL, this works just fine. 2593 * 2594 * iter->specials is 2 * # ports 2595 * 2596 * n = 0..iter->specials is the special qp indices 2597 * 2598 * n = iter->specials..rdi->qp_dev->qp_table_size+iter->specials are 2599 * the potential hash bucket entries 2600 * 2601 */ 2602 for (; n < rdi->qp_dev->qp_table_size + iter->specials; n++) { 2603 if (pqp) { 2604 qp = rcu_dereference(pqp->next); 2605 } else { 2606 if (n < iter->specials) { 2607 struct rvt_ibport *rvp; 2608 int pidx; 2609 2610 pidx = n % rdi->ibdev.phys_port_cnt; 2611 rvp = rdi->ports[pidx]; 2612 qp = rcu_dereference(rvp->qp[n & 1]); 2613 } else { 2614 qp = rcu_dereference( 2615 rdi->qp_dev->qp_table[ 2616 (n - iter->specials)]); 2617 } 2618 } 2619 pqp = qp; 2620 if (qp) { 2621 iter->qp = qp; 2622 iter->n = n; 2623 return 0; 2624 } 2625 } 2626 return ret; 2627 } 2628 EXPORT_SYMBOL(rvt_qp_iter_next); 2629 2630 /** 2631 * rvt_qp_iter - iterate all QPs 2632 * @rdi - rvt devinfo 2633 * @v - a 64 bit value 2634 * @cb - a callback 2635 * 2636 * This provides a way for iterating all QPs. 2637 * 2638 * The @cb is a user defined callback and @v is a 64 2639 * bit value passed to and relevant for processing in the 2640 * cb. 
An example use case would be to alter QP processing 2641 * based on criteria not part of the rvt_qp. 2642 * 2643 * The code has an internal iterator to simplify 2644 * non seq_file use cases. 2645 */ 2646 void rvt_qp_iter(struct rvt_dev_info *rdi, 2647 u64 v, 2648 void (*cb)(struct rvt_qp *qp, u64 v)) 2649 { 2650 int ret; 2651 struct rvt_qp_iter i = { 2652 .rdi = rdi, 2653 .specials = rdi->ibdev.phys_port_cnt * 2, 2654 .v = v, 2655 .cb = cb 2656 }; 2657 2658 rcu_read_lock(); 2659 do { 2660 ret = rvt_qp_iter_next(&i); 2661 if (!ret) { 2662 rvt_get_qp(i.qp); 2663 rcu_read_unlock(); 2664 i.cb(i.qp, i.v); 2665 rcu_read_lock(); 2666 rvt_put_qp(i.qp); 2667 } 2668 } while (!ret); 2669 rcu_read_unlock(); 2670 } 2671 EXPORT_SYMBOL(rvt_qp_iter); 2672 2673 /* 2674 * This should be called with s_lock held. 2675 */ 2676 void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe, 2677 enum ib_wc_status status) 2678 { 2679 u32 old_last, last; 2680 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device); 2681 2682 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND)) 2683 return; 2684 2685 last = qp->s_last; 2686 old_last = last; 2687 trace_rvt_qp_send_completion(qp, wqe, last); 2688 if (++last >= qp->s_size) 2689 last = 0; 2690 trace_rvt_qp_send_completion(qp, wqe, last); 2691 qp->s_last = last; 2692 /* See post_send() */ 2693 barrier(); 2694 rvt_put_swqe(wqe); 2695 if (qp->ibqp.qp_type == IB_QPT_UD || 2696 qp->ibqp.qp_type == IB_QPT_SMI || 2697 qp->ibqp.qp_type == IB_QPT_GSI) 2698 atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount); 2699 2700 rvt_qp_swqe_complete(qp, 2701 wqe, 2702 rdi->wc_opcode[wqe->wr.opcode], 2703 status); 2704 2705 if (qp->s_acked == old_last) 2706 qp->s_acked = last; 2707 if (qp->s_cur == old_last) 2708 qp->s_cur = last; 2709 if (qp->s_tail == old_last) 2710 qp->s_tail = last; 2711 if (qp->state == IB_QPS_SQD && last == qp->s_cur) 2712 qp->s_draining = 0; 2713 } 2714 EXPORT_SYMBOL(rvt_send_complete); 2715 2716 /** 2717 * rvt_copy_sge - copy data to SGE memory 2718 * @qp: associated QP 2719 * @ss: the SGE state 2720 * @data: the data to copy 2721 * @length: the length of the data 2722 * @release: boolean to release MR 2723 * @copy_last: do a separate copy of the last 8 bytes 2724 */ 2725 void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss, 2726 void *data, u32 length, 2727 bool release, bool copy_last) 2728 { 2729 struct rvt_sge *sge = &ss->sge; 2730 int i; 2731 bool in_last = false; 2732 bool cacheless_copy = false; 2733 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device); 2734 struct rvt_wss *wss = rdi->wss; 2735 unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode; 2736 2737 if (sge_copy_mode == RVT_SGE_COPY_CACHELESS) { 2738 cacheless_copy = length >= PAGE_SIZE; 2739 } else if (sge_copy_mode == RVT_SGE_COPY_ADAPTIVE) { 2740 if (length >= PAGE_SIZE) { 2741 /* 2742 * NOTE: this *assumes*: 2743 * o The first vaddr is the dest. 2744 * o If multiple pages, then vaddr is sequential. 
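 *
 * Illustrative example (hypothetical sizes, 4 KiB pages): an 8 KiB
 * copy samples sge->vaddr and sge->vaddr + PAGE_SIZE into the
 * working-set structure; if the sampled working set then exceeds
 * the configured threshold, the cacheless path is used for this
 * copy, otherwise a normal cached memcpy() is done.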
2745 */ 2746 wss_insert(wss, sge->vaddr); 2747 if (length >= (2 * PAGE_SIZE)) 2748 wss_insert(wss, (sge->vaddr + PAGE_SIZE)); 2749 2750 cacheless_copy = wss_exceeds_threshold(wss); 2751 } else { 2752 wss_advance_clean_counter(wss); 2753 } 2754 } 2755 2756 if (copy_last) { 2757 if (length > 8) { 2758 length -= 8; 2759 } else { 2760 copy_last = false; 2761 in_last = true; 2762 } 2763 } 2764 2765 again: 2766 while (length) { 2767 u32 len = rvt_get_sge_length(sge, length); 2768 2769 WARN_ON_ONCE(len == 0); 2770 if (unlikely(in_last)) { 2771 /* enforce byte transfer ordering */ 2772 for (i = 0; i < len; i++) 2773 ((u8 *)sge->vaddr)[i] = ((u8 *)data)[i]; 2774 } else if (cacheless_copy) { 2775 cacheless_memcpy(sge->vaddr, data, len); 2776 } else { 2777 memcpy(sge->vaddr, data, len); 2778 } 2779 rvt_update_sge(ss, len, release); 2780 data += len; 2781 length -= len; 2782 } 2783 2784 if (copy_last) { 2785 copy_last = false; 2786 in_last = true; 2787 length = 8; 2788 goto again; 2789 } 2790 } 2791 EXPORT_SYMBOL(rvt_copy_sge); 2792 2793 static enum ib_wc_status loopback_qp_drop(struct rvt_ibport *rvp, 2794 struct rvt_qp *sqp) 2795 { 2796 rvp->n_pkt_drops++; 2797 /* 2798 * For RC, the requester would timeout and retry so 2799 * shortcut the timeouts and just signal too many retries. 2800 */ 2801 return sqp->ibqp.qp_type == IB_QPT_RC ? 2802 IB_WC_RETRY_EXC_ERR : IB_WC_SUCCESS; 2803 } 2804 2805 /** 2806 * ruc_loopback - handle UC and RC loopback requests 2807 * @sqp: the sending QP 2808 * 2809 * This is called from rvt_do_send() to forward a WQE addressed to the same HFI 2810 * Note that although we are single threaded due to the send engine, we still 2811 * have to protect against post_send(). We don't have to worry about 2812 * receive interrupts since this is a connected protocol and all packets 2813 * will pass through here. 2814 */ 2815 void rvt_ruc_loopback(struct rvt_qp *sqp) 2816 { 2817 struct rvt_ibport *rvp = NULL; 2818 struct rvt_dev_info *rdi = ib_to_rvt(sqp->ibqp.device); 2819 struct rvt_qp *qp; 2820 struct rvt_swqe *wqe; 2821 struct rvt_sge *sge; 2822 unsigned long flags; 2823 struct ib_wc wc; 2824 u64 sdata; 2825 atomic64_t *maddr; 2826 enum ib_wc_status send_status; 2827 bool release; 2828 int ret; 2829 bool copy_last = false; 2830 int local_ops = 0; 2831 2832 rcu_read_lock(); 2833 rvp = rdi->ports[sqp->port_num - 1]; 2834 2835 /* 2836 * Note that we check the responder QP state after 2837 * checking the requester's state. 2838 */ 2839 2840 qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), rvp, 2841 sqp->remote_qpn); 2842 2843 spin_lock_irqsave(&sqp->s_lock, flags); 2844 2845 /* Return if we are already busy processing a work request. */ 2846 if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) || 2847 !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND)) 2848 goto unlock; 2849 2850 sqp->s_flags |= RVT_S_BUSY; 2851 2852 again: 2853 if (sqp->s_last == READ_ONCE(sqp->s_head)) 2854 goto clr_busy; 2855 wqe = rvt_get_swqe_ptr(sqp, sqp->s_last); 2856 2857 /* Return if it is not OK to start a new work request. */ 2858 if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) { 2859 if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND)) 2860 goto clr_busy; 2861 /* We are in the error state, flush the work request. */ 2862 send_status = IB_WC_WR_FLUSH_ERR; 2863 goto flush_send; 2864 } 2865 2866 /* 2867 * We can rely on the entry not changing without the s_lock 2868 * being held until we update s_last. 2869 * We increment s_cur to indicate s_last is in progress. 
2870 */ 2871 if (sqp->s_last == sqp->s_cur) { 2872 if (++sqp->s_cur >= sqp->s_size) 2873 sqp->s_cur = 0; 2874 } 2875 spin_unlock_irqrestore(&sqp->s_lock, flags); 2876 2877 if (!qp) { 2878 send_status = loopback_qp_drop(rvp, sqp); 2879 goto serr_no_r_lock; 2880 } 2881 spin_lock_irqsave(&qp->r_lock, flags); 2882 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) || 2883 qp->ibqp.qp_type != sqp->ibqp.qp_type) { 2884 send_status = loopback_qp_drop(rvp, sqp); 2885 goto serr; 2886 } 2887 2888 memset(&wc, 0, sizeof(wc)); 2889 send_status = IB_WC_SUCCESS; 2890 2891 release = true; 2892 sqp->s_sge.sge = wqe->sg_list[0]; 2893 sqp->s_sge.sg_list = wqe->sg_list + 1; 2894 sqp->s_sge.num_sge = wqe->wr.num_sge; 2895 sqp->s_len = wqe->length; 2896 switch (wqe->wr.opcode) { 2897 case IB_WR_REG_MR: 2898 goto send_comp; 2899 2900 case IB_WR_LOCAL_INV: 2901 if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) { 2902 if (rvt_invalidate_rkey(sqp, 2903 wqe->wr.ex.invalidate_rkey)) 2904 send_status = IB_WC_LOC_PROT_ERR; 2905 local_ops = 1; 2906 } 2907 goto send_comp; 2908 2909 case IB_WR_SEND_WITH_INV: 2910 case IB_WR_SEND_WITH_IMM: 2911 case IB_WR_SEND: 2912 ret = rvt_get_rwqe(qp, false); 2913 if (ret < 0) 2914 goto op_err; 2915 if (!ret) 2916 goto rnr_nak; 2917 if (wqe->length > qp->r_len) 2918 goto inv_err; 2919 switch (wqe->wr.opcode) { 2920 case IB_WR_SEND_WITH_INV: 2921 if (!rvt_invalidate_rkey(qp, 2922 wqe->wr.ex.invalidate_rkey)) { 2923 wc.wc_flags = IB_WC_WITH_INVALIDATE; 2924 wc.ex.invalidate_rkey = 2925 wqe->wr.ex.invalidate_rkey; 2926 } 2927 break; 2928 case IB_WR_SEND_WITH_IMM: 2929 wc.wc_flags = IB_WC_WITH_IMM; 2930 wc.ex.imm_data = wqe->wr.ex.imm_data; 2931 break; 2932 default: 2933 break; 2934 } 2935 break; 2936 2937 case IB_WR_RDMA_WRITE_WITH_IMM: 2938 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE))) 2939 goto inv_err; 2940 wc.wc_flags = IB_WC_WITH_IMM; 2941 wc.ex.imm_data = wqe->wr.ex.imm_data; 2942 ret = rvt_get_rwqe(qp, true); 2943 if (ret < 0) 2944 goto op_err; 2945 if (!ret) 2946 goto rnr_nak; 2947 /* skip copy_last set and qp_access_flags recheck */ 2948 goto do_write; 2949 case IB_WR_RDMA_WRITE: 2950 copy_last = rvt_is_user_qp(qp); 2951 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE))) 2952 goto inv_err; 2953 do_write: 2954 if (wqe->length == 0) 2955 break; 2956 if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length, 2957 wqe->rdma_wr.remote_addr, 2958 wqe->rdma_wr.rkey, 2959 IB_ACCESS_REMOTE_WRITE))) 2960 goto acc_err; 2961 qp->r_sge.sg_list = NULL; 2962 qp->r_sge.num_sge = 1; 2963 qp->r_sge.total_len = wqe->length; 2964 break; 2965 2966 case IB_WR_RDMA_READ: 2967 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ))) 2968 goto inv_err; 2969 if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length, 2970 wqe->rdma_wr.remote_addr, 2971 wqe->rdma_wr.rkey, 2972 IB_ACCESS_REMOTE_READ))) 2973 goto acc_err; 2974 release = false; 2975 sqp->s_sge.sg_list = NULL; 2976 sqp->s_sge.num_sge = 1; 2977 qp->r_sge.sge = wqe->sg_list[0]; 2978 qp->r_sge.sg_list = wqe->sg_list + 1; 2979 qp->r_sge.num_sge = wqe->wr.num_sge; 2980 qp->r_sge.total_len = wqe->length; 2981 break; 2982 2983 case IB_WR_ATOMIC_CMP_AND_SWP: 2984 case IB_WR_ATOMIC_FETCH_AND_ADD: 2985 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) 2986 goto inv_err; 2987 if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64), 2988 wqe->atomic_wr.remote_addr, 2989 wqe->atomic_wr.rkey, 2990 IB_ACCESS_REMOTE_ATOMIC))) 2991 goto acc_err; 2992 /* Perform atomic OP and save result. 
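 * For IB_WR_ATOMIC_FETCH_AND_ADD the value prior to the add is
 * returned (atomic64_add_return() minus the addend); for
 * IB_WR_ATOMIC_CMP_AND_SWP, cmpxchg() returns the prior value
 * whether or not the swap took place.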
*/ 2993 maddr = (atomic64_t *)qp->r_sge.sge.vaddr; 2994 sdata = wqe->atomic_wr.compare_add; 2995 *(u64 *)sqp->s_sge.sge.vaddr = 2996 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ? 2997 (u64)atomic64_add_return(sdata, maddr) - sdata : 2998 (u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr, 2999 sdata, wqe->atomic_wr.swap); 3000 rvt_put_mr(qp->r_sge.sge.mr); 3001 qp->r_sge.num_sge = 0; 3002 goto send_comp; 3003 3004 default: 3005 send_status = IB_WC_LOC_QP_OP_ERR; 3006 goto serr; 3007 } 3008 3009 sge = &sqp->s_sge.sge; 3010 while (sqp->s_len) { 3011 u32 len = rvt_get_sge_length(sge, sqp->s_len); 3012 3013 WARN_ON_ONCE(len == 0); 3014 rvt_copy_sge(qp, &qp->r_sge, sge->vaddr, 3015 len, release, copy_last); 3016 rvt_update_sge(&sqp->s_sge, len, !release); 3017 sqp->s_len -= len; 3018 } 3019 if (release) 3020 rvt_put_ss(&qp->r_sge); 3021 3022 if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) 3023 goto send_comp; 3024 3025 if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM) 3026 wc.opcode = IB_WC_RECV_RDMA_WITH_IMM; 3027 else 3028 wc.opcode = IB_WC_RECV; 3029 wc.wr_id = qp->r_wr_id; 3030 wc.status = IB_WC_SUCCESS; 3031 wc.byte_len = wqe->length; 3032 wc.qp = &qp->ibqp; 3033 wc.src_qp = qp->remote_qpn; 3034 wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX; 3035 wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr); 3036 wc.port_num = 1; 3037 /* Signal completion event if the solicited bit is set. */ 3038 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 3039 wqe->wr.send_flags & IB_SEND_SOLICITED); 3040 3041 send_comp: 3042 spin_unlock_irqrestore(&qp->r_lock, flags); 3043 spin_lock_irqsave(&sqp->s_lock, flags); 3044 rvp->n_loop_pkts++; 3045 flush_send: 3046 sqp->s_rnr_retry = sqp->s_rnr_retry_cnt; 3047 rvt_send_complete(sqp, wqe, send_status); 3048 if (local_ops) { 3049 atomic_dec(&sqp->local_ops_pending); 3050 local_ops = 0; 3051 } 3052 goto again; 3053 3054 rnr_nak: 3055 /* Handle RNR NAK */ 3056 if (qp->ibqp.qp_type == IB_QPT_UC) 3057 goto send_comp; 3058 rvp->n_rnr_naks++; 3059 /* 3060 * Note: we don't need the s_lock held since the BUSY flag 3061 * makes this single threaded. 3062 */ 3063 if (sqp->s_rnr_retry == 0) { 3064 send_status = IB_WC_RNR_RETRY_EXC_ERR; 3065 goto serr; 3066 } 3067 if (sqp->s_rnr_retry_cnt < 7) 3068 sqp->s_rnr_retry--; 3069 spin_unlock_irqrestore(&qp->r_lock, flags); 3070 spin_lock_irqsave(&sqp->s_lock, flags); 3071 if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK)) 3072 goto clr_busy; 3073 rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer << 3074 IB_AETH_CREDIT_SHIFT); 3075 goto clr_busy; 3076 3077 op_err: 3078 send_status = IB_WC_REM_OP_ERR; 3079 wc.status = IB_WC_LOC_QP_OP_ERR; 3080 goto err; 3081 3082 inv_err: 3083 send_status = 3084 sqp->ibqp.qp_type == IB_QPT_RC ? 
3085 IB_WC_REM_INV_REQ_ERR : 3086 IB_WC_SUCCESS; 3087 wc.status = IB_WC_LOC_QP_OP_ERR; 3088 goto err; 3089 3090 acc_err: 3091 send_status = IB_WC_REM_ACCESS_ERR; 3092 wc.status = IB_WC_LOC_PROT_ERR; 3093 err: 3094 /* responder goes to error state */ 3095 rvt_rc_error(qp, wc.status); 3096 3097 serr: 3098 spin_unlock_irqrestore(&qp->r_lock, flags); 3099 serr_no_r_lock: 3100 spin_lock_irqsave(&sqp->s_lock, flags); 3101 rvt_send_complete(sqp, wqe, send_status); 3102 if (sqp->ibqp.qp_type == IB_QPT_RC) { 3103 int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR); 3104 3105 sqp->s_flags &= ~RVT_S_BUSY; 3106 spin_unlock_irqrestore(&sqp->s_lock, flags); 3107 if (lastwqe) { 3108 struct ib_event ev; 3109 3110 ev.device = sqp->ibqp.device; 3111 ev.element.qp = &sqp->ibqp; 3112 ev.event = IB_EVENT_QP_LAST_WQE_REACHED; 3113 sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context); 3114 } 3115 goto done; 3116 } 3117 clr_busy: 3118 sqp->s_flags &= ~RVT_S_BUSY; 3119 unlock: 3120 spin_unlock_irqrestore(&sqp->s_lock, flags); 3121 done: 3122 rcu_read_unlock(); 3123 } 3124 EXPORT_SYMBOL(rvt_ruc_loopback); 3125