/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/hash.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>
#include <rdma/ib_verbs.h>

#include "hfi.h"
#include "qp.h"
#include "trace.h"
#include "verbs_txreq.h"

unsigned int hfi1_qp_table_size = 256;
module_param_named(qp_table_size, hfi1_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");

static void flush_tx_list(struct rvt_qp *qp);
static int iowait_sleep(
	struct sdma_engine *sde,
	struct iowait *wait,
	struct sdma_txreq *stx,
	unsigned seq);
static void iowait_wakeup(struct iowait *wait, int reason);
static void iowait_sdma_drained(struct iowait *wait);
static void qp_pio_drain(struct rvt_qp *qp);

static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
			      struct rvt_qpn_map *map, unsigned off)
{
	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}

/*
 * Convert the AETH credit code into the number of credits.
 */
static const u16 credit_table[31] = {
	0,			/* 0 */
	1,			/* 1 */
	2,			/* 2 */
	3,			/* 3 */
	4,			/* 4 */
	6,			/* 5 */
	8,			/* 6 */
	12,			/* 7 */
	16,			/* 8 */
	24,			/* 9 */
	32,			/* A */
	48,			/* B */
	64,			/* C */
	96,			/* D */
	128,			/* E */
	192,			/* F */
	256,			/* 10 */
	384,			/* 11 */
	512,			/* 12 */
	768,			/* 13 */
	1024,			/* 14 */
	1536,			/* 15 */
	2048,			/* 16 */
	3072,			/* 17 */
	4096,			/* 18 */
	6144,			/* 19 */
	8192,			/* 1A */
	12288,			/* 1B */
	16384,			/* 1C */
	24576,			/* 1D */
	32768			/* 1E */
};
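
/*
 * Worked example of the encoding above: with 100 RWQEs available, the
 * binary search in hfi1_compute_aeth() below settles on index 0xD
 * (96 credits), the largest table entry that does not exceed the actual
 * count, so the responder never advertises more credits than it has.
 */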

const struct rvt_operation_params hfi1_post_parms[RVT_OPERATION_MAX] = {
[IB_WR_RDMA_WRITE] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_RDMA_READ] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC,
},

[IB_WR_ATOMIC_CMP_AND_SWP] = {
	.length = sizeof(struct ib_atomic_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
},

[IB_WR_ATOMIC_FETCH_AND_ADD] = {
	.length = sizeof(struct ib_atomic_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
},

[IB_WR_RDMA_WRITE_WITH_IMM] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_SEND] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
		       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_SEND_WITH_IMM] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
		       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_REG_MR] = {
	.length = sizeof(struct ib_reg_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_LOCAL,
},

[IB_WR_LOCAL_INV] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_LOCAL,
},

[IB_WR_SEND_WITH_INV] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_RC),
},

};

static void flush_tx_list(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	while (!list_empty(&priv->s_iowait.tx_head)) {
		struct sdma_txreq *tx;

		tx = list_first_entry(
			&priv->s_iowait.tx_head,
			struct sdma_txreq,
			list);
		list_del_init(&tx->list);
		hfi1_put_txreq(
			container_of(tx, struct verbs_txreq, txreq));
	}
}

static void flush_iowait(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	unsigned long flags;

	write_seqlock_irqsave(&dev->iowait_lock, flags);
	if (!list_empty(&priv->s_iowait.list)) {
		list_del_init(&priv->s_iowait.list);
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
	write_sequnlock_irqrestore(&dev->iowait_lock, flags);
}

static inline int opa_mtu_enum_to_int(int mtu)
{
	switch (mtu) {
	case OPA_MTU_8192:  return 8192;
	case OPA_MTU_10240: return 10240;
	default:            return -1;
	}
}

/**
 * This function is what we would push to the core layer if we wanted to be a
 * "first class citizen". Instead we hide this here and rely on Verbs ULPs
 * to blindly pass the MTU enum value from the PathRecord to us.
 */
static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu)
{
	int val;

	/* Constraining 10KB packets to 8KB packets */
	if (mtu == (enum ib_mtu)OPA_MTU_10240)
		mtu = OPA_MTU_8192;
	val = opa_mtu_enum_to_int((int)mtu);
	if (val > 0)
		return val;
	return ib_mtu_enum_to_int(mtu);
}
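
/*
 * For illustration: OPA_MTU_10240 is clamped to 8192 above, OPA_MTU_8192
 * maps to 8192, and any standard IB enum (e.g. IB_MTU_4096) falls through
 * to ib_mtu_enum_to_int().
 */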

int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
			 int attr_mask, struct ib_udata *udata)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct hfi1_ibdev *dev = to_idev(ibqp->device);
	struct hfi1_devdata *dd = dd_from_dev(dev);
	u8 sc;

	if (attr_mask & IB_QP_AV) {
		sc = ah_to_sc(ibqp->device, &attr->ah_attr);
		if (sc == 0xf)
			return -EINVAL;

		if (!qp_to_sdma_engine(qp, sc) &&
		    dd->flags & HFI1_HAS_SEND_DMA)
			return -EINVAL;

		if (!qp_to_send_context(qp, sc))
			return -EINVAL;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		sc = ah_to_sc(ibqp->device, &attr->alt_ah_attr);
		if (sc == 0xf)
			return -EINVAL;

		if (!qp_to_sdma_engine(qp, sc) &&
		    dd->flags & HFI1_HAS_SEND_DMA)
			return -EINVAL;

		if (!qp_to_send_context(qp, sc))
			return -EINVAL;
	}

	return 0;
}

void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_udata *udata)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct hfi1_qp_priv *priv = qp->priv;

	if (attr_mask & IB_QP_AV) {
		priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
		priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
		priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE &&
	    attr->path_mig_state == IB_MIG_MIGRATED &&
	    qp->s_mig_state == IB_MIG_ARMED) {
		qp->s_flags |= RVT_S_AHG_CLEAR;
		priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
		priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
		priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
	}
}

/**
 * hfi1_check_send_wqe - validate wqe
 * @qp: the qp
 * @wqe: the built wqe
 *
 * Validate the wqe. This is called prior to inserting the wqe into
 * the ring, but after the wqe has been set up.
 *
 * Returns -EINVAL on failure, otherwise 0 or 1; a return of 1 indicates
 * that the payload length is no larger than piothreshold.
 */
int hfi1_check_send_wqe(struct rvt_qp *qp,
			struct rvt_swqe *wqe)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct rvt_ah *ah;

	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
	case IB_QPT_UC:
		if (wqe->length > 0x80000000U)
			return -EINVAL;
		break;
	case IB_QPT_SMI:
		ah = ibah_to_rvtah(wqe->ud_wr.ah);
		if (wqe->length > (1 << ah->log_pmtu))
			return -EINVAL;
		break;
	case IB_QPT_GSI:
	case IB_QPT_UD:
		ah = ibah_to_rvtah(wqe->ud_wr.ah);
		if (wqe->length > (1 << ah->log_pmtu))
			return -EINVAL;
		if (ibp->sl_to_sc[ah->attr.sl] == 0xf)
			return -EINVAL;
		/* fall through */
	default:
		break;
	}
	return wqe->length <= piothreshold;
}

/**
 * hfi1_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 hfi1_compute_aeth(struct rvt_qp *qp)
{
	u32 aeth = qp->r_msn & HFI1_MSN_MASK;

	if (qp->ibqp.srq) {
		/*
		 * Shared receive queues don't generate credits.
		 * Set the credit field to the invalid value.
		 */
		aeth |= HFI1_AETH_CREDIT_INVAL << HFI1_AETH_CREDIT_SHIFT;
	} else {
		u32 min, max, x;
		u32 credits;
		struct rvt_rwq *wq = qp->r_rq.wq;
		u32 head;
		u32 tail;

		/* sanity check pointers before trusting them */
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		/*
		 * Compute the number of credits available (RWQEs).
		 * There is a small chance that the pair of reads are
		 * not atomic, which is OK, since the fuzziness is
		 * resolved as further ACKs go out.
		 */
		credits = head - tail;
		if ((int)credits < 0)
			credits += qp->r_rq.size;
		/*
		 * Binary search the credit table to find the code to
		 * use.
		 */
		min = 0;
		max = 31;
		for (;;) {
			x = (min + max) / 2;
			if (credit_table[x] == credits)
				break;
			if (credit_table[x] > credits) {
				max = x;
			} else {
				if (min == x)
					break;
				min = x;
			}
		}
		aeth |= x << HFI1_AETH_CREDIT_SHIFT;
	}
	return cpu_to_be32(aeth);
}

/**
 * _hfi1_schedule_send - schedule progress
 * @qp: the QP
 *
 * This schedules qp progress w/o regard to the s_flags.
 *
 * It is only used in the post-send path, which does not hold
 * the s_lock.
 */
void _hfi1_schedule_send(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibport *ibp =
		to_iport(qp->ibqp.device, qp->port_num);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);

	iowait_schedule(&priv->s_iowait, ppd->hfi1_wq,
			priv->s_sde ?
			priv->s_sde->cpu :
			cpumask_first(cpumask_of_node(dd->node)));
}

static void qp_pio_drain(struct rvt_qp *qp)
{
	struct hfi1_ibdev *dev;
	struct hfi1_qp_priv *priv = qp->priv;

	if (!priv->s_sendcontext)
		return;
	dev = to_idev(qp->ibqp.device);
	while (iowait_pio_pending(&priv->s_iowait)) {
		write_seqlock_irq(&dev->iowait_lock);
		hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 1);
		write_sequnlock_irq(&dev->iowait_lock);
		iowait_pio_drain(&priv->s_iowait);
		write_seqlock_irq(&dev->iowait_lock);
		hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 0);
		write_sequnlock_irq(&dev->iowait_lock);
	}
}

/**
 * hfi1_schedule_send - schedule progress
 * @qp: the QP
 *
 * This schedules qp progress; the caller should hold
 * the s_lock.
 */
void hfi1_schedule_send(struct rvt_qp *qp)
{
	if (hfi1_send_ok(qp))
		_hfi1_schedule_send(qp);
}

/**
 * hfi1_get_credit - handle the credit field of an incoming AETH
 * @qp: the qp to update
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void hfi1_get_credit(struct rvt_qp *qp, u32 aeth)
{
	u32 credit = (aeth >> HFI1_AETH_CREDIT_SHIFT) & HFI1_AETH_CREDIT_MASK;

	/*
	 * If the credit is invalid, we can send
	 * as many packets as we like. Otherwise, we have to
	 * honor the credit field.
	 */
	if (credit == HFI1_AETH_CREDIT_INVAL) {
		if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
			qp->s_flags |= RVT_S_UNLIMITED_CREDIT;
			if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
				hfi1_schedule_send(qp);
			}
		}
	} else if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
		/* Compute new LSN (i.e., MSN + credit) */
		credit = (aeth + credit_table[credit]) & HFI1_MSN_MASK;
		if (cmp_msn(credit, qp->s_lsn) > 0) {
			qp->s_lsn = credit;
			if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
				hfi1_schedule_send(qp);
			}
		}
	}
}
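
/*
 * Example: an AETH whose credit code is 0x10 grants 256 RWQEs
 * (credit_table[0x10]) beyond the MSN carried in that AETH. s_lsn is
 * only advanced when the resulting limit is newer (cmp_msn() > 0), so a
 * stale ACK cannot shrink the send window.
 */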

void hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (qp->s_flags & flag) {
		qp->s_flags &= ~flag;
		trace_hfi1_qpwakeup(qp, flag);
		hfi1_schedule_send(qp);
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
	/* Notify hfi1_destroy_qp() if it is waiting. */
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}

static int iowait_sleep(
	struct sdma_engine *sde,
	struct iowait *wait,
	struct sdma_txreq *stx,
	unsigned seq)
{
	struct verbs_txreq *tx = container_of(stx, struct verbs_txreq, txreq);
	struct rvt_qp *qp;
	struct hfi1_qp_priv *priv;
	unsigned long flags;
	int ret = 0;
	struct hfi1_ibdev *dev;

	qp = tx->qp;
	priv = qp->priv;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
		/*
		 * If we couldn't queue the DMA request, save the info
		 * and try again later rather than destroying the
		 * buffer and undoing the side effects of the copy.
		 */
		/* Make a common routine? */
		dev = &sde->dd->verbs_dev;
		list_add_tail(&stx->list, &wait->tx_head);
		write_seqlock(&dev->iowait_lock);
		if (sdma_progress(sde, seq, stx))
			goto eagain;
		if (list_empty(&priv->s_iowait.list)) {
			struct hfi1_ibport *ibp =
				to_iport(qp->ibqp.device, qp->port_num);

			ibp->rvp.n_dmawait++;
			qp->s_flags |= RVT_S_WAIT_DMA_DESC;
			list_add_tail(&priv->s_iowait.list, &sde->dmawait);
			trace_hfi1_qpsleep(qp, RVT_S_WAIT_DMA_DESC);
			atomic_inc(&qp->refcount);
		}
		write_sequnlock(&dev->iowait_lock);
		qp->s_flags &= ~RVT_S_BUSY;
		spin_unlock_irqrestore(&qp->s_lock, flags);
		ret = -EBUSY;
	} else {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		hfi1_put_txreq(tx);
	}
	return ret;
eagain:
	write_sequnlock(&dev->iowait_lock);
	spin_unlock_irqrestore(&qp->s_lock, flags);
	list_del_init(&stx->list);
	return -EAGAIN;
}
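
/*
 * Return contract of iowait_sleep() above, as the SDMA submission code
 * appears to interpret it: -EAGAIN means the descriptor ring made
 * progress while the txreq was being queued and the submit should be
 * retried immediately; -EBUSY means the txreq was parked on tx_head and
 * the QP was added to the engine's dmawait list, to be rescheduled later
 * by iowait_wakeup().
 */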

static void iowait_wakeup(struct iowait *wait, int reason)
{
	struct rvt_qp *qp = iowait_to_qp(wait);

	WARN_ON(reason != SDMA_AVAIL_REASON);
	hfi1_qp_wakeup(qp, RVT_S_WAIT_DMA_DESC);
}

static void iowait_sdma_drained(struct iowait *wait)
{
	struct rvt_qp *qp = iowait_to_qp(wait);
	unsigned long flags;

	/*
	 * This happens when the send engine notes
	 * a QP in the error state and cannot
	 * do the flush work until that QP's
	 * sdma work has finished.
	 */
	spin_lock_irqsave(&qp->s_lock, flags);
	if (qp->s_flags & RVT_S_WAIT_DMA) {
		qp->s_flags &= ~RVT_S_WAIT_DMA;
		hfi1_schedule_send(qp);
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
}

/**
 * qp_to_sdma_engine - map a qp to a send engine
 * @qp: the QP
 * @sc5: the 5 bit sc
 *
 * Return:
 * A send engine for the qp or NULL for SMI type qp.
 */
struct sdma_engine *qp_to_sdma_engine(struct rvt_qp *qp, u8 sc5)
{
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct sdma_engine *sde;

	if (!(dd->flags & HFI1_HAS_SEND_DMA))
		return NULL;
	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
		return NULL;
	default:
		break;
	}
	sde = sdma_select_engine_sc(dd, qp->ibqp.qp_num >> dd->qos_shift, sc5);
	return sde;
}

/*
 * qp_to_send_context - map a qp to a send context
 * @qp: the QP
 * @sc5: the 5 bit sc
 *
 * Return:
 * A send context for the qp
 */
struct send_context *qp_to_send_context(struct rvt_qp *qp, u8 sc5)
{
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);

	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
		/* SMA packets to VL15 */
		return dd->vld[15].sc;
	default:
		break;
	}

	return pio_select_send_context_sc(dd, qp->ibqp.qp_num >> dd->qos_shift,
					  sc5);
}

struct qp_iter {
	struct hfi1_ibdev *dev;
	struct rvt_qp *qp;
	int specials;
	int n;
};

struct qp_iter *qp_iter_init(struct hfi1_ibdev *dev)
{
	struct qp_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	iter->specials = dev->rdi.ibdev.phys_port_cnt * 2;
	if (qp_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int qp_iter_next(struct qp_iter *iter)
{
	struct hfi1_ibdev *dev = iter->dev;
	int n = iter->n;
	int ret = 1;
	struct rvt_qp *pqp = iter->qp;
	struct rvt_qp *qp;

	/*
	 * The approach is to consider the special qps
	 * as additional table entries before the
	 * real hash table. Since the qp code sets
	 * the qp->next hash link to NULL, this works just fine.
	 *
	 * iter->specials is 2 * # ports
	 *
	 * n = 0..iter->specials are the special qp indices
	 *
	 * n = iter->specials..dev->rdi.qp_dev->qp_table_size+iter->specials
	 * are the potential hash bucket entries
	 */
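	/*
	 * Example with a single physical port: n == 0 visits QP0 (SMI),
	 * n == 1 visits QP1 (GSI), and n >= 2 walks hash bucket n - 2.
	 */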
	for (; n < dev->rdi.qp_dev->qp_table_size + iter->specials; n++) {
		if (pqp) {
			qp = rcu_dereference(pqp->next);
		} else {
			if (n < iter->specials) {
				struct hfi1_pportdata *ppd;
				struct hfi1_ibport *ibp;
				int pidx;

				pidx = n % dev->rdi.ibdev.phys_port_cnt;
				ppd = &dd_from_dev(dev)->pport[pidx];
				ibp = &ppd->ibport_data;

				if (!(n & 1))
					qp = rcu_dereference(ibp->rvp.qp[0]);
				else
					qp = rcu_dereference(ibp->rvp.qp[1]);
			} else {
				qp = rcu_dereference(
					dev->rdi.qp_dev->qp_table[
						(n - iter->specials)]);
			}
		}
		pqp = qp;
		if (qp) {
			iter->qp = qp;
			iter->n = n;
			return 0;
		}
	}
	return ret;
}

static const char * const qp_type_str[] = {
	"SMI", "GSI", "RC", "UC", "UD",
};

static int qp_idle(struct rvt_qp *qp)
{
	return
		qp->s_last == qp->s_acked &&
		qp->s_acked == qp->s_cur &&
		qp->s_cur == qp->s_tail &&
		qp->s_tail == qp->s_head;
}

void qp_iter_print(struct seq_file *s, struct qp_iter *iter)
{
	struct rvt_swqe *wqe;
	struct rvt_qp *qp = iter->qp;
	struct hfi1_qp_priv *priv = qp->priv;
	struct sdma_engine *sde;
	struct send_context *send_context;

	sde = qp_to_sdma_engine(qp, priv->s_sc);
	wqe = rvt_get_swqe_ptr(qp, qp->s_last);
	send_context = qp_to_send_context(qp, priv->s_sc);
	seq_printf(s,
		   "N %d %s QP %x R %u %s %u %u %u f=%x %u %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u %u) RQP %x LID %x SL %u MTU %u %u %u %u SDE %p,%u SC %p,%u SCQ %u %u PID %d\n",
		   iter->n,
		   qp_idle(qp) ? "I" : "B",
		   qp->ibqp.qp_num,
		   atomic_read(&qp->refcount),
		   qp_type_str[qp->ibqp.qp_type],
		   qp->state,
		   wqe ? wqe->wr.opcode : 0,
		   qp->s_hdrwords,
		   qp->s_flags,
		   iowait_sdma_pending(&priv->s_iowait),
		   iowait_pio_pending(&priv->s_iowait),
		   !list_empty(&priv->s_iowait.list),
		   qp->timeout,
		   wqe ? wqe->ssn : 0,
		   qp->s_lsn,
		   qp->s_last_psn,
		   qp->s_psn, qp->s_next_psn,
		   qp->s_sending_psn, qp->s_sending_hpsn,
		   qp->s_last, qp->s_acked, qp->s_cur,
		   qp->s_tail, qp->s_head, qp->s_size,
		   qp->s_avail,
		   qp->remote_qpn,
		   qp->remote_ah_attr.dlid,
		   qp->remote_ah_attr.sl,
		   qp->pmtu,
		   qp->s_retry,
		   qp->s_retry_cnt,
		   qp->s_rnr_retry_cnt,
		   sde,
		   sde ? sde->this_idx : 0,
		   send_context,
		   send_context ? send_context->sw_index : 0,
		   ibcq_to_rvtcq(qp->ibqp.send_cq)->queue->head,
		   ibcq_to_rvtcq(qp->ibqp.send_cq)->queue->tail,
		   qp->pid);
}

void qp_comm_est(struct rvt_qp *qp)
{
	qp->r_flags |= RVT_R_COMM_EST;
	if (qp->ibqp.event_handler) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_COMM_EST;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}
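
/*
 * The functions that follow (qp_priv_alloc(), qp_priv_free(),
 * free_all_qps(), flush_qp_waiters(), stop_send_queue(), quiesce_qp(),
 * notify_qp_reset(), the MTU helpers and notify_error_qp()) implement
 * rvt_driver_provided callbacks; rdmavt invokes them while creating,
 * resetting, erroring and destroying QPs on this driver's behalf.
 */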
void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp,
		    gfp_t gfp)
{
	struct hfi1_qp_priv *priv;

	priv = kzalloc_node(sizeof(*priv), gfp, rdi->dparms.node);
	if (!priv)
		return ERR_PTR(-ENOMEM);

	priv->owner = qp;

	priv->s_ahg = kzalloc_node(sizeof(*priv->s_ahg), gfp,
				   rdi->dparms.node);
	if (!priv->s_ahg) {
		kfree(priv);
		return ERR_PTR(-ENOMEM);
	}
	setup_timer(&priv->s_rnr_timer, hfi1_rc_rnr_retry, (unsigned long)qp);
	qp->s_timer.function = hfi1_rc_timeout;
	return priv;
}

void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	kfree(priv->s_ahg);
	kfree(priv);
}

/*
 * free_all_qps - called by rdmavt while tearing down the QP tables;
 * returns the number of per-port special QPs (QP0/QP1) still in use.
 */
unsigned free_all_qps(struct rvt_dev_info *rdi)
{
	struct hfi1_ibdev *verbs_dev = container_of(rdi,
						    struct hfi1_ibdev,
						    rdi);
	struct hfi1_devdata *dd = container_of(verbs_dev,
					       struct hfi1_devdata,
					       verbs_dev);
	int n;
	unsigned qp_inuse = 0;

	for (n = 0; n < dd->num_pports; n++) {
		struct hfi1_ibport *ibp = &dd->pport[n].ibport_data;

		rcu_read_lock();
		if (rcu_dereference(ibp->rvp.qp[0]))
			qp_inuse++;
		if (rcu_dereference(ibp->rvp.qp[1]))
			qp_inuse++;
		rcu_read_unlock();
	}

	return qp_inuse;
}

void flush_qp_waiters(struct rvt_qp *qp)
{
	flush_iowait(qp);
	hfi1_stop_rc_timers(qp);
}

void stop_send_queue(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	cancel_work_sync(&priv->s_iowait.iowork);
	hfi1_del_timers_sync(qp);
}

void quiesce_qp(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	iowait_sdma_drain(&priv->s_iowait);
	qp_pio_drain(qp);
	flush_tx_list(qp);
}

void notify_qp_reset(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	iowait_init(
		&priv->s_iowait,
		1,
		_hfi1_do_send,
		iowait_sleep,
		iowait_wakeup,
		iowait_sdma_drained);
	priv->r_adefered = 0;
	clear_ahg(qp);
}

/*
 * Switch to alternate path.
 * The QP s_lock should be held and interrupts disabled.
 */
void hfi1_migrate_qp(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct ib_event ev;

	qp->s_mig_state = IB_MIG_MIGRATED;
	qp->remote_ah_attr = qp->alt_ah_attr;
	qp->port_num = qp->alt_ah_attr.port_num;
	qp->s_pkey_index = qp->s_alt_pkey_index;
	qp->s_flags |= RVT_S_AHG_CLEAR;
	priv->s_sc = ah_to_sc(qp->ibqp.device, &qp->remote_ah_attr);
	priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);

	ev.device = qp->ibqp.device;
	ev.element.qp = &qp->ibqp;
	ev.event = IB_EVENT_PATH_MIG;
	qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}

int mtu_to_path_mtu(u32 mtu)
{
	return mtu_to_enum(mtu, OPA_MTU_8192);
}

u32 mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
{
	u32 mtu;
	struct hfi1_ibdev *verbs_dev = container_of(rdi,
						    struct hfi1_ibdev,
						    rdi);
	struct hfi1_devdata *dd = container_of(verbs_dev,
					       struct hfi1_devdata,
					       verbs_dev);
	struct hfi1_ibport *ibp;
	u8 sc, vl;

	ibp = &dd->pport[qp->port_num - 1].ibport_data;
	sc = ibp->sl_to_sc[qp->remote_ah_attr.sl];
	vl = sc_to_vlt(dd, sc);

	mtu = verbs_mtu_enum_to_int(qp->ibqp.device, pmtu);
	if (vl < PER_VL_SEND_CONTEXTS)
		mtu = min_t(u32, mtu, dd->vld[vl].mtu);
	return mtu;
}

int get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
		       struct ib_qp_attr *attr)
{
	int mtu, pidx = qp->port_num - 1;
	struct hfi1_ibdev *verbs_dev = container_of(rdi,
						    struct hfi1_ibdev,
						    rdi);
	struct hfi1_devdata *dd = container_of(verbs_dev,
					       struct hfi1_devdata,
					       verbs_dev);

	mtu = verbs_mtu_enum_to_int(qp->ibqp.device, attr->path_mtu);
	if (mtu == -1)
		return -1; /* values less than 0 are error */

	if (mtu > dd->pport[pidx].ibmtu)
		return mtu_to_enum(dd->pport[pidx].ibmtu, IB_MTU_2048);
	else
		return attr->path_mtu;
}

void notify_error_qp(struct rvt_qp *qp)
{
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	struct hfi1_qp_priv *priv = qp->priv;

	write_seqlock(&dev->iowait_lock);
	if (!list_empty(&priv->s_iowait.list) && !(qp->s_flags & RVT_S_BUSY)) {
		qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
		list_del_init(&priv->s_iowait.list);
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
	write_sequnlock(&dev->iowait_lock);

	if (!(qp->s_flags & RVT_S_BUSY)) {
		qp->s_hdrwords = 0;
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		flush_tx_list(qp);
	}
}

/**
 * hfi1_error_port_qps - put a port's RC/UC qps into error state
 * @ibp: the ibport.
 * @sl: the service level.
 *
 * This function places all RC/UC qps with a given service level into error
 * state. It is generally called to force upper layer apps to abandon stale
 * qps after an sl->sc mapping change.
 */
void hfi1_error_port_qps(struct hfi1_ibport *ibp, u8 sl)
{
	struct rvt_qp *qp = NULL;
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct hfi1_ibdev *dev = &ppd->dd->verbs_dev;
	int n;
	int lastwqe;
	struct ib_event ev;

	rcu_read_lock();

	/* Deal only with RC/UC qps that use the given SL. */
	for (n = 0; n < dev->rdi.qp_dev->qp_table_size; n++) {
		for (qp = rcu_dereference(dev->rdi.qp_dev->qp_table[n]); qp;
			qp = rcu_dereference(qp->next)) {
			if (qp->port_num == ppd->port &&
			    (qp->ibqp.qp_type == IB_QPT_UC ||
			     qp->ibqp.qp_type == IB_QPT_RC) &&
			    qp->remote_ah_attr.sl == sl &&
			    (ib_rvt_state_ops[qp->state] &
			     RVT_POST_SEND_OK)) {
				spin_lock_irq(&qp->r_lock);
				spin_lock(&qp->s_hlock);
				spin_lock(&qp->s_lock);
				lastwqe = rvt_error_qp(qp,
						       IB_WC_WR_FLUSH_ERR);
				spin_unlock(&qp->s_lock);
				spin_unlock(&qp->s_hlock);
				spin_unlock_irq(&qp->r_lock);
				if (lastwqe) {
					ev.device = qp->ibqp.device;
					ev.element.qp = &qp->ibqp;
					ev.event =
						IB_EVENT_QP_LAST_WQE_REACHED;
					qp->ibqp.event_handler(&ev,
							       qp->ibqp.qp_context);
				}
			}
		}
	}

	rcu_read_unlock();
}