/*
 * Copyright(c) 2015 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/hash.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>
#include <rdma/ib_verbs.h>

#include "hfi.h"
#include "qp.h"
#include "trace.h"
#include "verbs_txreq.h"

unsigned int hfi1_qp_table_size = 256;
module_param_named(qp_table_size, hfi1_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");

static void flush_tx_list(struct rvt_qp *qp);
static int iowait_sleep(
        struct sdma_engine *sde,
        struct iowait_work *wait,
        struct sdma_txreq *stx,
        unsigned int seq,
        bool pkts_sent);
static void iowait_wakeup(struct iowait *wait, int reason);
static void iowait_sdma_drained(struct iowait *wait);
static void qp_pio_drain(struct rvt_qp *qp);

const struct rvt_operation_params hfi1_post_parms[RVT_OPERATION_MAX] = {
[IB_WR_RDMA_WRITE] = {
        .length = sizeof(struct ib_rdma_wr),
        .qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_RDMA_READ] = {
        .length = sizeof(struct ib_rdma_wr),
        .qpt_support = BIT(IB_QPT_RC),
        .flags = RVT_OPERATION_ATOMIC,
},

[IB_WR_ATOMIC_CMP_AND_SWP] = {
        .length = sizeof(struct ib_atomic_wr),
        .qpt_support = BIT(IB_QPT_RC),
        .flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
},

[IB_WR_ATOMIC_FETCH_AND_ADD] = {
        .length = sizeof(struct ib_atomic_wr),
        .qpt_support = BIT(IB_QPT_RC),
        .flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
},

[IB_WR_RDMA_WRITE_WITH_IMM] = {
        .length = sizeof(struct ib_rdma_wr),
        .qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_SEND] = {
        .length = sizeof(struct ib_send_wr),
        .qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
                       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_SEND_WITH_IMM] = {
        .length = sizeof(struct ib_send_wr),
        .qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
                       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_REG_MR] = {
        .length = sizeof(struct ib_reg_wr),
        .qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
        .flags = RVT_OPERATION_LOCAL,
},

[IB_WR_LOCAL_INV] = {
        .length = sizeof(struct ib_send_wr),
        .qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
        .flags = RVT_OPERATION_LOCAL,
},

[IB_WR_SEND_WITH_INV] = {
        .length = sizeof(struct ib_send_wr),
        .qpt_support = BIT(IB_QPT_RC),
},

};

static void flush_list_head(struct list_head *l)
{
        while (!list_empty(l)) {
                struct sdma_txreq *tx;

                tx = list_first_entry(
                        l,
                        struct sdma_txreq,
                        list);
                list_del_init(&tx->list);
                hfi1_put_txreq(
                        container_of(tx, struct verbs_txreq, txreq));
        }
}

static void flush_tx_list(struct rvt_qp *qp)
{
        struct hfi1_qp_priv *priv = qp->priv;

        flush_list_head(&iowait_get_ib_work(&priv->s_iowait)->tx_head);
        flush_list_head(&iowait_get_tid_work(&priv->s_iowait)->tx_head);
}

static void flush_iowait(struct rvt_qp *qp)
{
        struct hfi1_qp_priv *priv = qp->priv;
        unsigned long flags;
        seqlock_t *lock = priv->s_iowait.lock;

        if (!lock)
                return;
        write_seqlock_irqsave(lock, flags);
        if (!list_empty(&priv->s_iowait.list)) {
                list_del_init(&priv->s_iowait.list);
                priv->s_iowait.lock = NULL;
                rvt_put_qp(qp);
        }
        write_sequnlock_irqrestore(lock, flags);
}
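
/*
 * For reference, a rough sketch of the mapping performed by the two MTU
 * helpers below (illustrative values only; standard IB enums fall back
 * to ib_mtu_enum_to_int()):
 *
 *      IB_MTU_2048   -> 2048
 *      IB_MTU_4096   -> 4096
 *      OPA_MTU_8192  -> 8192
 *      OPA_MTU_10240 -> 8192 (verbs_mtu_enum_to_int() constrains 10KB
 *                             packets to 8KB)
 */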

static inline int opa_mtu_enum_to_int(int mtu)
{
        switch (mtu) {
        case OPA_MTU_8192:  return 8192;
        case OPA_MTU_10240: return 10240;
        default:            return -1;
        }
}

/**
 * This function is what we would push to the core layer if we wanted to be a
 * "first class citizen".  Instead we hide this here and rely on Verbs ULPs
 * to blindly pass the MTU enum value from the PathRecord to us.
 */
static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu)
{
        int val;

        /* Constraining 10KB packets to 8KB packets */
        if (mtu == (enum ib_mtu)OPA_MTU_10240)
                mtu = OPA_MTU_8192;
        val = opa_mtu_enum_to_int((int)mtu);
        if (val > 0)
                return val;
        return ib_mtu_enum_to_int(mtu);
}

int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
                         int attr_mask, struct ib_udata *udata)
{
        struct ib_qp *ibqp = &qp->ibqp;
        struct hfi1_ibdev *dev = to_idev(ibqp->device);
        struct hfi1_devdata *dd = dd_from_dev(dev);
        u8 sc;

        if (attr_mask & IB_QP_AV) {
                sc = ah_to_sc(ibqp->device, &attr->ah_attr);
                if (sc == 0xf)
                        return -EINVAL;

                if (!qp_to_sdma_engine(qp, sc) &&
                    dd->flags & HFI1_HAS_SEND_DMA)
                        return -EINVAL;

                if (!qp_to_send_context(qp, sc))
                        return -EINVAL;
        }

        if (attr_mask & IB_QP_ALT_PATH) {
                sc = ah_to_sc(ibqp->device, &attr->alt_ah_attr);
                if (sc == 0xf)
                        return -EINVAL;

                if (!qp_to_sdma_engine(qp, sc) &&
                    dd->flags & HFI1_HAS_SEND_DMA)
                        return -EINVAL;

                if (!qp_to_send_context(qp, sc))
                        return -EINVAL;
        }

        return 0;
}

/*
 * qp_set_16b - Set the hdr_type based on whether the slid or the
 * dlid in the connection is extended.  Only applicable for RC and UC
 * QPs.  UD QPs determine this on the fly from the ah in the wqe.
 */
static inline void qp_set_16b(struct rvt_qp *qp)
{
        struct hfi1_pportdata *ppd;
        struct hfi1_ibport *ibp;
        struct hfi1_qp_priv *priv = qp->priv;

        /* Update ah_attr to account for extended LIDs */
        hfi1_update_ah_attr(qp->ibqp.device, &qp->remote_ah_attr);

        /* Create 32 bit LIDs */
        hfi1_make_opa_lid(&qp->remote_ah_attr);

        if (!(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH))
                return;

        ibp = to_iport(qp->ibqp.device, qp->port_num);
        ppd = ppd_from_ibp(ibp);
        priv->hdr_type = hfi1_get_hdr_type(ppd->lid, &qp->remote_ah_attr);
}

void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
                    int attr_mask, struct ib_udata *udata)
{
        struct ib_qp *ibqp = &qp->ibqp;
        struct hfi1_qp_priv *priv = qp->priv;

        if (attr_mask & IB_QP_AV) {
                priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
                priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
                priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
                qp_set_16b(qp);
        }

        if (attr_mask & IB_QP_PATH_MIG_STATE &&
            attr->path_mig_state == IB_MIG_MIGRATED &&
            qp->s_mig_state == IB_MIG_ARMED) {
                qp->s_flags |= HFI1_S_AHG_CLEAR;
                priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
                priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
                priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
                qp_set_16b(qp);
        }
}

/**
 * hfi1_setup_wqe - set up the wqe
 * @qp: The qp
 * @wqe: The built wqe
 * @call_send: Determine if the send should be posted or scheduled.
 *
 * Perform setup of the wqe.
 * This is called prior to inserting the wqe into the ring but after
 * the wqe has been setup by RDMAVT.  This function allows the driver
 * the opportunity to perform validation and additional setup of the
 * wqe.
 *
 * Returns 0 on success, -EINVAL on failure
 *
 */
int hfi1_setup_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe, bool *call_send)
{
        struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
        struct rvt_ah *ah;
        struct hfi1_pportdata *ppd;
        struct hfi1_devdata *dd;

        switch (qp->ibqp.qp_type) {
        case IB_QPT_RC:
        case IB_QPT_UC:
                if (wqe->length > 0x80000000U)
                        return -EINVAL;
                if (wqe->length > qp->pmtu)
                        *call_send = false;
                break;
        case IB_QPT_SMI:
                /*
                 * SM packets should exclusively use VL15 and their SL is
                 * ignored (IBTA v1.3, Section 3.5.8.2).  Therefore, when ah
                 * is created, SL is 0 in most cases and as a result some
                 * fields (vl and pmtu) in ah may not be set correctly,
                 * depending on the SL2SC and SC2VL tables at the time.
                 */
                ppd = ppd_from_ibp(ibp);
                dd = dd_from_ppd(ppd);
                if (wqe->length > dd->vld[15].mtu)
                        return -EINVAL;
                break;
        case IB_QPT_GSI:
        case IB_QPT_UD:
                ah = ibah_to_rvtah(wqe->ud_wr.ah);
                if (wqe->length > (1 << ah->log_pmtu))
                        return -EINVAL;
                if (ibp->sl_to_sc[rdma_ah_get_sl(&ah->attr)] == 0xf)
                        return -EINVAL;
                break;
        default:
                break;
        }

        /*
         * System latency between send and schedule is large enough that
         * forcing call_send to true for piothreshold packets is necessary.
         */
        if (wqe->length <= piothreshold)
                *call_send = true;
        return 0;
}

/**
 * _hfi1_schedule_send - schedule progress
 * @qp: the QP
 *
 * This schedules qp progress w/o regard to the s_flags.
 *
 * It is only used in the post send, which doesn't hold
 * the s_lock.
 */
bool _hfi1_schedule_send(struct rvt_qp *qp)
{
        struct hfi1_qp_priv *priv = qp->priv;
        struct hfi1_ibport *ibp =
                to_iport(qp->ibqp.device, qp->port_num);
        struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
        struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);

        return iowait_schedule(&priv->s_iowait, ppd->hfi1_wq,
                               priv->s_sde ?
                               priv->s_sde->cpu :
                               cpumask_first(cpumask_of_node(dd->node)));
}

static void qp_pio_drain(struct rvt_qp *qp)
{
        struct hfi1_qp_priv *priv = qp->priv;

        if (!priv->s_sendcontext)
                return;
        while (iowait_pio_pending(&priv->s_iowait)) {
                write_seqlock_irq(&priv->s_sendcontext->waitlock);
                hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 1);
                write_sequnlock_irq(&priv->s_sendcontext->waitlock);
                iowait_pio_drain(&priv->s_iowait);
                write_seqlock_irq(&priv->s_sendcontext->waitlock);
                hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 0);
                write_sequnlock_irq(&priv->s_sendcontext->waitlock);
        }
}
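
/*
 * A rough sketch of how the two scheduling helpers are used (illustrative
 * only, not a literal call site):
 *
 *      _hfi1_schedule_send() above queues the first leg without looking
 *      at s_flags and is only used from the post-send path, which does
 *      not hold the s_lock.
 *
 *      hfi1_schedule_send() below must be called with the s_lock held,
 *      e.g. (the 'scheduled' local is purely illustrative):
 *
 *              spin_lock_irqsave(&qp->s_lock, flags);
 *              scheduled = hfi1_schedule_send(qp);
 *              spin_unlock_irqrestore(&qp->s_lock, flags);
 */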

/**
 * hfi1_schedule_send - schedule progress
 * @qp: the QP
 *
 * This schedules qp progress; the caller should hold the s_lock.
 *
 * Return: true if the first leg is scheduled;
 * false if the first leg is not scheduled.
 */
bool hfi1_schedule_send(struct rvt_qp *qp)
{
        lockdep_assert_held(&qp->s_lock);
        if (hfi1_send_ok(qp)) {
                _hfi1_schedule_send(qp);
                return true;
        }
        if (qp->s_flags & HFI1_S_ANY_WAIT_IO)
                iowait_set_flag(&((struct hfi1_qp_priv *)qp->priv)->s_iowait,
                                IOWAIT_PENDING_IB);
        return false;
}

static void hfi1_qp_schedule(struct rvt_qp *qp)
{
        struct hfi1_qp_priv *priv = qp->priv;
        bool ret;

        if (iowait_flag_set(&priv->s_iowait, IOWAIT_PENDING_IB)) {
                ret = hfi1_schedule_send(qp);
                if (ret)
                        iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
        }
}

void hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag)
{
        unsigned long flags;

        spin_lock_irqsave(&qp->s_lock, flags);
        if (qp->s_flags & flag) {
                qp->s_flags &= ~flag;
                trace_hfi1_qpwakeup(qp, flag);
                hfi1_qp_schedule(qp);
        }
        spin_unlock_irqrestore(&qp->s_lock, flags);
        /* Notify hfi1_destroy_qp() if it is waiting. */
        rvt_put_qp(qp);
}

void hfi1_qp_unbusy(struct rvt_qp *qp, struct iowait_work *wait)
{
        if (iowait_set_work_flag(wait) == IOWAIT_IB_SE)
                qp->s_flags &= ~RVT_S_BUSY;
}

static int iowait_sleep(
        struct sdma_engine *sde,
        struct iowait_work *wait,
        struct sdma_txreq *stx,
        uint seq,
        bool pkts_sent)
{
        struct verbs_txreq *tx = container_of(stx, struct verbs_txreq, txreq);
        struct rvt_qp *qp;
        struct hfi1_qp_priv *priv;
        unsigned long flags;
        int ret = 0;

        qp = tx->qp;
        priv = qp->priv;

        spin_lock_irqsave(&qp->s_lock, flags);
        if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
                /*
                 * If we couldn't queue the DMA request, save the info
                 * and try again later rather than destroying the
                 * buffer and undoing the side effects of the copy.
                 */
                /* Make a common routine? */
                list_add_tail(&stx->list, &wait->tx_head);
                write_seqlock(&sde->waitlock);
                if (sdma_progress(sde, seq, stx))
                        goto eagain;
                if (list_empty(&priv->s_iowait.list)) {
                        struct hfi1_ibport *ibp =
                                to_iport(qp->ibqp.device, qp->port_num);

                        ibp->rvp.n_dmawait++;
                        qp->s_flags |= RVT_S_WAIT_DMA_DESC;
                        iowait_queue(pkts_sent, &priv->s_iowait,
                                     &sde->dmawait);
                        priv->s_iowait.lock = &sde->waitlock;
                        trace_hfi1_qpsleep(qp, RVT_S_WAIT_DMA_DESC);
                        rvt_get_qp(qp);
                }
                write_sequnlock(&sde->waitlock);
                hfi1_qp_unbusy(qp, wait);
                spin_unlock_irqrestore(&qp->s_lock, flags);
                ret = -EBUSY;
        } else {
                spin_unlock_irqrestore(&qp->s_lock, flags);
                hfi1_put_txreq(tx);
        }
        return ret;
eagain:
        write_sequnlock(&sde->waitlock);
        spin_unlock_irqrestore(&qp->s_lock, flags);
        list_del_init(&stx->list);
        return -EAGAIN;
}

static void iowait_wakeup(struct iowait *wait, int reason)
{
        struct rvt_qp *qp = iowait_to_qp(wait);

        WARN_ON(reason != SDMA_AVAIL_REASON);
        hfi1_qp_wakeup(qp, RVT_S_WAIT_DMA_DESC);
}

static void iowait_sdma_drained(struct iowait *wait)
{
        struct rvt_qp *qp = iowait_to_qp(wait);
        unsigned long flags;

        /*
         * This happens when the send engine notes
         * a QP in the error state and cannot
         * do the flush work until that QP's
         * sdma work has finished.
         */
        spin_lock_irqsave(&qp->s_lock, flags);
        if (qp->s_flags & RVT_S_WAIT_DMA) {
                qp->s_flags &= ~RVT_S_WAIT_DMA;
                hfi1_schedule_send(qp);
        }
        spin_unlock_irqrestore(&qp->s_lock, flags);
}

/**
 * qp_to_sdma_engine - map a qp to a send engine
 * @qp: the QP
 * @sc5: the 5 bit sc
 *
 * Return:
 * A send engine for the qp or NULL for SMI type qp.
 */
struct sdma_engine *qp_to_sdma_engine(struct rvt_qp *qp, u8 sc5)
{
        struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
        struct sdma_engine *sde;

        if (!(dd->flags & HFI1_HAS_SEND_DMA))
                return NULL;
        switch (qp->ibqp.qp_type) {
        case IB_QPT_SMI:
                return NULL;
        default:
                break;
        }
        sde = sdma_select_engine_sc(dd, qp->ibqp.qp_num >> dd->qos_shift, sc5);
        return sde;
}

/*
 * qp_to_send_context - map a qp to a send context
 * @qp: the QP
 * @sc5: the 5 bit sc
 *
 * Return:
 * A send context for the qp
 */
struct send_context *qp_to_send_context(struct rvt_qp *qp, u8 sc5)
{
        struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);

        switch (qp->ibqp.qp_type) {
        case IB_QPT_SMI:
                /* SMA packets to VL15 */
                return dd->vld[15].sc;
        default:
                break;
        }

        return pio_select_send_context_sc(dd, qp->ibqp.qp_num >> dd->qos_shift,
                                          sc5);
}

static const char * const qp_type_str[] = {
        "SMI", "GSI", "RC", "UC", "UD",
};

static int qp_idle(struct rvt_qp *qp)
{
        return
                qp->s_last == qp->s_acked &&
                qp->s_acked == qp->s_cur &&
                qp->s_cur == qp->s_tail &&
                qp->s_tail == qp->s_head;
}

/**
 * qp_iter_print - print the qp information to seq_file
 * @s: the seq_file to emit the qp information on
 * @iter: the iterator for the qp hash list
 */
void qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter)
{
        struct rvt_swqe *wqe;
        struct rvt_qp *qp = iter->qp;
        struct hfi1_qp_priv *priv = qp->priv;
        struct sdma_engine *sde;
        struct send_context *send_context;
        struct rvt_ack_entry *e = NULL;
        struct rvt_srq *srq = qp->ibqp.srq ?
                ibsrq_to_rvtsrq(qp->ibqp.srq) : NULL;

        sde = qp_to_sdma_engine(qp, priv->s_sc);
        wqe = rvt_get_swqe_ptr(qp, qp->s_last);
        send_context = qp_to_send_context(qp, priv->s_sc);
        if (qp->s_ack_queue)
                e = &qp->s_ack_queue[qp->s_tail_ack_queue];
        seq_printf(s,
                   "N %d %s QP %x R %u %s %u %u f=%x %u %u %u %u %u %u SPSN %x %x %x %x %x RPSN %x S(%u %u %u %u %u %u %u) R(%u %u %u) RQP %x LID %x SL %u MTU %u %u %u %u %u SDE %p,%u SC %p,%u SCQ %u %u PID %d OS %x %x E %x %x %x RNR %d %s %d\n",
                   iter->n,
                   qp_idle(qp) ? "I" : "B",
                   qp->ibqp.qp_num,
                   atomic_read(&qp->refcount),
                   qp_type_str[qp->ibqp.qp_type],
                   qp->state,
                   wqe ? wqe->wr.opcode : 0,
                   qp->s_flags,
                   iowait_sdma_pending(&priv->s_iowait),
                   iowait_pio_pending(&priv->s_iowait),
                   !list_empty(&priv->s_iowait.list),
                   qp->timeout,
                   wqe ? wqe->ssn : 0,
                   qp->s_lsn,
                   qp->s_last_psn,
                   qp->s_psn, qp->s_next_psn,
                   qp->s_sending_psn, qp->s_sending_hpsn,
                   qp->r_psn,
                   qp->s_last, qp->s_acked, qp->s_cur,
                   qp->s_tail, qp->s_head, qp->s_size,
                   qp->s_avail,
                   /* ack_queue ring pointers, size */
                   qp->s_tail_ack_queue, qp->r_head_ack_queue,
                   rvt_max_atomic(&to_idev(qp->ibqp.device)->rdi),
                   /* remote QP info */
                   qp->remote_qpn,
                   rdma_ah_get_dlid(&qp->remote_ah_attr),
                   rdma_ah_get_sl(&qp->remote_ah_attr),
                   qp->pmtu,
                   qp->s_retry,
                   qp->s_retry_cnt,
                   qp->s_rnr_retry_cnt,
                   qp->s_rnr_retry,
                   sde,
                   sde ? sde->this_idx : 0,
                   send_context,
                   send_context ? send_context->sw_index : 0,
                   ibcq_to_rvtcq(qp->ibqp.send_cq)->queue->head,
                   ibcq_to_rvtcq(qp->ibqp.send_cq)->queue->tail,
                   qp->pid,
                   qp->s_state,
                   qp->s_ack_state,
                   /* ack queue information */
                   e ? e->opcode : 0,
                   e ? e->psn : 0,
                   e ? e->lpsn : 0,
                   qp->r_min_rnr_timer,
                   srq ? "SRQ" : "RQ",
                   srq ? srq->rq.size : qp->r_rq.size
                );
}

void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
        struct hfi1_qp_priv *priv;

        priv = kzalloc_node(sizeof(*priv), GFP_KERNEL, rdi->dparms.node);
        if (!priv)
                return ERR_PTR(-ENOMEM);

        priv->owner = qp;

        priv->s_ahg = kzalloc_node(sizeof(*priv->s_ahg), GFP_KERNEL,
                                   rdi->dparms.node);
        if (!priv->s_ahg) {
                kfree(priv);
                return ERR_PTR(-ENOMEM);
        }
        iowait_init(
                &priv->s_iowait,
                1,
                _hfi1_do_send,
                NULL,
                iowait_sleep,
                iowait_wakeup,
                iowait_sdma_drained);
        return priv;
}

void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
        struct hfi1_qp_priv *priv = qp->priv;

        kfree(priv->s_ahg);
        kfree(priv);
}

unsigned free_all_qps(struct rvt_dev_info *rdi)
{
        struct hfi1_ibdev *verbs_dev = container_of(rdi,
                                                    struct hfi1_ibdev,
                                                    rdi);
        struct hfi1_devdata *dd = container_of(verbs_dev,
                                               struct hfi1_devdata,
                                               verbs_dev);
        int n;
        unsigned qp_inuse = 0;

        for (n = 0; n < dd->num_pports; n++) {
                struct hfi1_ibport *ibp = &dd->pport[n].ibport_data;

                rcu_read_lock();
                if (rcu_dereference(ibp->rvp.qp[0]))
                        qp_inuse++;
                if (rcu_dereference(ibp->rvp.qp[1]))
                        qp_inuse++;
                rcu_read_unlock();
        }

        return qp_inuse;
}

void flush_qp_waiters(struct rvt_qp *qp)
{
        lockdep_assert_held(&qp->s_lock);
        flush_iowait(qp);
}

void stop_send_queue(struct rvt_qp *qp)
{
        struct hfi1_qp_priv *priv = qp->priv;

        iowait_cancel_work(&priv->s_iowait);
}

void quiesce_qp(struct rvt_qp *qp)
{
        struct hfi1_qp_priv *priv = qp->priv;

        iowait_sdma_drain(&priv->s_iowait);
        qp_pio_drain(qp);
        flush_tx_list(qp);
}

void notify_qp_reset(struct rvt_qp *qp)
{
        qp->r_adefered = 0;
        clear_ahg(qp);
}

/*
 * Switch to alternate path.
 * The QP s_lock should be held and interrupts disabled.
 */
void hfi1_migrate_qp(struct rvt_qp *qp)
{
        struct hfi1_qp_priv *priv = qp->priv;
        struct ib_event ev;

        qp->s_mig_state = IB_MIG_MIGRATED;
        qp->remote_ah_attr = qp->alt_ah_attr;
        qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
        qp->s_pkey_index = qp->s_alt_pkey_index;
        qp->s_flags |= HFI1_S_AHG_CLEAR;
        priv->s_sc = ah_to_sc(qp->ibqp.device, &qp->remote_ah_attr);
        priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
        qp_set_16b(qp);

        ev.device = qp->ibqp.device;
        ev.element.qp = &qp->ibqp;
        ev.event = IB_EVENT_PATH_MIG;
        qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}

int mtu_to_path_mtu(u32 mtu)
{
        return mtu_to_enum(mtu, OPA_MTU_8192);
}

u32 mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
{
        u32 mtu;
        struct hfi1_ibdev *verbs_dev = container_of(rdi,
                                                    struct hfi1_ibdev,
                                                    rdi);
        struct hfi1_devdata *dd = container_of(verbs_dev,
                                               struct hfi1_devdata,
                                               verbs_dev);
        struct hfi1_ibport *ibp;
        u8 sc, vl;

        ibp = &dd->pport[qp->port_num - 1].ibport_data;
        sc = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
        vl = sc_to_vlt(dd, sc);

        mtu = verbs_mtu_enum_to_int(qp->ibqp.device, pmtu);
        if (vl < PER_VL_SEND_CONTEXTS)
                mtu = min_t(u32, mtu, dd->vld[vl].mtu);
        return mtu;
}

int get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
                       struct ib_qp_attr *attr)
{
        int mtu, pidx = qp->port_num - 1;
        struct hfi1_ibdev *verbs_dev = container_of(rdi,
                                                    struct hfi1_ibdev,
                                                    rdi);
        struct hfi1_devdata *dd = container_of(verbs_dev,
                                               struct hfi1_devdata,
                                               verbs_dev);
        mtu = verbs_mtu_enum_to_int(qp->ibqp.device, attr->path_mtu);
        if (mtu == -1)
                return -1; /* values less than 0 are error */

        if (mtu > dd->pport[pidx].ibmtu)
                return mtu_to_enum(dd->pport[pidx].ibmtu, IB_MTU_2048);
        else
                return attr->path_mtu;
}

void notify_error_qp(struct rvt_qp *qp)
{
        struct hfi1_qp_priv *priv = qp->priv;
        seqlock_t *lock = priv->s_iowait.lock;

        if (lock) {
                write_seqlock(lock);
                if (!list_empty(&priv->s_iowait.list) &&
                    !(qp->s_flags & RVT_S_BUSY)) {
                        qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
                        list_del_init(&priv->s_iowait.list);
                        priv->s_iowait.lock = NULL;
                        rvt_put_qp(qp);
                }
                write_sequnlock(lock);
        }

        if (!(qp->s_flags & RVT_S_BUSY)) {
                if (qp->s_rdma_mr) {
                        rvt_put_mr(qp->s_rdma_mr);
                        qp->s_rdma_mr = NULL;
                }
                flush_tx_list(qp);
        }
}

/**
 * hfi1_qp_iter_cb - callback for iterator
 * @qp: the qp
 * @v: the sl in low bits of v
 *
 * This is called from the iterator callback to work
 * on an individual qp.
 */
static void hfi1_qp_iter_cb(struct rvt_qp *qp, u64 v)
{
        int lastwqe;
        struct ib_event ev;
        struct hfi1_ibport *ibp =
                to_iport(qp->ibqp.device, qp->port_num);
        struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
        u8 sl = (u8)v;

        if (qp->port_num != ppd->port ||
            (qp->ibqp.qp_type != IB_QPT_UC &&
             qp->ibqp.qp_type != IB_QPT_RC) ||
            rdma_ah_get_sl(&qp->remote_ah_attr) != sl ||
            !(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))
                return;

        spin_lock_irq(&qp->r_lock);
        spin_lock(&qp->s_hlock);
        spin_lock(&qp->s_lock);
        lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
        spin_unlock(&qp->s_lock);
        spin_unlock(&qp->s_hlock);
        spin_unlock_irq(&qp->r_lock);
        if (lastwqe) {
                ev.device = qp->ibqp.device;
                ev.element.qp = &qp->ibqp;
                ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
                qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
        }
}

/**
 * hfi1_error_port_qps - put a port's RC/UC qps into error state
 * @ibp: the ibport.
 * @sl: the service level.
 *
 * This function places all RC/UC qps with a given service level into error
 * state.  It is generally called to force upper layer apps to abandon stale
 * qps after an sl->sc mapping change.
 */
void hfi1_error_port_qps(struct hfi1_ibport *ibp, u8 sl)
{
        struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
        struct hfi1_ibdev *dev = &ppd->dd->verbs_dev;

        rvt_qp_iter(&dev->rdi, sl, hfi1_qp_iter_cb);
}
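
/*
 * Illustrative use of hfi1_error_port_qps() (a sketch, not a literal call
 * site): after the SL->SC mapping for, say, service level 3 changes on a
 * port, the stale RC/UC QPs still using that SL can be forced into the
 * error state with:
 *
 *      hfi1_error_port_qps(ibp, 3);
 *
 * which walks the device's QPs via rvt_qp_iter() and applies
 * hfi1_qp_iter_cb() to each one.
 */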