/*
 * Copyright(c) 2015 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/hash.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>
#include <rdma/ib_verbs.h>

#include "hfi.h"
#include "qp.h"
#include "trace.h"
#include "verbs_txreq.h"

unsigned int hfi1_qp_table_size = 256;
module_param_named(qp_table_size, hfi1_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");

static void flush_tx_list(struct rvt_qp *qp);
static int iowait_sleep(
	struct sdma_engine *sde,
	struct iowait_work *wait,
	struct sdma_txreq *stx,
	unsigned int seq,
	bool pkts_sent);
static void iowait_wakeup(struct iowait *wait, int reason);
static void iowait_sdma_drained(struct iowait *wait);
static void qp_pio_drain(struct rvt_qp *qp);

const struct rvt_operation_params hfi1_post_parms[RVT_OPERATION_MAX] = {
[IB_WR_RDMA_WRITE] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_RDMA_READ] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC,
},

[IB_WR_ATOMIC_CMP_AND_SWP] = {
	.length = sizeof(struct ib_atomic_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
},

[IB_WR_ATOMIC_FETCH_AND_ADD] = {
	.length = sizeof(struct ib_atomic_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
},

[IB_WR_RDMA_WRITE_WITH_IMM] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_SEND] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
		       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_SEND_WITH_IMM] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
		       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_REG_MR] = {
	.length = sizeof(struct ib_reg_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_LOCAL,
},

[IB_WR_LOCAL_INV] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_LOCAL,
},

[IB_WR_SEND_WITH_INV] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_RC),
},

[IB_WR_OPFN] = {
	.length = sizeof(struct ib_atomic_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_USE_RESERVE,
},

[IB_WR_TID_RDMA_WRITE] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_IGN_RNR_CNT,
},

};

static void flush_list_head(struct list_head *l)
{
	while (!list_empty(l)) {
		struct sdma_txreq *tx;

		tx = list_first_entry(
			l,
			struct sdma_txreq,
			list);
		list_del_init(&tx->list);
		hfi1_put_txreq(
			container_of(tx, struct verbs_txreq, txreq));
	}
}

static void flush_tx_list(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	flush_list_head(&iowait_get_ib_work(&priv->s_iowait)->tx_head);
	flush_list_head(&iowait_get_tid_work(&priv->s_iowait)->tx_head);
}

static void flush_iowait(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	unsigned long flags;
	seqlock_t *lock = priv->s_iowait.lock;

	if (!lock)
		return;
	write_seqlock_irqsave(lock, flags);
	if (!list_empty(&priv->s_iowait.list)) {
		list_del_init(&priv->s_iowait.list);
		priv->s_iowait.lock = NULL;
		rvt_put_qp(qp);
	}
	write_sequnlock_irqrestore(lock, flags);
}
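
/*
 * opa_mtu_enum_to_int - convert an OPA MTU enum value to a byte count
 *
 * Returns the size in bytes for the OPA-specific enum values, or -1 so
 * the caller can fall back to ib_mtu_enum_to_int() for standard values.
 */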
static inline int opa_mtu_enum_to_int(int mtu)
{
	switch (mtu) {
	case OPA_MTU_8192:  return 8192;
	case OPA_MTU_10240: return 10240;
	default:            return -1;
	}
}

/**
 * This function is what we would push to the core layer if we wanted to be a
 * "first class citizen". Instead we hide this here and rely on Verbs ULPs
 * to blindly pass the MTU enum value from the PathRecord to us.
 */
static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu)
{
	int val;

	/* Constraining 10KB packets to 8KB packets */
	if (mtu == (enum ib_mtu)OPA_MTU_10240)
		mtu = OPA_MTU_8192;
	val = opa_mtu_enum_to_int((int)mtu);
	if (val > 0)
		return val;
	return ib_mtu_enum_to_int(mtu);
}

int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
			 int attr_mask, struct ib_udata *udata)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct hfi1_ibdev *dev = to_idev(ibqp->device);
	struct hfi1_devdata *dd = dd_from_dev(dev);
	u8 sc;

	if (attr_mask & IB_QP_AV) {
		sc = ah_to_sc(ibqp->device, &attr->ah_attr);
		if (sc == 0xf)
			return -EINVAL;

		if (!qp_to_sdma_engine(qp, sc) &&
		    dd->flags & HFI1_HAS_SEND_DMA)
			return -EINVAL;

		if (!qp_to_send_context(qp, sc))
			return -EINVAL;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		sc = ah_to_sc(ibqp->device, &attr->alt_ah_attr);
		if (sc == 0xf)
			return -EINVAL;

		if (!qp_to_sdma_engine(qp, sc) &&
		    dd->flags & HFI1_HAS_SEND_DMA)
			return -EINVAL;

		if (!qp_to_send_context(qp, sc))
			return -EINVAL;
	}

	return 0;
}

/*
 * qp_set_16b - Set the hdr_type based on whether the slid or the
 * dlid in the connection is extended. Only applicable for RC and UC
 * QPs. UD QPs determine this on the fly from the ah in the wqe
 */
static inline void qp_set_16b(struct rvt_qp *qp)
{
	struct hfi1_pportdata *ppd;
	struct hfi1_ibport *ibp;
	struct hfi1_qp_priv *priv = qp->priv;

	/* Update ah_attr to account for extended LIDs */
	hfi1_update_ah_attr(qp->ibqp.device, &qp->remote_ah_attr);

	/* Create 32 bit LIDs */
	hfi1_make_opa_lid(&qp->remote_ah_attr);

	if (!(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH))
		return;

	ibp = to_iport(qp->ibqp.device, qp->port_num);
	ppd = ppd_from_ibp(ibp);
	priv->hdr_type = hfi1_get_hdr_type(ppd->lid, &qp->remote_ah_attr);
}
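
/**
 * hfi1_modify_qp - driver-specific handling after a QP attribute change
 * @qp: the QP
 * @attr: the requested attributes
 * @attr_mask: mask of attributes being modified
 * @udata: user data (unused here)
 *
 * Refresh the cached SC, SDMA engine and send context when the address
 * vector changes or when a path migration completes, and let the OPFN
 * code react to the new attributes.
 */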
void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_udata *udata)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct hfi1_qp_priv *priv = qp->priv;

	if (attr_mask & IB_QP_AV) {
		priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
		priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
		priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
		qp_set_16b(qp);
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE &&
	    attr->path_mig_state == IB_MIG_MIGRATED &&
	    qp->s_mig_state == IB_MIG_ARMED) {
		qp->s_flags |= HFI1_S_AHG_CLEAR;
		priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
		priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
		priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
		qp_set_16b(qp);
	}

	opfn_qp_init(qp, attr, attr_mask);
}

/**
 * hfi1_setup_wqe - set up the wqe
 * @qp: The qp
 * @wqe: The built wqe
 * @call_send: Determine if the send should be posted or scheduled.
 *
 * Perform setup of the wqe. This is called
 * prior to inserting the wqe into the ring but after
 * the wqe has been setup by RDMAVT. This function
 * allows the driver the opportunity to perform
 * validation and additional setup of the wqe.
 *
 * Returns 0 on success, -EINVAL on failure
 *
 */
int hfi1_setup_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe, bool *call_send)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct rvt_ah *ah;
	struct hfi1_pportdata *ppd;
	struct hfi1_devdata *dd;

	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
		hfi1_setup_tid_rdma_wqe(qp, wqe);
		/* fall through */
	case IB_QPT_UC:
		if (wqe->length > 0x80000000U)
			return -EINVAL;
		if (wqe->length > qp->pmtu)
			*call_send = false;
		break;
	case IB_QPT_SMI:
		/*
		 * SM packets should exclusively use VL15 and their SL is
		 * ignored (IBTA v1.3, Section 3.5.8.2). Therefore, when ah
		 * is created, SL is 0 in most cases and as a result some
		 * fields (vl and pmtu) in ah may not be set correctly,
		 * depending on the SL2SC and SC2VL tables at the time.
		 */
		ppd = ppd_from_ibp(ibp);
		dd = dd_from_ppd(ppd);
		if (wqe->length > dd->vld[15].mtu)
			return -EINVAL;
		break;
	case IB_QPT_GSI:
	case IB_QPT_UD:
		ah = ibah_to_rvtah(wqe->ud_wr.ah);
		if (wqe->length > (1 << ah->log_pmtu))
			return -EINVAL;
		if (ibp->sl_to_sc[rdma_ah_get_sl(&ah->attr)] == 0xf)
			return -EINVAL;
	default:
		break;
	}

	/*
	 * System latency between send and schedule is large enough that
	 * forcing call_send to true for piothreshold packets is necessary.
	 */
	if (wqe->length <= piothreshold)
		*call_send = true;
	return 0;
}

/**
 * _hfi1_schedule_send - schedule progress
 * @qp: the QP
 *
 * This schedules qp progress w/o regard to the s_flags.
 *
 * It is only used in the post send, which doesn't hold
 * the s_lock.
 */
bool _hfi1_schedule_send(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibport *ibp =
		to_iport(qp->ibqp.device, qp->port_num);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);

	return iowait_schedule(&priv->s_iowait, ppd->hfi1_wq,
			       priv->s_sde ?
			       priv->s_sde->cpu :
			       cpumask_first(cpumask_of_node(dd->node)));
}
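
/*
 * qp_pio_drain - wait for any PIO sends outstanding on this QP to complete
 *
 * The PIO-buffer-available interrupt is enabled around each wait so that
 * credit returns from the send context wake the drain.
 */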
static void qp_pio_drain(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	if (!priv->s_sendcontext)
		return;
	while (iowait_pio_pending(&priv->s_iowait)) {
		write_seqlock_irq(&priv->s_sendcontext->waitlock);
		hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 1);
		write_sequnlock_irq(&priv->s_sendcontext->waitlock);
		iowait_pio_drain(&priv->s_iowait);
		write_seqlock_irq(&priv->s_sendcontext->waitlock);
		hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 0);
		write_sequnlock_irq(&priv->s_sendcontext->waitlock);
	}
}

/**
 * hfi1_schedule_send - schedule progress
 * @qp: the QP
 *
 * This schedules qp progress; the caller should hold
 * the s_lock.
 * @return true if the first leg is scheduled;
 * false if the first leg is not scheduled.
 */
bool hfi1_schedule_send(struct rvt_qp *qp)
{
	lockdep_assert_held(&qp->s_lock);
	if (hfi1_send_ok(qp)) {
		_hfi1_schedule_send(qp);
		return true;
	}
	if (qp->s_flags & HFI1_S_ANY_WAIT_IO)
		iowait_set_flag(&((struct hfi1_qp_priv *)qp->priv)->s_iowait,
				IOWAIT_PENDING_IB);
	return false;
}

static void hfi1_qp_schedule(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	bool ret;

	if (iowait_flag_set(&priv->s_iowait, IOWAIT_PENDING_IB)) {
		ret = hfi1_schedule_send(qp);
		if (ret)
			iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
	}
	if (iowait_flag_set(&priv->s_iowait, IOWAIT_PENDING_TID)) {
		ret = hfi1_schedule_tid_send(qp);
		if (ret)
			iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_TID);
	}
}

void hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (qp->s_flags & flag) {
		qp->s_flags &= ~flag;
		trace_hfi1_qpwakeup(qp, flag);
		hfi1_qp_schedule(qp);
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
	/* Notify hfi1_destroy_qp() if it is waiting. */
	rvt_put_qp(qp);
}

void hfi1_qp_unbusy(struct rvt_qp *qp, struct iowait_work *wait)
{
	struct hfi1_qp_priv *priv = qp->priv;

	if (iowait_set_work_flag(wait) == IOWAIT_IB_SE) {
		qp->s_flags &= ~RVT_S_BUSY;
		/*
		 * If we are sending a first-leg packet from the second leg,
		 * we need to clear the busy flag from priv->s_flags to
		 * avoid a race condition when the qp wakes up before
		 * the call to hfi1_verbs_send() returns to the second
		 * leg. In that case, the second leg will terminate without
		 * being re-scheduled, resulting in failure to send TID RDMA
		 * WRITE DATA and TID RDMA ACK packets.
		 */
		if (priv->s_flags & HFI1_S_TID_BUSY_SET) {
			priv->s_flags &= ~(HFI1_S_TID_BUSY_SET |
					   RVT_S_BUSY);
			iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_TID);
		}
	} else {
		priv->s_flags &= ~RVT_S_BUSY;
	}
}
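
/*
 * iowait_sleep - callback from the SDMA engine when a descriptor shortage
 * prevents posting a request
 *
 * If the engine has made progress since @seq was sampled, -EAGAIN tells
 * the caller to retry immediately. Otherwise the txreq is saved on the
 * wait list, the QP is queued on the engine's dmawait list and -EBUSY
 * defers the send until iowait_wakeup() reschedules it.
 */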
static int iowait_sleep(
	struct sdma_engine *sde,
	struct iowait_work *wait,
	struct sdma_txreq *stx,
	uint seq,
	bool pkts_sent)
{
	struct verbs_txreq *tx = container_of(stx, struct verbs_txreq, txreq);
	struct rvt_qp *qp;
	struct hfi1_qp_priv *priv;
	unsigned long flags;
	int ret = 0;

	qp = tx->qp;
	priv = qp->priv;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
		/*
		 * If we couldn't queue the DMA request, save the info
		 * and try again later rather than destroying the
		 * buffer and undoing the side effects of the copy.
		 */
		/* Make a common routine? */
		list_add_tail(&stx->list, &wait->tx_head);
		write_seqlock(&sde->waitlock);
		if (sdma_progress(sde, seq, stx))
			goto eagain;
		if (list_empty(&priv->s_iowait.list)) {
			struct hfi1_ibport *ibp =
				to_iport(qp->ibqp.device, qp->port_num);

			ibp->rvp.n_dmawait++;
			qp->s_flags |= RVT_S_WAIT_DMA_DESC;
			iowait_get_priority(&priv->s_iowait);
			iowait_queue(pkts_sent, &priv->s_iowait,
				     &sde->dmawait);
			priv->s_iowait.lock = &sde->waitlock;
			trace_hfi1_qpsleep(qp, RVT_S_WAIT_DMA_DESC);
			rvt_get_qp(qp);
		}
		write_sequnlock(&sde->waitlock);
		hfi1_qp_unbusy(qp, wait);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		ret = -EBUSY;
	} else {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		hfi1_put_txreq(tx);
	}
	return ret;
eagain:
	write_sequnlock(&sde->waitlock);
	spin_unlock_irqrestore(&qp->s_lock, flags);
	list_del_init(&stx->list);
	return -EAGAIN;
}

static void iowait_wakeup(struct iowait *wait, int reason)
{
	struct rvt_qp *qp = iowait_to_qp(wait);

	WARN_ON(reason != SDMA_AVAIL_REASON);
	hfi1_qp_wakeup(qp, RVT_S_WAIT_DMA_DESC);
}

static void iowait_sdma_drained(struct iowait *wait)
{
	struct rvt_qp *qp = iowait_to_qp(wait);
	unsigned long flags;

	/*
	 * This happens when the send engine notes
	 * a QP in the error state and cannot
	 * do the flush work until that QP's
	 * sdma work has finished.
	 */
	spin_lock_irqsave(&qp->s_lock, flags);
	if (qp->s_flags & RVT_S_WAIT_DMA) {
		qp->s_flags &= ~RVT_S_WAIT_DMA;
		hfi1_schedule_send(qp);
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
}
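
/*
 * hfi1_init_priority - iowait callback to set the wait-queue priority
 *
 * A QP with an ACK pending (in qp->s_flags or the driver-private s_flags)
 * is given a higher priority when it is queued to wait for resources.
 */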
static void hfi1_init_priority(struct iowait *w)
{
	struct rvt_qp *qp = iowait_to_qp(w);
	struct hfi1_qp_priv *priv = qp->priv;

	if (qp->s_flags & RVT_S_ACK_PENDING)
		w->priority++;
	if (priv->s_flags & RVT_S_ACK_PENDING)
		w->priority++;
}

/**
 * qp_to_sdma_engine - map a qp to a send engine
 * @qp: the QP
 * @sc5: the 5 bit sc
 *
 * Return:
 * A send engine for the qp or NULL for SMI type qp.
 */
struct sdma_engine *qp_to_sdma_engine(struct rvt_qp *qp, u8 sc5)
{
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct sdma_engine *sde;

	if (!(dd->flags & HFI1_HAS_SEND_DMA))
		return NULL;
	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
		return NULL;
	default:
		break;
	}
	sde = sdma_select_engine_sc(dd, qp->ibqp.qp_num >> dd->qos_shift, sc5);
	return sde;
}

/*
 * qp_to_send_context - map a qp to a send context
 * @qp: the QP
 * @sc5: the 5 bit sc
 *
 * Return:
 * A send context for the qp
 */
struct send_context *qp_to_send_context(struct rvt_qp *qp, u8 sc5)
{
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);

	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
		/* SMA packets to VL15 */
		return dd->vld[15].sc;
	default:
		break;
	}

	return pio_select_send_context_sc(dd, qp->ibqp.qp_num >> dd->qos_shift,
					  sc5);
}

static const char * const qp_type_str[] = {
	"SMI", "GSI", "RC", "UC", "UD",
};

static int qp_idle(struct rvt_qp *qp)
{
	return
		qp->s_last == qp->s_acked &&
		qp->s_acked == qp->s_cur &&
		qp->s_cur == qp->s_tail &&
		qp->s_tail == qp->s_head;
}

/**
 * qp_iter_print - print the qp information to seq_file
 * @s: the seq_file to emit the qp information on
 * @iter: the iterator for the qp hash list
 */
void qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter)
{
	struct rvt_swqe *wqe;
	struct rvt_qp *qp = iter->qp;
	struct hfi1_qp_priv *priv = qp->priv;
	struct sdma_engine *sde;
	struct send_context *send_context;
	struct rvt_ack_entry *e = NULL;
	struct rvt_srq *srq = qp->ibqp.srq ?
		ibsrq_to_rvtsrq(qp->ibqp.srq) : NULL;

	sde = qp_to_sdma_engine(qp, priv->s_sc);
	wqe = rvt_get_swqe_ptr(qp, qp->s_last);
	send_context = qp_to_send_context(qp, priv->s_sc);
	if (qp->s_ack_queue)
		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
	seq_printf(s,
		   "N %d %s QP %x R %u %s %u %u f=%x %u %u %u %u %u %u SPSN %x %x %x %x %x RPSN %x S(%u %u %u %u %u %u %u) R(%u %u %u) RQP %x LID %x SL %u MTU %u %u %u %u %u SDE %p,%u SC %p,%u SCQ %u %u PID %d OS %x %x E %x %x %x RNR %d %s %d\n",
		   iter->n,
		   qp_idle(qp) ? "I" : "B",
		   qp->ibqp.qp_num,
		   atomic_read(&qp->refcount),
		   qp_type_str[qp->ibqp.qp_type],
		   qp->state,
		   wqe ? wqe->wr.opcode : 0,
		   qp->s_flags,
		   iowait_sdma_pending(&priv->s_iowait),
		   iowait_pio_pending(&priv->s_iowait),
		   !list_empty(&priv->s_iowait.list),
		   qp->timeout,
		   wqe ? wqe->ssn : 0,
		   qp->s_lsn,
		   qp->s_last_psn,
		   qp->s_psn, qp->s_next_psn,
		   qp->s_sending_psn, qp->s_sending_hpsn,
		   qp->r_psn,
		   qp->s_last, qp->s_acked, qp->s_cur,
		   qp->s_tail, qp->s_head, qp->s_size,
		   qp->s_avail,
		   /* ack_queue ring pointers, size */
		   qp->s_tail_ack_queue, qp->r_head_ack_queue,
		   rvt_max_atomic(&to_idev(qp->ibqp.device)->rdi),
		   /* remote QP info */
		   qp->remote_qpn,
		   rdma_ah_get_dlid(&qp->remote_ah_attr),
		   rdma_ah_get_sl(&qp->remote_ah_attr),
		   qp->pmtu,
		   qp->s_retry,
		   qp->s_retry_cnt,
		   qp->s_rnr_retry_cnt,
		   qp->s_rnr_retry,
		   sde,
		   sde ? sde->this_idx : 0,
		   send_context,
		   send_context ? send_context->sw_index : 0,
		   ibcq_to_rvtcq(qp->ibqp.send_cq)->queue->head,
		   ibcq_to_rvtcq(qp->ibqp.send_cq)->queue->tail,
		   qp->pid,
		   qp->s_state,
		   qp->s_ack_state,
		   /* ack queue information */
		   e ? e->opcode : 0,
		   e ? e->psn : 0,
		   e ? e->lpsn : 0,
		   qp->r_min_rnr_timer,
		   srq ? "SRQ" : "RQ",
		   srq ? srq->rq.size : qp->r_rq.size
		);
}
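
/*
 * qp_priv_alloc - rdmavt callback to allocate the hfi1-private part of a QP
 *
 * The private structure and its AHG state are allocated on the device's
 * NUMA node, and the iowait that throttles this QP's sends is initialized.
 */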
void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv;

	priv = kzalloc_node(sizeof(*priv), GFP_KERNEL, rdi->dparms.node);
	if (!priv)
		return ERR_PTR(-ENOMEM);

	priv->owner = qp;

	priv->s_ahg = kzalloc_node(sizeof(*priv->s_ahg), GFP_KERNEL,
				   rdi->dparms.node);
	if (!priv->s_ahg) {
		kfree(priv);
		return ERR_PTR(-ENOMEM);
	}
	iowait_init(
		&priv->s_iowait,
		1,
		_hfi1_do_send,
		_hfi1_do_tid_send,
		iowait_sleep,
		iowait_wakeup,
		iowait_sdma_drained,
		hfi1_init_priority);
	/* Init to a value to start the running average correctly */
	priv->s_running_pkt_size = piothreshold / 2;
	return priv;
}

void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	hfi1_qp_priv_tid_free(rdi, qp);
	kfree(priv->s_ahg);
	kfree(priv);
}

unsigned free_all_qps(struct rvt_dev_info *rdi)
{
	struct hfi1_ibdev *verbs_dev = container_of(rdi,
						    struct hfi1_ibdev,
						    rdi);
	struct hfi1_devdata *dd = container_of(verbs_dev,
					       struct hfi1_devdata,
					       verbs_dev);
	int n;
	unsigned qp_inuse = 0;

	for (n = 0; n < dd->num_pports; n++) {
		struct hfi1_ibport *ibp = &dd->pport[n].ibport_data;

		rcu_read_lock();
		if (rcu_dereference(ibp->rvp.qp[0]))
			qp_inuse++;
		if (rcu_dereference(ibp->rvp.qp[1]))
			qp_inuse++;
		rcu_read_unlock();
	}

	return qp_inuse;
}

void flush_qp_waiters(struct rvt_qp *qp)
{
	lockdep_assert_held(&qp->s_lock);
	flush_iowait(qp);
	hfi1_tid_rdma_flush_wait(qp);
}

void stop_send_queue(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	iowait_cancel_work(&priv->s_iowait);
	if (cancel_work_sync(&priv->tid_rdma.trigger_work))
		rvt_put_qp(qp);
}

void quiesce_qp(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	hfi1_del_tid_reap_timer(qp);
	hfi1_del_tid_retry_timer(qp);
	iowait_sdma_drain(&priv->s_iowait);
	qp_pio_drain(qp);
	flush_tx_list(qp);
}

void notify_qp_reset(struct rvt_qp *qp)
{
	hfi1_qp_kern_exp_rcv_clear_all(qp);
	qp->r_adefered = 0;
	clear_ahg(qp);

	/* Clear any OPFN state */
	if (qp->ibqp.qp_type == IB_QPT_RC)
		opfn_conn_error(qp);
}

/*
 * Switch to alternate path.
 * The QP s_lock should be held and interrupts disabled.
 */
void hfi1_migrate_qp(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct ib_event ev;

	qp->s_mig_state = IB_MIG_MIGRATED;
	qp->remote_ah_attr = qp->alt_ah_attr;
	qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
	qp->s_pkey_index = qp->s_alt_pkey_index;
	qp->s_flags |= HFI1_S_AHG_CLEAR;
	priv->s_sc = ah_to_sc(qp->ibqp.device, &qp->remote_ah_attr);
	priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
	qp_set_16b(qp);

	ev.device = qp->ibqp.device;
	ev.element.qp = &qp->ibqp;
	ev.event = IB_EVENT_PATH_MIG;
	qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}
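
/*
 * mtu_to_path_mtu - rdmavt callback that converts a byte MTU into the
 * (OPA-extended) path MTU enum carried in QP attributes
 */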
int mtu_to_path_mtu(u32 mtu)
{
	return mtu_to_enum(mtu, OPA_MTU_8192);
}

u32 mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
{
	u32 mtu;
	struct hfi1_ibdev *verbs_dev = container_of(rdi,
						    struct hfi1_ibdev,
						    rdi);
	struct hfi1_devdata *dd = container_of(verbs_dev,
					       struct hfi1_devdata,
					       verbs_dev);
	struct hfi1_ibport *ibp;
	u8 sc, vl;

	ibp = &dd->pport[qp->port_num - 1].ibport_data;
	sc = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
	vl = sc_to_vlt(dd, sc);

	mtu = verbs_mtu_enum_to_int(qp->ibqp.device, pmtu);
	if (vl < PER_VL_SEND_CONTEXTS)
		mtu = min_t(u32, mtu, dd->vld[vl].mtu);
	return mtu;
}

int get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
		       struct ib_qp_attr *attr)
{
	int mtu, pidx = qp->port_num - 1;
	struct hfi1_ibdev *verbs_dev = container_of(rdi,
						    struct hfi1_ibdev,
						    rdi);
	struct hfi1_devdata *dd = container_of(verbs_dev,
					       struct hfi1_devdata,
					       verbs_dev);
	mtu = verbs_mtu_enum_to_int(qp->ibqp.device, attr->path_mtu);
	if (mtu == -1)
		return -1; /* values less than 0 are error */

	if (mtu > dd->pport[pidx].ibmtu)
		return mtu_to_enum(dd->pport[pidx].ibmtu, IB_MTU_2048);
	else
		return attr->path_mtu;
}
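
/*
 * notify_error_qp - rdmavt callback invoked when a QP is moved to the
 * error state
 *
 * Take the QP off any iowait list it is sleeping on and, if no send work
 * is in progress, release cached send-side state and flush queued txreqs.
 */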
void notify_error_qp(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	seqlock_t *lock = priv->s_iowait.lock;

	if (lock) {
		write_seqlock(lock);
		if (!list_empty(&priv->s_iowait.list) &&
		    !(qp->s_flags & RVT_S_BUSY) &&
		    !(priv->s_flags & RVT_S_BUSY)) {
			qp->s_flags &= ~HFI1_S_ANY_WAIT_IO;
			iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
			iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_TID);
			list_del_init(&priv->s_iowait.list);
			priv->s_iowait.lock = NULL;
			rvt_put_qp(qp);
		}
		write_sequnlock(lock);
	}

	if (!(qp->s_flags & RVT_S_BUSY) && !(priv->s_flags & RVT_S_BUSY)) {
		qp->s_hdrwords = 0;
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		flush_tx_list(qp);
	}
}

/**
 * hfi1_qp_iter_cb - callback for iterator
 * @qp: the qp
 * @v: the sl in low bits of v
 *
 * This is called from the iterator callback to work
 * on an individual qp.
 */
static void hfi1_qp_iter_cb(struct rvt_qp *qp, u64 v)
{
	int lastwqe;
	struct ib_event ev;
	struct hfi1_ibport *ibp =
		to_iport(qp->ibqp.device, qp->port_num);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	u8 sl = (u8)v;

	if (qp->port_num != ppd->port ||
	    (qp->ibqp.qp_type != IB_QPT_UC &&
	     qp->ibqp.qp_type != IB_QPT_RC) ||
	    rdma_ah_get_sl(&qp->remote_ah_attr) != sl ||
	    !(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))
		return;

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_hlock);
	spin_lock(&qp->s_lock);
	lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);
	if (lastwqe) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}

/**
 * hfi1_error_port_qps - put a port's RC/UC qps into error state
 * @ibp: the ibport.
 * @sl: the service level.
 *
 * This function places all RC/UC qps with a given service level into error
 * state. It is generally called to force upper layer apps to abandon stale
 * qps after an sl->sc mapping change.
 */
void hfi1_error_port_qps(struct hfi1_ibport *ibp, u8 sl)
{
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct hfi1_ibdev *dev = &ppd->dd->verbs_dev;

	rvt_qp_iter(&dev->rdi, sl, hfi1_qp_iter_cb);
}