/*
 * Copyright (c) 2012, 2013 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <rdma/rdma_vt.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif

#include "qib.h"

/*
 * The mask field, which was present in the now-deleted qib_qpn_table,
 * is not present in rvt_qpn_table. Define the same field as qpt_mask
 * here instead of adding a mask field to rvt_qpn_table.
 */
u16 qpt_mask;

static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
			      struct rvt_qpn_map *map, unsigned off)
{
	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}

static inline unsigned find_next_offset(struct rvt_qpn_table *qpt,
					struct rvt_qpn_map *map, unsigned off,
					unsigned n)
{
	if (qpt_mask) {
		off++;
		if (((off & qpt_mask) >> 1) >= n)
			off = (off | qpt_mask) + 2;
	} else {
		off = find_next_zero_bit(map->page, RVT_BITS_PER_PAGE, off);
	}
	return off;
}

/*
 * Convert the AETH credit code into the number of credits.
 */
static u32 credit_table[31] = {
	0,			/* 0 */
	1,			/* 1 */
	2,			/* 2 */
	3,			/* 3 */
	4,			/* 4 */
	6,			/* 5 */
	8,			/* 6 */
	12,			/* 7 */
	16,			/* 8 */
	24,			/* 9 */
	32,			/* A */
	48,			/* B */
	64,			/* C */
	96,			/* D */
	128,			/* E */
	192,			/* F */
	256,			/* 10 */
	384,			/* 11 */
	512,			/* 12 */
	768,			/* 13 */
	1024,			/* 14 */
	1536,			/* 15 */
	2048,			/* 16 */
	3072,			/* 17 */
	4096,			/* 18 */
	6144,			/* 19 */
	8192,			/* 1A */
	12288,			/* 1B */
	16384,			/* 1C */
	24576,			/* 1D */
	32768			/* 1E */
};

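/*
 * get_map_page - allocate a page for the QPN bitmap
 * @qpt: the QPN table
 * @map: the map entry that needs a bitmap page
 * @gfp: allocation flags
 *
 * Allocate a zeroed page and install it as the bitmap for @map, freeing
 * the page again if another thread raced with us and installed one first.
 */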
static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map,
			 gfp_t gfp)
{
	unsigned long page = get_zeroed_page(gfp);

	/*
	 * Free the page if someone raced with us installing it.
	 */

	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}

/*
 * Allocate the next available QPN or
 * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
 */
int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
	      enum ib_qp_type type, u8 port, gfp_t gfp)
{
	u32 i, offset, max_scan, qpn;
	struct rvt_qpn_map *map;
	u32 ret;
	struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
					      verbs_dev);

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

	qpn = qpt->last + 2;
	if (qpn >= RVT_QPN_MAX)
		qpn = 2;
	if (qpt_mask && ((qpn & qpt_mask) >> 1) >= dd->n_krcv_queues)
		qpn = (qpn | qpt_mask) + 2;
	offset = qpn & RVT_BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map, gfp);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset = find_next_offset(qpt, map, offset,
						  dd->n_krcv_queues);
			qpn = mk_qpn(qpt, map, offset);
			/*
			 * This test differs from alloc_pidmap().
			 * If find_next_offset() does find a zero
			 * bit, we don't need to check for QPN
			 * wrapping around past our starting QPN.
			 * We just need to be sure we don't loop
			 * forever.
			 */
		} while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			offset = 0;
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			offset = 0;
		} else {
			map = &qpt->map[0];
			offset = 2;
		}
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}

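/*
 * free_qpn - release a QPN back to the bitmap
 * @qpt: the QPN table
 * @qpn: the QP number to free
 *
 * Clears the bit set by alloc_qpn() so the QPN can be reused.
 */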
static void free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
	struct rvt_qpn_map *map;

	map = qpt->map + qpn / RVT_BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
}

static inline unsigned qpn_hash(struct qib_ibdev *dev, u32 qpn)
{
	return hash_32(qpn, dev->rdi.qp_dev->qp_table_bits);
}

/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void insert_qp(struct qib_ibdev *dev, struct rvt_qp *qp)
{
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	unsigned long flags;
	unsigned n = qpn_hash(dev, qp->ibqp.qp_num);

	atomic_inc(&qp->refcount);
	spin_lock_irqsave(&dev->rdi.qp_dev->qpt_lock, flags);

	if (qp->ibqp.qp_num == 0)
		rcu_assign_pointer(ibp->rvp.qp[0], qp);
	else if (qp->ibqp.qp_num == 1)
		rcu_assign_pointer(ibp->rvp.qp[1], qp);
	else {
		qp->next = dev->rdi.qp_dev->qp_table[n];
		rcu_assign_pointer(dev->rdi.qp_dev->qp_table[n], qp);
	}

	spin_unlock_irqrestore(&dev->rdi.qp_dev->qpt_lock, flags);
}

/*
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive interrupt routine.
 */
static void remove_qp(struct qib_ibdev *dev, struct rvt_qp *qp)
{
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
	unsigned long flags;
	int removed = 1;
	spinlock_t *qpt_lock_ptr; /* Pointer to make checkpatch happy */

	spin_lock_irqsave(&dev->rdi.qp_dev->qpt_lock, flags);

	qpt_lock_ptr = &dev->rdi.qp_dev->qpt_lock;
	if (rcu_dereference_protected(ibp->rvp.qp[0],
				      lockdep_is_held(qpt_lock_ptr)) == qp) {
		RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);
	} else if (rcu_dereference_protected(ibp->rvp.qp[1],
			lockdep_is_held(&dev->rdi.qp_dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
	} else {
		struct rvt_qp *q;
		struct rvt_qp __rcu **qpp;

		removed = 0;
		qpp = &dev->rdi.qp_dev->qp_table[n];
		for (; (q = rcu_dereference_protected(*qpp,
				lockdep_is_held(qpt_lock_ptr))) != NULL;
				qpp = &q->next)
			if (q == qp) {
				RCU_INIT_POINTER(*qpp,
					rcu_dereference_protected(qp->next,
					 lockdep_is_held(qpt_lock_ptr)));
				removed = 1;
				break;
			}
	}

	spin_unlock_irqrestore(&dev->rdi.qp_dev->qpt_lock, flags);
	if (removed) {
		synchronize_rcu();
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}

/**
 * qib_free_all_qps - check for QPs still in use
 */
unsigned qib_free_all_qps(struct rvt_dev_info *rdi)
{
	struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
					      verbs_dev);
	unsigned n, qp_inuse = 0;

	for (n = 0; n < dd->num_pports; n++) {
		struct qib_ibport *ibp = &dd->pport[n].ibport_data;

		rcu_read_lock();
		if (rcu_dereference(ibp->rvp.qp[0]))
			qp_inuse++;
		if (rcu_dereference(ibp->rvp.qp[1]))
			qp_inuse++;
		rcu_read_unlock();
	}
	return qp_inuse;
}

void notify_qp_reset(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	atomic_set(&priv->s_dma_busy, 0);
}

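/*
 * clear_mr_refs - release MR references held by a QP's queued work requests
 * @qp: the QP whose references should be dropped
 * @clr_sends: if non-zero, also release references held by pending send WQEs
 *
 * Releases the SGE/MR references held by the receive side (and, for UD/SMI/GSI
 * send WQEs, the AH references), plus the references held by the responder's
 * ACK queue on RC QPs, so the memory regions can be freed.
 */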
static void clear_mr_refs(struct rvt_qp *qp, int clr_sends)
{
	unsigned n;

	if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
		qib_put_ss(&qp->s_rdma_read_sge);

	qib_put_ss(&qp->r_sge);

	if (clr_sends) {
		while (qp->s_last != qp->s_head) {
			struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);
			unsigned i;

			for (i = 0; i < wqe->wr.num_sge; i++) {
				struct rvt_sge *sge = &wqe->sg_list[i];

				rvt_put_mr(sge->mr);
			}
			if (qp->ibqp.qp_type == IB_QPT_UD ||
			    qp->ibqp.qp_type == IB_QPT_SMI ||
			    qp->ibqp.qp_type == IB_QPT_GSI)
				atomic_dec(
				 &ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
			if (++qp->s_last >= qp->s_size)
				qp->s_last = 0;
		}
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
	}

	if (qp->ibqp.qp_type != IB_QPT_RC)
		return;

	for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
		struct rvt_ack_entry *e = &qp->s_ack_queue[n];

		if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
		    e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
	}
}

/**
 * qib_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 * Returns true if last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int qib_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_wc wc;
	int ret = 0;

	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
		goto bail;

	qp->state = IB_QPS_ERR;

	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
		del_timer(&qp->s_timer);
	}

	if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
		qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;

	spin_lock(&dev->rdi.pending_lock);
	if (!list_empty(&priv->iowait) && !(qp->s_flags & RVT_S_BUSY)) {
		qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
		list_del_init(&priv->iowait);
	}
	spin_unlock(&dev->rdi.pending_lock);

	if (!(qp->s_flags & RVT_S_BUSY)) {
		qp->s_hdrwords = 0;
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		if (priv->s_tx) {
			qib_put_txreq(priv->s_tx);
			priv->s_tx = NULL;
		}
	}

	/* Schedule the sending tasklet to drain the send work queue. */
	if (qp->s_last != qp->s_head)
		qib_schedule_send(qp);

	clear_mr_refs(qp, 0);

	memset(&wc, 0, sizeof(wc));
	wc.qp = &qp->ibqp;
	wc.opcode = IB_WC_RECV;

	if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
		wc.wr_id = qp->r_wr_id;
		wc.status = err;
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	if (qp->r_rq.wq) {
		struct rvt_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		while (tail != head) {
			wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;

		spin_unlock(&qp->r_rq.lock);
	} else if (qp->ibqp.event_handler)
		ret = 1;

bail:
	return ret;
}

/**
 * qib_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_udata *udata)
{
	struct qib_ibdev *dev = to_idev(ibqp->device);
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct qib_qp_priv *priv = qp->priv;
	enum ib_qp_state cur_state, new_state;
	struct ib_event ev;
	int lastwqe = 0;
	int mig = 0;
	int ret;
	u32 pmtu = 0; /* for gcc warning only */

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_lock);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask, IB_LINK_LAYER_UNSPECIFIED))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		if (attr->ah_attr.dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
			goto inval;
		if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
			goto inval;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_ah_attr.dlid >=
		    be16_to_cpu(IB_MULTICAST_LID_BASE))
			goto inval;
		if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
			goto inval;
		if (attr->alt_pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (qp->ibqp.qp_type == IB_QPT_SMI ||
		    qp->ibqp.qp_type == IB_QPT_GSI ||
		    attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	if (attr_mask & IB_QP_DEST_QPN)
		if (attr->dest_qp_num > QIB_QPN_MASK)
			goto inval;

	if (attr_mask & IB_QP_RETRY_CNT)
		if (attr->retry_cnt > 7)
			goto inval;

	if (attr_mask & IB_QP_RNR_RETRY)
		if (attr->rnr_retry > 7)
			goto inval;

	/*
	 * Don't allow invalid path_mtu values.  OK to set greater
	 * than the active mtu (or even the max_cap, if we have tuned
	 * that to a small mtu).  We'll set qp->path_mtu
	 * to the lesser of requested attribute mtu and active,
	 * for packetizing messages.
	 * Note that the QP port has to be set in INIT and MTU in RTR.
	 */
	if (attr_mask & IB_QP_PATH_MTU) {
		struct qib_devdata *dd = dd_from_dev(dev);
		int mtu, pidx = qp->port_num - 1;

		mtu = ib_mtu_enum_to_int(attr->path_mtu);
		if (mtu == -1)
			goto inval;
		if (mtu > dd->pport[pidx].ibmtu) {
			switch (dd->pport[pidx].ibmtu) {
			case 4096:
				pmtu = IB_MTU_4096;
				break;
			case 2048:
				pmtu = IB_MTU_2048;
				break;
			case 1024:
				pmtu = IB_MTU_1024;
				break;
			case 512:
				pmtu = IB_MTU_512;
				break;
			case 256:
				pmtu = IB_MTU_256;
				break;
			default:
				pmtu = IB_MTU_2048;
			}
		} else
			pmtu = attr->path_mtu;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		if (attr->path_mig_state == IB_MIG_REARM) {
			if (qp->s_mig_state == IB_MIG_ARMED)
				goto inval;
			if (new_state != IB_QPS_RTS)
				goto inval;
		} else if (attr->path_mig_state == IB_MIG_MIGRATED) {
			if (qp->s_mig_state == IB_MIG_REARM)
				goto inval;
			if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
				goto inval;
			if (qp->s_mig_state == IB_MIG_ARMED)
				mig = 1;
		} else
			goto inval;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > QIB_MAX_RDMA_ATOMIC)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		if (qp->state != IB_QPS_RESET) {
			qp->state = IB_QPS_RESET;
			spin_lock(&dev->rdi.pending_lock);
			if (!list_empty(&priv->iowait))
				list_del_init(&priv->iowait);
			spin_unlock(&dev->rdi.pending_lock);
			qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
			spin_unlock(&qp->s_lock);
			spin_unlock_irq(&qp->r_lock);
			/* Stop the sending work queue and retry timer */
			cancel_work_sync(&priv->s_work);
			del_timer_sync(&qp->s_timer);
			wait_event(priv->wait_dma,
				   !atomic_read(&priv->s_dma_busy));
			if (priv->s_tx) {
				qib_put_txreq(priv->s_tx);
				priv->s_tx = NULL;
			}
			remove_qp(dev, qp);
			wait_event(qp->wait, !atomic_read(&qp->refcount));
			spin_lock_irq(&qp->r_lock);
			spin_lock(&qp->s_lock);
			clear_mr_refs(qp, 1);
			rvt_reset_qp(&dev->rdi, qp, ibqp->qp_type);
		}
		break;

	case IB_QPS_RTR:
		/* Allow event to retrigger if QP set to RTR more than once */
		qp->r_flags &= ~RVT_R_COMM_EST;
		qp->state = new_state;
		break;

	case IB_QPS_SQD:
		qp->s_draining = qp->s_last != qp->s_cur;
		qp->state = new_state;
		break;

	case IB_QPS_SQE:
		if (qp->ibqp.qp_type == IB_QPT_RC)
			goto inval;
		qp->state = new_state;
		break;

	case IB_QPS_ERR:
		lastwqe = qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		break;

	default:
		qp->state = new_state;
		break;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_PORT)
		qp->port_num = attr->port_num;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_next_psn = attr->sq_psn & QIB_PSN_MASK;
		qp->s_psn = qp->s_next_psn;
		qp->s_sending_psn = qp->s_next_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
		qp->s_sending_hpsn = qp->s_last_psn;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn & QIB_PSN_MASK;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV) {
		qp->remote_ah_attr = attr->ah_attr;
		qp->s_srate = attr->ah_attr.static_rate;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		qp->alt_ah_attr = attr->alt_ah_attr;
		qp->s_alt_pkey_index = attr->alt_pkey_index;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		qp->s_mig_state = attr->path_mig_state;
		if (mig) {
			qp->remote_ah_attr = qp->alt_ah_attr;
			qp->port_num = qp->alt_ah_attr.port_num;
			qp->s_pkey_index = qp->s_alt_pkey_index;
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		qp->path_mtu = pmtu;
		qp->pmtu = ib_mtu_enum_to_int(pmtu);
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		qp->s_retry_cnt = attr->retry_cnt;
		qp->s_retry = attr->retry_cnt;
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry_cnt = attr->rnr_retry;
		qp->s_rnr_retry = attr->rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT) {
		qp->timeout = attr->timeout;
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
				1000UL);
	}

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		insert_qp(dev, qp);

	if (lastwqe) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	if (mig) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_PATH_MIG;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	ret = 0;
	goto bail;

inval:
	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);
	ret = -EINVAL;

bail:
	return ret;
}

/**
 * qib_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 qib_compute_aeth(struct rvt_qp *qp)
{
	u32 aeth = qp->r_msn & QIB_MSN_MASK;

	if (qp->ibqp.srq) {
		/*
		 * Shared receive queues don't generate credits.
		 * Set the credit field to the invalid value.
		 */
		aeth |= QIB_AETH_CREDIT_INVAL << QIB_AETH_CREDIT_SHIFT;
	} else {
		u32 min, max, x;
		u32 credits;
		struct rvt_rwq *wq = qp->r_rq.wq;
		u32 head;
		u32 tail;

		/* sanity check pointers before trusting them */
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		/*
		 * Compute the number of credits available (RWQEs).
		 * XXX Not holding the r_rq.lock here so there is a small
		 * chance that the pair of reads are not atomic.
		 */
		credits = head - tail;
		if ((int)credits < 0)
			credits += qp->r_rq.size;
		/*
		 * Binary search the credit table to find the code to
		 * use.
		 */
		min = 0;
		max = 31;
		for (;;) {
			x = (min + max) / 2;
			if (credit_table[x] == credits)
				break;
			if (credit_table[x] > credits)
				max = x;
			else if (min == x)
				break;
			else
				min = x;
		}
		aeth |= x << QIB_AETH_CREDIT_SHIFT;
	}
	return cpu_to_be32(aeth);
}

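/*
 * qp_priv_alloc - allocate the qib-specific private part of a QP
 * @rdi: the rdmavt device info
 * @qp: the QP the private data will be attached to
 * @gfp: allocation flags
 *
 * Allocates the driver-private state (including the send header buffer)
 * that rdmavt attaches to qp->priv.  Returns the new structure, or an
 * ERR_PTR on allocation failure; qp_priv_free() below releases it.
 */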
void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, gfp_t gfp)
{
	struct qib_qp_priv *priv;

	priv = kzalloc(sizeof(*priv), gfp);
	if (!priv)
		return ERR_PTR(-ENOMEM);
	priv->owner = qp;

	priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), gfp);
	if (!priv->s_hdr) {
		kfree(priv);
		return ERR_PTR(-ENOMEM);
	}
	init_waitqueue_head(&priv->wait_dma);
	INIT_WORK(&priv->s_work, _qib_do_send);
	INIT_LIST_HEAD(&priv->iowait);

	return priv;
}

void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	kfree(priv->s_hdr);
	kfree(priv);
}

/**
 * qib_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int qib_destroy_qp(struct ib_qp *ibqp)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct qib_ibdev *dev = to_idev(ibqp->device);
	struct qib_qp_priv *priv = qp->priv;

	/* Make sure HW and driver activity is stopped. */
	spin_lock_irq(&qp->s_lock);
	if (qp->state != IB_QPS_RESET) {
		qp->state = IB_QPS_RESET;
		spin_lock(&dev->rdi.pending_lock);
		if (!list_empty(&priv->iowait))
			list_del_init(&priv->iowait);
		spin_unlock(&dev->rdi.pending_lock);
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
		spin_unlock_irq(&qp->s_lock);
		cancel_work_sync(&priv->s_work);
		del_timer_sync(&qp->s_timer);
		wait_event(priv->wait_dma, !atomic_read(&priv->s_dma_busy));
		if (priv->s_tx) {
			qib_put_txreq(priv->s_tx);
			priv->s_tx = NULL;
		}
		remove_qp(dev, qp);
		wait_event(qp->wait, !atomic_read(&qp->refcount));
		clear_mr_refs(qp, 1);
	} else
		spin_unlock_irq(&qp->s_lock);

	/* all user state cleaned up, mark the QPN available */
	free_qpn(&dev->rdi.qp_dev->qpn_table, qp->ibqp.qp_num);
	spin_lock(&dev->n_qps_lock);
	dev->n_qps_allocated--;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip)
		kref_put(&qp->ip->ref, rvt_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	vfree(qp->s_wq);
	kfree(priv->s_hdr);
	kfree(priv);
	kfree(qp);
	return 0;
}

/**
 * qib_get_credit - handle the flow control credit in an AETH
 * @qp: the QP whose send credit state to update
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void qib_get_credit(struct rvt_qp *qp, u32 aeth)
{
	u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;

	/*
	 * If the credit is invalid, we can send
	 * as many packets as we like.  Otherwise, we have to
	 * honor the credit field.
	 */
	if (credit == QIB_AETH_CREDIT_INVAL) {
		if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
			qp->s_flags |= RVT_S_UNLIMITED_CREDIT;
			if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
				qib_schedule_send(qp);
			}
		}
	} else if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
		/* Compute new LSN (i.e., MSN + credit) */
		credit = (aeth + credit_table[credit]) & QIB_MSN_MASK;
		if (qib_cmp24(credit, qp->s_lsn) > 0) {
			qp->s_lsn = credit;
			if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
				qib_schedule_send(qp);
			}
		}
	}
}

#ifdef CONFIG_DEBUG_FS

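/*
 * QP iterator used by the debugfs code to walk every QP in the device's
 * QP hash table.  qib_qp_iter_init() allocates an iterator and positions
 * it on the first QP, qib_qp_iter_next() advances it (returning non-zero
 * once the table is exhausted), and qib_qp_iter_print() dumps one line of
 * QP state to the seq_file.
 */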
struct qib_qp_iter {
	struct qib_ibdev *dev;
	struct rvt_qp *qp;
	int n;
};

struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev)
{
	struct qib_qp_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	if (qib_qp_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int qib_qp_iter_next(struct qib_qp_iter *iter)
{
	struct qib_ibdev *dev = iter->dev;
	int n = iter->n;
	int ret = 1;
	struct rvt_qp *pqp = iter->qp;
	struct rvt_qp *qp;

	for (; n < dev->rdi.qp_dev->qp_table_size; n++) {
		if (pqp)
			qp = rcu_dereference(pqp->next);
		else
			qp = rcu_dereference(dev->rdi.qp_dev->qp_table[n]);
		pqp = qp;
		if (qp) {
			iter->qp = qp;
			iter->n = n;
			return 0;
		}
	}
	return ret;
}

static const char * const qp_type_str[] = {
	"SMI", "GSI", "RC", "UC", "UD",
};

void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter)
{
	struct rvt_swqe *wqe;
	struct rvt_qp *qp = iter->qp;
	struct qib_qp_priv *priv = qp->priv;

	wqe = rvt_get_swqe_ptr(qp, qp->s_last);
	seq_printf(s,
		   "N %d QP%u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x\n",
		   iter->n,
		   qp->ibqp.qp_num,
		   qp_type_str[qp->ibqp.qp_type],
		   qp->state,
		   wqe->wr.opcode,
		   qp->s_hdrwords,
		   qp->s_flags,
		   atomic_read(&priv->s_dma_busy),
		   !list_empty(&priv->iowait),
		   qp->timeout,
		   wqe->ssn,
		   qp->s_lsn,
		   qp->s_last_psn,
		   qp->s_psn, qp->s_next_psn,
		   qp->s_sending_psn, qp->s_sending_hpsn,
		   qp->s_last, qp->s_acked, qp->s_cur,
		   qp->s_tail, qp->s_head, qp->s_size,
		   qp->remote_qpn,
		   qp->remote_ah_attr.dlid);
}

#endif