/*
 * Copyright (c) 2012, 2013 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation.  All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/jhash.h>
#include <rdma/rdma_vt.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif

#include "qib.h"

#define BITS_PER_PAGE		(PAGE_SIZE * BITS_PER_BYTE)
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE - 1)

static inline unsigned mk_qpn(struct qib_qpn_table *qpt,
			      struct qpn_map *map, unsigned off)
{
	return (map - qpt->map) * BITS_PER_PAGE + off;
}

static inline unsigned find_next_offset(struct qib_qpn_table *qpt,
					struct qpn_map *map, unsigned off,
					unsigned n)
{
	if (qpt->mask) {
		off++;
		if (((off & qpt->mask) >> 1) >= n)
			off = (off | qpt->mask) + 2;
	} else
		off = find_next_zero_bit(map->page, BITS_PER_PAGE, off);
	return off;
}

/*
 * Convert the AETH credit code into the number of credits.
 */
static u32 credit_table[31] = {
	0,			/* 0 */
	1,			/* 1 */
	2,			/* 2 */
	3,			/* 3 */
	4,			/* 4 */
	6,			/* 5 */
	8,			/* 6 */
	12,			/* 7 */
	16,			/* 8 */
	24,			/* 9 */
	32,			/* A */
	48,			/* B */
	64,			/* C */
	96,			/* D */
	128,			/* E */
	192,			/* F */
	256,			/* 10 */
	384,			/* 11 */
	512,			/* 12 */
	768,			/* 13 */
	1024,			/* 14 */
	1536,			/* 15 */
	2048,			/* 16 */
	3072,			/* 17 */
	4096,			/* 18 */
	6144,			/* 19 */
	8192,			/* 1A */
	12288,			/* 1B */
	16384,			/* 1C */
	24576,			/* 1D */
	32768			/* 1E */
};

static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map,
			 gfp_t gfp)
{
	unsigned long page = get_zeroed_page(gfp);

	/*
	 * Free the page if someone raced with us installing it.
	 */

	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}
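
/*
 * Illustrative note (assuming 4 KiB pages): BITS_PER_PAGE is then
 * 32768, so QPN 40000 lives in qpt->map[1] at bit offset 7232, which
 * is exactly the mapping mk_qpn() inverts.  When qpt->mask is
 * non-zero, the masked low bits of a QPN select a kernel receive
 * context, so find_next_offset() jumps past offsets whose context
 * number ((off & qpt->mask) >> 1) would reach dd->n_krcv_queues
 * rather than scanning the bitmap bit by bit.
 */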
/*
 * Allocate the next available QPN, or QPN 0/1 for QP type
 * IB_QPT_SMI/IB_QPT_GSI.
 */
static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
		     enum ib_qp_type type, u8 port, gfp_t gfp)
{
	u32 i, offset, max_scan, qpn;
	struct qpn_map *map;
	u32 ret;

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

	qpn = qpt->last + 2;
	if (qpn >= QPN_MAX)
		qpn = 2;
	if (qpt->mask && ((qpn & qpt->mask) >> 1) >= dd->n_krcv_queues)
		qpn = (qpn | qpt->mask) + 2;
	offset = qpn & BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map, gfp);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset = find_next_offset(qpt, map, offset,
						  dd->n_krcv_queues);
			qpn = mk_qpn(qpt, map, offset);
			/*
			 * This test differs from alloc_pidmap().
			 * If find_next_offset() does find a zero
			 * bit, we don't need to check for QPN
			 * wrapping around past our starting QPN.
			 * We just need to be sure we don't loop
			 * forever.
			 */
		} while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			offset = 0;
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			offset = 0;
		} else {
			map = &qpt->map[0];
			offset = 2;
		}
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}

static void free_qpn(struct qib_qpn_table *qpt, u32 qpn)
{
	struct qpn_map *map;

	map = qpt->map + qpn / BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
}

static inline unsigned qpn_hash(struct qib_ibdev *dev, u32 qpn)
{
	return jhash_1word(qpn, dev->qp_rnd) &
		(dev->qp_table_size - 1);
}

/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void insert_qp(struct qib_ibdev *dev, struct rvt_qp *qp)
{
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	unsigned long flags;
	unsigned n = qpn_hash(dev, qp->ibqp.qp_num);

	atomic_inc(&qp->refcount);
	spin_lock_irqsave(&dev->qpt_lock, flags);

	if (qp->ibqp.qp_num == 0)
		rcu_assign_pointer(ibp->rvp.qp[0], qp);
	else if (qp->ibqp.qp_num == 1)
		rcu_assign_pointer(ibp->rvp.qp[1], qp);
	else {
		qp->next = dev->qp_table[n];
		rcu_assign_pointer(dev->qp_table[n], qp);
	}

	spin_unlock_irqrestore(&dev->qpt_lock, flags);
}
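
/*
 * Note on the locking model here: insert_qp() and remove_qp()
 * serialize writers with dev->qpt_lock, while readers such as
 * qib_lookup_qpn() walk the table under rcu_read_lock() only.  The
 * rcu_assign_pointer() calls above pair with rcu_dereference() on
 * the read side, so a concurrent reader sees either the old or the
 * new list head, never a half-published QP.
 */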
/*
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive interrupt routine.
 */
static void remove_qp(struct qib_ibdev *dev, struct rvt_qp *qp)
{
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
	unsigned long flags;
	int removed = 1;

	spin_lock_irqsave(&dev->qpt_lock, flags);

	if (rcu_dereference_protected(ibp->rvp.qp[0],
			lockdep_is_held(&dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);
	} else if (rcu_dereference_protected(ibp->rvp.qp[1],
			lockdep_is_held(&dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
	} else {
		struct rvt_qp *q;
		struct rvt_qp __rcu **qpp;

		removed = 0;
		qpp = &dev->qp_table[n];
		for (; (q = rcu_dereference_protected(*qpp,
				lockdep_is_held(&dev->qpt_lock))) != NULL;
				qpp = &q->next)
			if (q == qp) {
				RCU_INIT_POINTER(*qpp,
					rcu_dereference_protected(qp->next,
					 lockdep_is_held(&dev->qpt_lock)));
				removed = 1;
				break;
			}
	}

	spin_unlock_irqrestore(&dev->qpt_lock, flags);
	if (removed) {
		/* Wait for readers to finish before dropping their ref. */
		synchronize_rcu();
		atomic_dec(&qp->refcount);
	}
}

/**
 * qib_free_all_qps - check for QPs still in use
 * @dd: the device data structure
 *
 * There should not be any QPs still in use.
 * Free memory for table.
 */
unsigned qib_free_all_qps(struct qib_devdata *dd)
{
	struct qib_ibdev *dev = &dd->verbs_dev;
	unsigned long flags;
	struct rvt_qp *qp;
	unsigned n, qp_inuse = 0;

	for (n = 0; n < dd->num_pports; n++) {
		struct qib_ibport *ibp = &dd->pport[n].ibport_data;

		if (!qib_mcast_tree_empty(ibp))
			qp_inuse++;
		rcu_read_lock();
		if (rcu_dereference(ibp->rvp.qp[0]))
			qp_inuse++;
		if (rcu_dereference(ibp->rvp.qp[1]))
			qp_inuse++;
		rcu_read_unlock();
	}

	spin_lock_irqsave(&dev->qpt_lock, flags);
	for (n = 0; n < dev->qp_table_size; n++) {
		qp = rcu_dereference_protected(dev->qp_table[n],
			lockdep_is_held(&dev->qpt_lock));
		RCU_INIT_POINTER(dev->qp_table[n], NULL);

		for (; qp; qp = rcu_dereference_protected(qp->next,
					lockdep_is_held(&dev->qpt_lock)))
			qp_inuse++;
	}
	spin_unlock_irqrestore(&dev->qpt_lock, flags);
	synchronize_rcu();

	return qp_inuse;
}
/**
 * qib_lookup_qpn - return the QP with the given QPN
 * @ibp: the IB port
 * @qpn: the QP number to look up
 *
 * The caller is responsible for decrementing the QP reference count
 * when done.
 */
struct rvt_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
{
	struct rvt_qp *qp = NULL;

	rcu_read_lock();
	if (unlikely(qpn <= 1)) {
		if (qpn == 0)
			qp = rcu_dereference(ibp->rvp.qp[0]);
		else
			qp = rcu_dereference(ibp->rvp.qp[1]);
		if (qp)
			atomic_inc(&qp->refcount);
	} else {
		struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
		unsigned n = qpn_hash(dev, qpn);

		for (qp = rcu_dereference(dev->qp_table[n]); qp;
			qp = rcu_dereference(qp->next))
			if (qp->ibqp.qp_num == qpn) {
				atomic_inc(&qp->refcount);
				break;
			}
	}
	rcu_read_unlock();
	return qp;
}

/**
 * qib_reset_qp - initialize the QP state to the reset state
 * @qp: the QP to reset
 * @type: the QP type
 */
static void qib_reset_qp(struct rvt_qp *qp, enum ib_qp_type type)
{
	struct qib_qp_priv *priv = qp->priv;

	qp->remote_qpn = 0;
	qp->qkey = 0;
	qp->qp_access_flags = 0;
	atomic_set(&priv->s_dma_busy, 0);
	qp->s_flags &= QIB_S_SIGNAL_REQ_WR;
	qp->s_hdrwords = 0;
	qp->s_wqe = NULL;
	qp->s_draining = 0;
	qp->s_next_psn = 0;
	qp->s_last_psn = 0;
	qp->s_sending_psn = 0;
	qp->s_sending_hpsn = 0;
	qp->s_psn = 0;
	qp->r_psn = 0;
	qp->r_msn = 0;
	if (type == IB_QPT_RC) {
		qp->s_state = IB_OPCODE_RC_SEND_LAST;
		qp->r_state = IB_OPCODE_RC_SEND_LAST;
	} else {
		qp->s_state = IB_OPCODE_UC_SEND_LAST;
		qp->r_state = IB_OPCODE_UC_SEND_LAST;
	}
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->r_nak_state = 0;
	qp->r_aflags = 0;
	qp->r_flags = 0;
	qp->s_head = 0;
	qp->s_tail = 0;
	qp->s_cur = 0;
	qp->s_acked = 0;
	qp->s_last = 0;
	qp->s_ssn = 1;
	qp->s_lsn = 0;
	qp->s_mig_state = IB_MIG_MIGRATED;
	memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
	qp->r_head_ack_queue = 0;
	qp->s_tail_ack_queue = 0;
	qp->s_num_rd_atomic = 0;
	if (qp->r_rq.wq) {
		qp->r_rq.wq->head = 0;
		qp->r_rq.wq->tail = 0;
	}
	qp->r_sge.num_sge = 0;
}

static void clear_mr_refs(struct rvt_qp *qp, int clr_sends)
{
	unsigned n;

	if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
		qib_put_ss(&qp->s_rdma_read_sge);

	qib_put_ss(&qp->r_sge);

	if (clr_sends) {
		while (qp->s_last != qp->s_head) {
			struct rvt_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
			unsigned i;

			for (i = 0; i < wqe->wr.num_sge; i++) {
				struct rvt_sge *sge = &wqe->sg_list[i];

				rvt_put_mr(sge->mr);
			}
			if (qp->ibqp.qp_type == IB_QPT_UD ||
			    qp->ibqp.qp_type == IB_QPT_SMI ||
			    qp->ibqp.qp_type == IB_QPT_GSI)
				atomic_dec(
				 &ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
			if (++qp->s_last >= qp->s_size)
				qp->s_last = 0;
		}
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
	}

	if (qp->ibqp.qp_type != IB_QPT_RC)
		return;

	for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
		struct rvt_ack_entry *e = &qp->s_ack_queue[n];

		if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
		    e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
	}
}
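
/*
 * Worked example for the drain loop in clear_mr_refs() (illustrative
 * only): the send queue is a ring of s_size entries, so with
 * s_size = 16, s_last = 14 and s_head = 2, the loop releases the MR
 * (and, for UD-style QPs, AH) references of WQEs 14, 15, 0 and 1
 * before s_last catches up with s_head.
 */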
/**
 * qib_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 * Returns true if the last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int qib_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_wc wc;
	int ret = 0;

	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
		goto bail;

	qp->state = IB_QPS_ERR;

	if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
		qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
		del_timer(&qp->s_timer);
	}

	if (qp->s_flags & QIB_S_ANY_WAIT_SEND)
		qp->s_flags &= ~QIB_S_ANY_WAIT_SEND;

	spin_lock(&dev->pending_lock);
	if (!list_empty(&priv->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
		qp->s_flags &= ~QIB_S_ANY_WAIT_IO;
		list_del_init(&priv->iowait);
	}
	spin_unlock(&dev->pending_lock);

	if (!(qp->s_flags & QIB_S_BUSY)) {
		qp->s_hdrwords = 0;
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		if (priv->s_tx) {
			qib_put_txreq(priv->s_tx);
			priv->s_tx = NULL;
		}
	}

	/* Schedule the send engine to drain the send work queue. */
	if (qp->s_last != qp->s_head)
		qib_schedule_send(qp);

	clear_mr_refs(qp, 0);

	memset(&wc, 0, sizeof(wc));
	wc.qp = &qp->ibqp;
	wc.opcode = IB_WC_RECV;

	if (test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) {
		wc.wr_id = qp->r_wr_id;
		wc.status = err;
		qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	if (qp->r_rq.wq) {
		struct rvt_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		while (tail != head) {
			wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;

		spin_unlock(&qp->r_rq.lock);
	} else if (qp->ibqp.event_handler)
		ret = 1;

bail:
	return ret;
}
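
/*
 * Lock ordering note: qib_error_qp() above is called with both
 * qp->r_lock and qp->s_lock held; qib_modify_qp() below acquires
 * them in the same order, r_lock (with interrupts disabled) before
 * s_lock, and releases them in reverse.
 */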
/**
 * qib_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_udata *udata)
{
	struct qib_ibdev *dev = to_idev(ibqp->device);
	struct rvt_qp *qp = to_iqp(ibqp);
	struct qib_qp_priv *priv = qp->priv;
	enum ib_qp_state cur_state, new_state;
	struct ib_event ev;
	int lastwqe = 0;
	int mig = 0;
	int ret;
	u32 pmtu = 0; /* for gcc warning only */

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_lock);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ?
		attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask, IB_LINK_LAYER_UNSPECIFIED))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		if (attr->ah_attr.dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
			goto inval;
		if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
			goto inval;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_ah_attr.dlid >=
		    be16_to_cpu(IB_MULTICAST_LID_BASE))
			goto inval;
		if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
			goto inval;
		if (attr->alt_pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (qp->ibqp.qp_type == IB_QPT_SMI ||
		    qp->ibqp.qp_type == IB_QPT_GSI ||
		    attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	if (attr_mask & IB_QP_DEST_QPN)
		if (attr->dest_qp_num > QIB_QPN_MASK)
			goto inval;

	if (attr_mask & IB_QP_RETRY_CNT)
		if (attr->retry_cnt > 7)
			goto inval;

	if (attr_mask & IB_QP_RNR_RETRY)
		if (attr->rnr_retry > 7)
			goto inval;

	/*
	 * Don't allow invalid path_mtu values.  It is OK to set the
	 * requested MTU greater than the active MTU (or even max_cap,
	 * if we have tuned that to a small MTU).  We'll set qp->path_mtu
	 * to the lesser of the requested attribute MTU and the active
	 * MTU, for packetizing messages.
	 * Note that the QP port has to be set in INIT and MTU in RTR.
	 */
	if (attr_mask & IB_QP_PATH_MTU) {
		struct qib_devdata *dd = dd_from_dev(dev);
		int mtu, pidx = qp->port_num - 1;

		mtu = ib_mtu_enum_to_int(attr->path_mtu);
		if (mtu == -1)
			goto inval;
		if (mtu > dd->pport[pidx].ibmtu) {
			switch (dd->pport[pidx].ibmtu) {
			case 4096:
				pmtu = IB_MTU_4096;
				break;
			case 2048:
				pmtu = IB_MTU_2048;
				break;
			case 1024:
				pmtu = IB_MTU_1024;
				break;
			case 512:
				pmtu = IB_MTU_512;
				break;
			case 256:
				pmtu = IB_MTU_256;
				break;
			default:
				pmtu = IB_MTU_2048;
			}
		} else
			pmtu = attr->path_mtu;
	}
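
	/*
	 * Example of the clamping above (illustrative only): a request
	 * for IB_MTU_4096 on a port whose ibmtu is 2048 yields
	 * pmtu = IB_MTU_2048, while a request for IB_MTU_1024 on the
	 * same port is taken as is.
	 */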

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		if (attr->path_mig_state == IB_MIG_REARM) {
			if (qp->s_mig_state == IB_MIG_ARMED)
				goto inval;
			if (new_state != IB_QPS_RTS)
				goto inval;
		} else if (attr->path_mig_state == IB_MIG_MIGRATED) {
			if (qp->s_mig_state == IB_MIG_REARM)
				goto inval;
			if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
				goto inval;
			if (qp->s_mig_state == IB_MIG_ARMED)
				mig = 1;
		} else
			goto inval;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > QIB_MAX_RDMA_ATOMIC)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		if (qp->state != IB_QPS_RESET) {
			qp->state = IB_QPS_RESET;
			spin_lock(&dev->pending_lock);
			if (!list_empty(&priv->iowait))
				list_del_init(&priv->iowait);
			spin_unlock(&dev->pending_lock);
			qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
			spin_unlock(&qp->s_lock);
			spin_unlock_irq(&qp->r_lock);
			/* Stop the sending work queue and retry timer */
			cancel_work_sync(&priv->s_work);
			del_timer_sync(&qp->s_timer);
			wait_event(priv->wait_dma,
				   !atomic_read(&priv->s_dma_busy));
			if (priv->s_tx) {
				qib_put_txreq(priv->s_tx);
				priv->s_tx = NULL;
			}
			remove_qp(dev, qp);
			wait_event(qp->wait, !atomic_read(&qp->refcount));
			spin_lock_irq(&qp->r_lock);
			spin_lock(&qp->s_lock);
			clear_mr_refs(qp, 1);
			qib_reset_qp(qp, ibqp->qp_type);
		}
		break;

	case IB_QPS_RTR:
		/* Allow event to retrigger if QP set to RTR more than once */
		qp->r_flags &= ~QIB_R_COMM_EST;
		qp->state = new_state;
		break;

	case IB_QPS_SQD:
		qp->s_draining = qp->s_last != qp->s_cur;
		qp->state = new_state;
		break;

	case IB_QPS_SQE:
		if (qp->ibqp.qp_type == IB_QPT_RC)
			goto inval;
		qp->state = new_state;
		break;

	case IB_QPS_ERR:
		lastwqe = qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		break;

	default:
		qp->state = new_state;
		break;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_PORT)
		qp->port_num = attr->port_num;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_next_psn = attr->sq_psn & QIB_PSN_MASK;
		qp->s_psn = qp->s_next_psn;
		qp->s_sending_psn = qp->s_next_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
		qp->s_sending_hpsn = qp->s_last_psn;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn & QIB_PSN_MASK;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV) {
		qp->remote_ah_attr = attr->ah_attr;
		qp->s_srate = attr->ah_attr.static_rate;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		qp->alt_ah_attr = attr->alt_ah_attr;
		qp->s_alt_pkey_index = attr->alt_pkey_index;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		qp->s_mig_state = attr->path_mig_state;
		if (mig) {
			qp->remote_ah_attr = qp->alt_ah_attr;
			qp->port_num = qp->alt_ah_attr.port_num;
			qp->s_pkey_index = qp->s_alt_pkey_index;
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		qp->path_mtu = pmtu;
		qp->pmtu = ib_mtu_enum_to_int(pmtu);
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		qp->s_retry_cnt = attr->retry_cnt;
		qp->s_retry = attr->retry_cnt;
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry_cnt = attr->rnr_retry;
		qp->s_rnr_retry = attr->rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT) {
		qp->timeout = attr->timeout;
		/* Local ACK timeout is 4.096 usec * 2^timeout */
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
					 1000UL);
	}

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		insert_qp(dev, qp);

	if (lastwqe) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	if (mig) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_PATH_MIG;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	ret = 0;
	goto bail;

inval:
	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);
	ret = -EINVAL;

bail:
	return ret;
}
int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		 int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct rvt_qp *qp = to_iqp(ibqp);

	attr->qp_state = qp->state;
	attr->cur_qp_state = attr->qp_state;
	attr->path_mtu = qp->path_mtu;
	attr->path_mig_state = qp->s_mig_state;
	attr->qkey = qp->qkey;
	attr->rq_psn = qp->r_psn & QIB_PSN_MASK;
	attr->sq_psn = qp->s_next_psn & QIB_PSN_MASK;
	attr->dest_qp_num = qp->remote_qpn;
	attr->qp_access_flags = qp->qp_access_flags;
	attr->cap.max_send_wr = qp->s_size - 1;
	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
	attr->cap.max_send_sge = qp->s_max_sge;
	attr->cap.max_recv_sge = qp->r_rq.max_sge;
	attr->cap.max_inline_data = 0;
	attr->ah_attr = qp->remote_ah_attr;
	attr->alt_ah_attr = qp->alt_ah_attr;
	attr->pkey_index = qp->s_pkey_index;
	attr->alt_pkey_index = qp->s_alt_pkey_index;
	attr->en_sqd_async_notify = 0;
	attr->sq_draining = qp->s_draining;
	attr->max_rd_atomic = qp->s_max_rd_atomic;
	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
	attr->min_rnr_timer = qp->r_min_rnr_timer;
	attr->port_num = qp->port_num;
	attr->timeout = qp->timeout;
	attr->retry_cnt = qp->s_retry_cnt;
	attr->rnr_retry = qp->s_rnr_retry_cnt;
	attr->alt_port_num = qp->alt_ah_attr.port_num;
	attr->alt_timeout = qp->alt_timeout;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->cap = attr->cap;
	if (qp->s_flags & QIB_S_SIGNAL_REQ_WR)
		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	else
		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->port_num = qp->port_num;
	return 0;
}
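
/*
 * Worked example for the credit encoding in qib_compute_aeth() below
 * (illustrative only): with 10 RWQEs available, the binary search
 * settles on index 6, since credit_table[6] = 8 is the largest entry
 * not exceeding 10, so the AETH advertises 8 credits.  The decode
 * direction is a plain table lookup; see qib_get_credit().
 */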
/**
 * qib_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 qib_compute_aeth(struct rvt_qp *qp)
{
	u32 aeth = qp->r_msn & QIB_MSN_MASK;

	if (qp->ibqp.srq) {
		/*
		 * Shared receive queues don't generate credits.
		 * Set the credit field to the invalid value.
		 */
		aeth |= QIB_AETH_CREDIT_INVAL << QIB_AETH_CREDIT_SHIFT;
	} else {
		u32 min, max, x;
		u32 credits;
		struct rvt_rwq *wq = qp->r_rq.wq;
		u32 head;
		u32 tail;

		/* sanity check pointers before trusting them */
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		/*
		 * Compute the number of credits available (RWQEs).
		 * XXX Not holding the r_rq.lock here so there is a small
		 * chance that the pair of reads are not atomic.
		 */
		credits = head - tail;
		if ((int)credits < 0)
			credits += qp->r_rq.size;
		/*
		 * Binary search the credit table to find the code to
		 * use.
		 */
		min = 0;
		max = 31;
		for (;;) {
			x = (min + max) / 2;
			if (credit_table[x] == credits)
				break;
			if (credit_table[x] > credits)
				max = x;
			else if (min == x)
				break;
			else
				min = x;
		}
		aeth |= x << QIB_AETH_CREDIT_SHIFT;
	}
	return cpu_to_be32(aeth);
}

/**
 * qib_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Returns the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata)
{
	struct rvt_qp *qp;
	int err;
	struct rvt_swqe *swq = NULL;
	struct qib_ibdev *dev;
	struct qib_devdata *dd;
	size_t sz;
	size_t sg_list_sz;
	struct ib_qp *ret;
	gfp_t gfp;
	struct qib_qp_priv *priv;

	if (init_attr->cap.max_send_sge > ib_qib_max_sges ||
	    init_attr->cap.max_send_wr > ib_qib_max_qp_wrs ||
	    init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO))
		return ERR_PTR(-EINVAL);

	/* GFP_NOIO is applicable to RC QPs only */
	if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
	    init_attr->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);

	gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
			GFP_NOIO : GFP_KERNEL;

	/* Check receive queue parameters if no SRQ is specified. */
	if (!init_attr->srq) {
		if (init_attr->cap.max_recv_sge > ib_qib_max_sges ||
		    init_attr->cap.max_recv_wr > ib_qib_max_qp_wrs) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
		if (init_attr->cap.max_send_sge +
		    init_attr->cap.max_send_wr +
		    init_attr->cap.max_recv_sge +
		    init_attr->cap.max_recv_wr == 0) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
	}
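
	/*
	 * Sizing note for the allocations in the switch below
	 * (rationale inferred, not stated elsewhere in this file): the
	 * send and receive rings are allocated with one entry more
	 * than requested (max_*_wr + 1) so that head == tail always
	 * means "empty" while a full ring remains distinguishable;
	 * qib_query_qp() correspondingly reports s_size - 1 and
	 * r_rq.size - 1 back to the caller.
	 */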
	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (init_attr->port_num == 0 ||
		    init_attr->port_num > ibpd->device->phys_port_cnt) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
		/* fall through */
	case IB_QPT_UC:
	case IB_QPT_RC:
	case IB_QPT_UD:
		sz = sizeof(struct rvt_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct rvt_swqe);
		swq = __vmalloc((init_attr->cap.max_send_wr + 1) * sz,
				gfp, PAGE_KERNEL);
		if (swq == NULL) {
			ret = ERR_PTR(-ENOMEM);
			goto bail;
		}
		sz = sizeof(*qp);
		sg_list_sz = 0;
		if (init_attr->srq) {
			struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);

			if (srq->rq.max_sge > 1)
				sg_list_sz = sizeof(*qp->r_sg_list) *
					(srq->rq.max_sge - 1);
		} else if (init_attr->cap.max_recv_sge > 1)
			sg_list_sz = sizeof(*qp->r_sg_list) *
				(init_attr->cap.max_recv_sge - 1);
		qp = kzalloc(sz + sg_list_sz, gfp);
		if (!qp) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_swq;
		}
		RCU_INIT_POINTER(qp->next, NULL);
		priv = kzalloc(sizeof(*priv), gfp);
		if (!priv) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_qp_hdr;
		}
		priv->owner = qp;
		priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), gfp);
		if (!priv->s_hdr) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_qp;
		}
		qp->priv = priv;
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
					 1000UL);
		if (init_attr->srq)
			sz = 0;
		else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct rvt_rwqe);
			if (gfp != GFP_NOIO)
				qp->r_rq.wq = vmalloc_user(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz);
			else
				qp->r_rq.wq = __vmalloc(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz,
						gfp, PAGE_KERNEL);

			if (!qp->r_rq.wq) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_qp;
			}
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->r_lock);
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		init_waitqueue_head(&qp->wait);
		init_waitqueue_head(&priv->wait_dma);
		init_timer(&qp->s_timer);
		qp->s_timer.data = (unsigned long)qp;
		INIT_WORK(&priv->s_work, qib_do_send);
		INIT_LIST_HEAD(&priv->iowait);
		INIT_LIST_HEAD(&qp->rspwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = init_attr->cap.max_send_wr + 1;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = QIB_S_SIGNAL_REQ_WR;
		dev = to_idev(ibpd->device);
		dd = dd_from_dev(dev);
		err = alloc_qpn(dd, &dev->qpn_table, init_attr->qp_type,
				init_attr->port_num, gfp);
		if (err < 0) {
			ret = ERR_PTR(err);
			vfree(qp->r_rq.wq);
			goto bail_qp;
		}
		qp->ibqp.qp_num = err;
		qp->port_num = init_attr->port_num;
		qib_reset_qp(qp, init_attr->qp_type);
		break;

	default:
		/* Don't support raw QPs */
		ret = ERR_PTR(-ENOSYS);
		goto bail;
	}

	init_attr->cap.max_inline_data = 0;

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See qib_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		if (!qp->r_rq.wq) {
			__u64 offset = 0;

			err = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		} else {
			u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;

			qp->ip = qib_create_mmap_info(dev, s,
						      ibpd->uobject->context,
						      qp->r_rq.wq);
			if (!qp->ip) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_ip;
			}

			err = ib_copy_to_udata(udata, &(qp->ip->offset),
					       sizeof(qp->ip->offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		}
	}

	spin_lock(&dev->n_qps_lock);
	if (dev->n_qps_allocated == ib_qib_max_qps) {
		spin_unlock(&dev->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_qps_allocated++;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = &qp->ibqp;
	goto bail;

bail_ip:
	if (qp->ip)
		kref_put(&qp->ip->ref, qib_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
bail_qp:
	kfree(priv->s_hdr);
	kfree(priv);
bail_qp_hdr:
	kfree(qp);
bail_swq:
	vfree(swq);
bail:
	return ret;
}
/**
 * qib_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int qib_destroy_qp(struct ib_qp *ibqp)
{
	struct rvt_qp *qp = to_iqp(ibqp);
	struct qib_ibdev *dev = to_idev(ibqp->device);
	struct qib_qp_priv *priv = qp->priv;

	/* Make sure HW and driver activity is stopped. */
	spin_lock_irq(&qp->s_lock);
	if (qp->state != IB_QPS_RESET) {
		qp->state = IB_QPS_RESET;
		spin_lock(&dev->pending_lock);
		if (!list_empty(&priv->iowait))
			list_del_init(&priv->iowait);
		spin_unlock(&dev->pending_lock);
		qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
		spin_unlock_irq(&qp->s_lock);
		cancel_work_sync(&priv->s_work);
		del_timer_sync(&qp->s_timer);
		wait_event(priv->wait_dma, !atomic_read(&priv->s_dma_busy));
		if (priv->s_tx) {
			qib_put_txreq(priv->s_tx);
			priv->s_tx = NULL;
		}
		remove_qp(dev, qp);
		wait_event(qp->wait, !atomic_read(&qp->refcount));
		clear_mr_refs(qp, 1);
	} else
		spin_unlock_irq(&qp->s_lock);

	/* all user activity is cleaned up, mark the QPN available */
	free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
	spin_lock(&dev->n_qps_lock);
	dev->n_qps_allocated--;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip)
		kref_put(&qp->ip->ref, qib_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	vfree(qp->s_wq);
	kfree(priv->s_hdr);
	kfree(priv);
	kfree(qp);
	return 0;
}

/**
 * qib_init_qpn_table - initialize the QP number table for a device
 * @dd: the device data structure
 * @qpt: the QPN table
 */
void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt)
{
	spin_lock_init(&qpt->lock);
	qpt->last = 1;		/* start with QPN 2 */
	qpt->nmaps = 1;
	qpt->mask = dd->qpn_mask;
}

/**
 * qib_free_qpn_table - free the QP number table for a device
 * @qpt: the QPN table
 */
void qib_free_qpn_table(struct qib_qpn_table *qpt)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
		if (qpt->map[i].page)
			free_page((unsigned long) qpt->map[i].page);
}
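
/*
 * Worked example for qib_get_credit() below (illustrative only): an
 * AETH carrying credit code 9 grants credit_table[9] = 24 RWQEs, so
 * the new limit becomes LSN = MSN + 24, masked to the 24-bit MSN
 * space; SSNs up to that limit may be sent without waiting for
 * further credit updates.
 */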
/**
 * qib_get_credit - handle the credit field of an incoming AETH
 * @qp: the QP whose send state to update
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void qib_get_credit(struct rvt_qp *qp, u32 aeth)
{
	u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;

	/*
	 * If the credit is invalid, we can send
	 * as many packets as we like.  Otherwise, we have to
	 * honor the credit field.
	 */
	if (credit == QIB_AETH_CREDIT_INVAL) {
		if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
			qp->s_flags |= QIB_S_UNLIMITED_CREDIT;
			if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
				qib_schedule_send(qp);
			}
		}
	} else if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
		/* Compute new LSN (i.e., MSN + credit) */
		credit = (aeth + credit_table[credit]) & QIB_MSN_MASK;
		if (qib_cmp24(credit, qp->s_lsn) > 0) {
			qp->s_lsn = credit;
			if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
				qib_schedule_send(qp);
			}
		}
	}
}

#ifdef CONFIG_DEBUG_FS

struct qib_qp_iter {
	struct qib_ibdev *dev;
	struct rvt_qp *qp;
	int n;
};

struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev)
{
	struct qib_qp_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	if (qib_qp_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int qib_qp_iter_next(struct qib_qp_iter *iter)
{
	struct qib_ibdev *dev = iter->dev;
	int n = iter->n;
	int ret = 1;
	struct rvt_qp *pqp = iter->qp;
	struct rvt_qp *qp;

	for (; n < dev->qp_table_size; n++) {
		if (pqp)
			qp = rcu_dereference(pqp->next);
		else
			qp = rcu_dereference(dev->qp_table[n]);
		pqp = qp;
		if (qp) {
			iter->qp = qp;
			iter->n = n;
			return 0;
		}
	}
	return ret;
}

static const char * const qp_type_str[] = {
	"SMI", "GSI", "RC", "UC", "UD",
};

void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter)
{
	struct rvt_swqe *wqe;
	struct rvt_qp *qp = iter->qp;
	struct qib_qp_priv *priv = qp->priv;

	wqe = get_swqe_ptr(qp, qp->s_last);
	seq_printf(s,
		   "N %d QP%u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x\n",
		   iter->n,
		   qp->ibqp.qp_num,
		   qp_type_str[qp->ibqp.qp_type],
		   qp->state,
		   wqe->wr.opcode,
		   qp->s_hdrwords,
		   qp->s_flags,
		   atomic_read(&priv->s_dma_busy),
		   !list_empty(&priv->iowait),
		   qp->timeout,
		   wqe->ssn,
		   qp->s_lsn,
		   qp->s_last_psn,
		   qp->s_psn, qp->s_next_psn,
		   qp->s_sending_psn, qp->s_sending_hpsn,
		   qp->s_last, qp->s_acked, qp->s_cur,
		   qp->s_tail, qp->s_head, qp->s_size,
		   qp->remote_qpn,
		   qp->remote_ah_attr.dlid);
}

#endif