/*
 * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/jhash.h>
#include <rdma/rdma_vt.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif

#include "qib.h"

#define BITS_PER_PAGE		(PAGE_SIZE * BITS_PER_BYTE)
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE - 1)

/* Convert a (bitmap page, bit offset) pair back into a QP number. */
static inline unsigned mk_qpn(struct qib_qpn_table *qpt,
			      struct qpn_map *map, unsigned off)
{
	return (map - qpt->map) * BITS_PER_PAGE + off;
}

/*
 * Find the next candidate bit offset within a QPN bitmap page.  When
 * the low QPN bits select a receive context (qpt->mask is non-zero),
 * skip QPNs whose context bits are >= n (the number of kernel receive
 * queues); otherwise simply find the next zero bit.
 */
static inline unsigned find_next_offset(struct qib_qpn_table *qpt,
					struct qpn_map *map, unsigned off,
					unsigned n)
{
	if (qpt->mask) {
		off++;
		if (((off & qpt->mask) >> 1) >= n)
			off = (off | qpt->mask) + 2;
	} else
		off = find_next_zero_bit(map->page, BITS_PER_PAGE, off);
	return off;
}
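
/*
 * Illustrative sketch (not part of the driver): the QPN <-> bitmap
 * arithmetic above, assuming 4 KiB pages so BITS_PER_PAGE == 32768:
 *
 *	map = &qpt->map[qpn / BITS_PER_PAGE];	// QPN 40000 -> map[1]
 *	off = qpn & BITS_PER_PAGE_MASK;		// 40000 & 32767 == 7232
 *	qpn = mk_qpn(qpt, map, off);		// 1 * 32768 + 7232 == 40000
 */
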
/*
 * Convert the AETH credit code into the number of credits.
 */
static u32 credit_table[31] = {
	0,			/* 0 */
	1,			/* 1 */
	2,			/* 2 */
	3,			/* 3 */
	4,			/* 4 */
	6,			/* 5 */
	8,			/* 6 */
	12,			/* 7 */
	16,			/* 8 */
	24,			/* 9 */
	32,			/* A */
	48,			/* B */
	64,			/* C */
	96,			/* D */
	128,			/* E */
	192,			/* F */
	256,			/* 10 */
	384,			/* 11 */
	512,			/* 12 */
	768,			/* 13 */
	1024,			/* 14 */
	1536,			/* 15 */
	2048,			/* 16 */
	3072,			/* 17 */
	4096,			/* 18 */
	6144,			/* 19 */
	8192,			/* 1A */
	12288,			/* 1B */
	16384,			/* 1C */
	24576,			/* 1D */
	32768			/* 1E */
};
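
/*
 * Illustrative sketch (not part of the driver): decoding the credit
 * field of a host-order AETH with the table above.  The shift/mask
 * names are the driver's existing QIB_AETH_* definitions; the
 * reserved code QIB_AETH_CREDIT_INVAL is not an index into the table
 * and means "credits invalid/unlimited" (see qib_compute_aeth() and
 * qib_get_credit() below).
 *
 *	u32 code = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;
 *	u32 credits = credit_table[code];   // e.g. 0x7 -> 12, 0x10 -> 256
 */
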
static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map,
			 gfp_t gfp)
{
	unsigned long page = get_zeroed_page(gfp);

	/*
	 * Free the page if someone raced with us installing it.
	 */

	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}

/*
 * Allocate the next available QPN, or
 * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
 */
static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
		     enum ib_qp_type type, u8 port, gfp_t gfp)
{
	u32 i, offset, max_scan, qpn;
	struct qpn_map *map;
	int ret;

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

	qpn = qpt->last + 2;
	if (qpn >= QPN_MAX)
		qpn = 2;
	if (qpt->mask && ((qpn & qpt->mask) >> 1) >= dd->n_krcv_queues)
		qpn = (qpn | qpt->mask) + 2;
	offset = qpn & BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map, gfp);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset = find_next_offset(qpt, map, offset,
						  dd->n_krcv_queues);
			qpn = mk_qpn(qpt, map, offset);
			/*
			 * This test differs from alloc_pidmap().
			 * If find_next_offset() does find a zero
			 * bit, we don't need to check for QPN
			 * wrapping around past our starting QPN.
			 * We just need to be sure we don't loop
			 * forever.
			 */
		} while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			offset = 0;
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			offset = 0;
		} else {
			map = &qpt->map[0];
			offset = 2;
		}
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}
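
/*
 * Illustrative sketch (not part of the driver): how alloc_qpn() encodes
 * the "special QP already allocated" state in qpt->flags.  Each port
 * owns two bits, n = 1 << ((type == IB_QPT_GSI) + 2 * (port - 1)):
 *
 *	port 1 SMI -> bit 0	port 1 GSI -> bit 1
 *	port 2 SMI -> bit 2	port 2 GSI -> bit 3
 *
 * so a second attempt to create the same special QP fails with -EINVAL.
 */
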
static void free_qpn(struct qib_qpn_table *qpt, u32 qpn)
{
	struct qpn_map *map;

	map = qpt->map + qpn / BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
}

/* Hash a QPN into a bucket index of the device's QP hash table. */
static inline unsigned qpn_hash(struct qib_ibdev *dev, u32 qpn)
{
	return jhash_1word(qpn, dev->qp_rnd) &
		(dev->qp_table_size - 1);
}

/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
{
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	unsigned long flags;
	unsigned n = qpn_hash(dev, qp->ibqp.qp_num);

	atomic_inc(&qp->refcount);
	spin_lock_irqsave(&dev->qpt_lock, flags);

	if (qp->ibqp.qp_num == 0)
		rcu_assign_pointer(ibp->qp0, qp);
	else if (qp->ibqp.qp_num == 1)
		rcu_assign_pointer(ibp->qp1, qp);
	else {
		qp->next = dev->qp_table[n];
		rcu_assign_pointer(dev->qp_table[n], qp);
	}

	spin_unlock_irqrestore(&dev->qpt_lock, flags);
}
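
/*
 * Illustrative sketch (not part of the driver): the reader side that
 * pairs with the rcu_assign_pointer() publication above.  Readers walk
 * a bucket under rcu_read_lock() with no spinlock held, as
 * qib_lookup_qpn() does below:
 *
 *	rcu_read_lock();
 *	for (qp = rcu_dereference(dev->qp_table[qpn_hash(dev, qpn)]);
 *	     qp; qp = rcu_dereference(qp->next))
 *		if (qp->ibqp.qp_num == qpn)
 *			break;
 *	rcu_read_unlock();
 */
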
/*
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive interrupt routine.
 */
static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
{
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
	unsigned long flags;
	int removed = 1;

	spin_lock_irqsave(&dev->qpt_lock, flags);

	if (rcu_dereference_protected(ibp->qp0,
			lockdep_is_held(&dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(ibp->qp0, NULL);
	} else if (rcu_dereference_protected(ibp->qp1,
			lockdep_is_held(&dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(ibp->qp1, NULL);
	} else {
		struct qib_qp *q;
		struct qib_qp __rcu **qpp;

		removed = 0;
		qpp = &dev->qp_table[n];
		for (; (q = rcu_dereference_protected(*qpp,
				lockdep_is_held(&dev->qpt_lock))) != NULL;
				qpp = &q->next)
			if (q == qp) {
				RCU_INIT_POINTER(*qpp,
					rcu_dereference_protected(qp->next,
					 lockdep_is_held(&dev->qpt_lock)));
				removed = 1;
				break;
			}
	}

	spin_unlock_irqrestore(&dev->qpt_lock, flags);
	if (removed) {
		synchronize_rcu();
		atomic_dec(&qp->refcount);
	}
}

/**
 * qib_free_all_qps - check for QPs still in use
 * @dd: the device data
 *
 * There should not be any QPs still in use.
 * Returns the number of QPs still in use.
 */
unsigned qib_free_all_qps(struct qib_devdata *dd)
{
	struct qib_ibdev *dev = &dd->verbs_dev;
	unsigned long flags;
	struct qib_qp *qp;
	unsigned n, qp_inuse = 0;

	for (n = 0; n < dd->num_pports; n++) {
		struct qib_ibport *ibp = &dd->pport[n].ibport_data;

		if (!qib_mcast_tree_empty(ibp))
			qp_inuse++;
		rcu_read_lock();
		if (rcu_dereference(ibp->qp0))
			qp_inuse++;
		if (rcu_dereference(ibp->qp1))
			qp_inuse++;
		rcu_read_unlock();
	}

	spin_lock_irqsave(&dev->qpt_lock, flags);
	for (n = 0; n < dev->qp_table_size; n++) {
		qp = rcu_dereference_protected(dev->qp_table[n],
			lockdep_is_held(&dev->qpt_lock));
		RCU_INIT_POINTER(dev->qp_table[n], NULL);

		for (; qp; qp = rcu_dereference_protected(qp->next,
					lockdep_is_held(&dev->qpt_lock)))
			qp_inuse++;
	}
	spin_unlock_irqrestore(&dev->qpt_lock, flags);
	synchronize_rcu();

	return qp_inuse;
}

/**
 * qib_lookup_qpn - return the QP with the given QPN
 * @ibp: the IB port
 * @qpn: the QP number to look up
 *
 * The caller is responsible for decrementing the QP reference count
 * when done.
 */
struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
{
	struct qib_qp *qp = NULL;

	rcu_read_lock();
	if (unlikely(qpn <= 1)) {
		if (qpn == 0)
			qp = rcu_dereference(ibp->qp0);
		else
			qp = rcu_dereference(ibp->qp1);
		if (qp)
			atomic_inc(&qp->refcount);
	} else {
		struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
		unsigned n = qpn_hash(dev, qpn);

		for (qp = rcu_dereference(dev->qp_table[n]); qp;
			qp = rcu_dereference(qp->next))
			if (qp->ibqp.qp_num == qpn) {
				atomic_inc(&qp->refcount);
				break;
			}
	}
	rcu_read_unlock();
	return qp;
}
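
/*
 * Illustrative sketch (not part of this file): dropping the reference
 * taken by qib_lookup_qpn().  Callers elsewhere in the driver
 * typically release it roughly like this, which also wakes anyone in
 * qib_destroy_qp() sleeping in wait_event(qp->wait, ...):
 *
 *	if (atomic_dec_and_test(&qp->refcount))
 *		wake_up(&qp->wait);
 */
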
/**
 * qib_reset_qp - initialize the QP state to the reset state
 * @qp: the QP to reset
 * @type: the QP type
 */
static void qib_reset_qp(struct qib_qp *qp, enum ib_qp_type type)
{
	qp->remote_qpn = 0;
	qp->qkey = 0;
	qp->qp_access_flags = 0;
	atomic_set(&qp->s_dma_busy, 0);
	qp->s_flags &= QIB_S_SIGNAL_REQ_WR;
	qp->s_hdrwords = 0;
	qp->s_wqe = NULL;
	qp->s_draining = 0;
	qp->s_next_psn = 0;
	qp->s_last_psn = 0;
	qp->s_sending_psn = 0;
	qp->s_sending_hpsn = 0;
	qp->s_psn = 0;
	qp->r_psn = 0;
	qp->r_msn = 0;
	if (type == IB_QPT_RC) {
		qp->s_state = IB_OPCODE_RC_SEND_LAST;
		qp->r_state = IB_OPCODE_RC_SEND_LAST;
	} else {
		qp->s_state = IB_OPCODE_UC_SEND_LAST;
		qp->r_state = IB_OPCODE_UC_SEND_LAST;
	}
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->r_nak_state = 0;
	qp->r_aflags = 0;
	qp->r_flags = 0;
	qp->s_head = 0;
	qp->s_tail = 0;
	qp->s_cur = 0;
	qp->s_acked = 0;
	qp->s_last = 0;
	qp->s_ssn = 1;
	qp->s_lsn = 0;
	qp->s_mig_state = IB_MIG_MIGRATED;
	memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
	qp->r_head_ack_queue = 0;
	qp->s_tail_ack_queue = 0;
	qp->s_num_rd_atomic = 0;
	if (qp->r_rq.wq) {
		qp->r_rq.wq->head = 0;
		qp->r_rq.wq->tail = 0;
	}
	qp->r_sge.num_sge = 0;
}

/*
 * Drop all MR references held by the QP's receive side and, if
 * clr_sends is set, by any queued send work requests as well.
 */
static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
{
	unsigned n;

	if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
		qib_put_ss(&qp->s_rdma_read_sge);

	qib_put_ss(&qp->r_sge);

	if (clr_sends) {
		while (qp->s_last != qp->s_head) {
			struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
			unsigned i;

			for (i = 0; i < wqe->wr.num_sge; i++) {
				struct qib_sge *sge = &wqe->sg_list[i];

				qib_put_mr(sge->mr);
			}
			if (qp->ibqp.qp_type == IB_QPT_UD ||
			    qp->ibqp.qp_type == IB_QPT_SMI ||
			    qp->ibqp.qp_type == IB_QPT_GSI)
				atomic_dec(&to_iah(wqe->ud_wr.ah)->refcount);
			if (++qp->s_last >= qp->s_size)
				qp->s_last = 0;
		}
		if (qp->s_rdma_mr) {
			qib_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
	}

	if (qp->ibqp.qp_type != IB_QPT_RC)
		return;

	for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
		struct qib_ack_entry *e = &qp->s_ack_queue[n];

		if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
		    e->rdma_sge.mr) {
			qib_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
	}
}

/**
 * qib_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 * Returns true if the last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in the error state, just return.
 */
int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
{
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_wc wc;
	int ret = 0;

	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
		goto bail;

	qp->state = IB_QPS_ERR;

	if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
		qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
		del_timer(&qp->s_timer);
	}

	if (qp->s_flags & QIB_S_ANY_WAIT_SEND)
		qp->s_flags &= ~QIB_S_ANY_WAIT_SEND;

	spin_lock(&dev->pending_lock);
	if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
		qp->s_flags &= ~QIB_S_ANY_WAIT_IO;
		list_del_init(&qp->iowait);
	}
	spin_unlock(&dev->pending_lock);

	if (!(qp->s_flags & QIB_S_BUSY)) {
		qp->s_hdrwords = 0;
		if (qp->s_rdma_mr) {
			qib_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		if (qp->s_tx) {
			qib_put_txreq(qp->s_tx);
			qp->s_tx = NULL;
		}
	}

	/* Schedule the sending tasklet to drain the send work queue. */
	if (qp->s_last != qp->s_head)
		qib_schedule_send(qp);

	clear_mr_refs(qp, 0);

	memset(&wc, 0, sizeof(wc));
	wc.qp = &qp->ibqp;
	wc.opcode = IB_WC_RECV;

	if (test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) {
		wc.wr_id = qp->r_wr_id;
		wc.status = err;
		qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	if (qp->r_rq.wq) {
		struct qib_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		while (tail != head) {
			wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;

		spin_unlock(&qp->r_rq.lock);
	} else if (qp->ibqp.event_handler)
		ret = 1;

bail:
	return ret;
}
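
/*
 * Illustrative sketch (mirrors the caller in qib_modify_qp() below):
 * when qib_error_qp() returns true, the caller dispatches the "last
 * WQE reached" asynchronous event after dropping the QP locks:
 *
 *	lastwqe = qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
 *	... unlock r_lock/s_lock ...
 *	if (lastwqe) {
 *		ev.device = qp->ibqp.device;
 *		ev.element.qp = &qp->ibqp;
 *		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
 *		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
 *	}
 */
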
/**
 * qib_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_udata *udata)
{
	struct qib_ibdev *dev = to_idev(ibqp->device);
	struct qib_qp *qp = to_iqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct ib_event ev;
	int lastwqe = 0;
	int mig = 0;
	int ret;
	u32 pmtu = 0; /* for gcc warning only */

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_lock);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask, IB_LINK_LAYER_UNSPECIFIED))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		if (attr->ah_attr.dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
			goto inval;
		if (qib_check_ah(qp->ibqp.device, &attr->ah_attr))
			goto inval;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_ah_attr.dlid >=
		    be16_to_cpu(IB_MULTICAST_LID_BASE))
			goto inval;
		if (qib_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
			goto inval;
		if (attr->alt_pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (qp->ibqp.qp_type == IB_QPT_SMI ||
		    qp->ibqp.qp_type == IB_QPT_GSI ||
		    attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	if (attr_mask & IB_QP_DEST_QPN)
		if (attr->dest_qp_num > QIB_QPN_MASK)
			goto inval;

	if (attr_mask & IB_QP_RETRY_CNT)
		if (attr->retry_cnt > 7)
			goto inval;

	if (attr_mask & IB_QP_RNR_RETRY)
		if (attr->rnr_retry > 7)
			goto inval;

	/*
	 * Don't allow invalid path_mtu values.  OK to set greater
	 * than the active mtu (or even the max_cap, if we have tuned
	 * that to a small mtu).  We'll set qp->path_mtu
	 * to the lesser of requested attribute mtu and active,
	 * for packetizing messages.
	 * Note that the QP port has to be set in INIT and MTU in RTR.
	 */
	if (attr_mask & IB_QP_PATH_MTU) {
		struct qib_devdata *dd = dd_from_dev(dev);
		int mtu, pidx = qp->port_num - 1;

		mtu = ib_mtu_enum_to_int(attr->path_mtu);
		if (mtu == -1)
			goto inval;
		if (mtu > dd->pport[pidx].ibmtu) {
			switch (dd->pport[pidx].ibmtu) {
			case 4096:
				pmtu = IB_MTU_4096;
				break;
			case 2048:
				pmtu = IB_MTU_2048;
				break;
			case 1024:
				pmtu = IB_MTU_1024;
				break;
			case 512:
				pmtu = IB_MTU_512;
				break;
			case 256:
				pmtu = IB_MTU_256;
				break;
			default:
				pmtu = IB_MTU_2048;
			}
		} else
			pmtu = attr->path_mtu;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		if (attr->path_mig_state == IB_MIG_REARM) {
			if (qp->s_mig_state == IB_MIG_ARMED)
				goto inval;
			if (new_state != IB_QPS_RTS)
				goto inval;
		} else if (attr->path_mig_state == IB_MIG_MIGRATED) {
			if (qp->s_mig_state == IB_MIG_REARM)
				goto inval;
			if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
				goto inval;
			if (qp->s_mig_state == IB_MIG_ARMED)
				mig = 1;
		} else
			goto inval;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > QIB_MAX_RDMA_ATOMIC)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		if (qp->state != IB_QPS_RESET) {
			qp->state = IB_QPS_RESET;
			spin_lock(&dev->pending_lock);
			if (!list_empty(&qp->iowait))
				list_del_init(&qp->iowait);
			spin_unlock(&dev->pending_lock);
			qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
			spin_unlock(&qp->s_lock);
			spin_unlock_irq(&qp->r_lock);
			/* Stop the sending work queue and retry timer */
			cancel_work_sync(&qp->s_work);
			del_timer_sync(&qp->s_timer);
			wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
			if (qp->s_tx) {
				qib_put_txreq(qp->s_tx);
				qp->s_tx = NULL;
			}
			remove_qp(dev, qp);
			wait_event(qp->wait, !atomic_read(&qp->refcount));
			spin_lock_irq(&qp->r_lock);
			spin_lock(&qp->s_lock);
			clear_mr_refs(qp, 1);
			qib_reset_qp(qp, ibqp->qp_type);
		}
		break;

	case IB_QPS_RTR:
		/* Allow event to retrigger if QP set to RTR more than once */
		qp->r_flags &= ~QIB_R_COMM_EST;
		qp->state = new_state;
		break;

	case IB_QPS_SQD:
		qp->s_draining = qp->s_last != qp->s_cur;
		qp->state = new_state;
		break;

	case IB_QPS_SQE:
		if (qp->ibqp.qp_type == IB_QPT_RC)
			goto inval;
		qp->state = new_state;
		break;

	case IB_QPS_ERR:
		lastwqe = qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		break;

	default:
		qp->state = new_state;
		break;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_PORT)
		qp->port_num = attr->port_num;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_next_psn = attr->sq_psn & QIB_PSN_MASK;
		qp->s_psn = qp->s_next_psn;
		qp->s_sending_psn = qp->s_next_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
		qp->s_sending_hpsn = qp->s_last_psn;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn & QIB_PSN_MASK;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV) {
		qp->remote_ah_attr = attr->ah_attr;
		qp->s_srate = attr->ah_attr.static_rate;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		qp->alt_ah_attr = attr->alt_ah_attr;
		qp->s_alt_pkey_index = attr->alt_pkey_index;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		qp->s_mig_state = attr->path_mig_state;
		if (mig) {
			qp->remote_ah_attr = qp->alt_ah_attr;
			qp->port_num = qp->alt_ah_attr.port_num;
			qp->s_pkey_index = qp->s_alt_pkey_index;
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		qp->path_mtu = pmtu;
		qp->pmtu = ib_mtu_enum_to_int(pmtu);
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		qp->s_retry_cnt = attr->retry_cnt;
		qp->s_retry = attr->retry_cnt;
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry_cnt = attr->rnr_retry;
		qp->s_rnr_retry = attr->rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT) {
		qp->timeout = attr->timeout;
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
				1000UL);
	}

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		insert_qp(dev, qp);

	if (lastwqe) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	if (mig) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_PATH_MIG;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	ret = 0;
	goto bail;

inval:
	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);
	ret = -EINVAL;

bail:
	return ret;
}
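
/*
 * Illustrative arithmetic (not part of the driver): the IB_QP_TIMEOUT
 * conversion above implements the IBTA local ACK timeout of
 * 4.096 usec * 2^timeout, e.g.:
 *
 *	timeout == 14  ->  4096 * (1 << 14) / 1000 usec ~= 67 ms
 *
 * which usecs_to_jiffies() then rounds to the kernel tick resolution.
 */
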
/**
 * qib_query_qp - report the current attributes of a queue pair
 * @ibqp: the queue pair to query
 * @attr: filled in with the QP's current attributes
 * @attr_mask: ignored
 * @init_attr: filled in with the attributes the QP was created with
 *
 * Always returns 0.
 */
int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		 int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct qib_qp *qp = to_iqp(ibqp);

	attr->qp_state = qp->state;
	attr->cur_qp_state = attr->qp_state;
	attr->path_mtu = qp->path_mtu;
	attr->path_mig_state = qp->s_mig_state;
	attr->qkey = qp->qkey;
	attr->rq_psn = qp->r_psn & QIB_PSN_MASK;
	attr->sq_psn = qp->s_next_psn & QIB_PSN_MASK;
	attr->dest_qp_num = qp->remote_qpn;
	attr->qp_access_flags = qp->qp_access_flags;
	attr->cap.max_send_wr = qp->s_size - 1;
	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
	attr->cap.max_send_sge = qp->s_max_sge;
	attr->cap.max_recv_sge = qp->r_rq.max_sge;
	attr->cap.max_inline_data = 0;
	attr->ah_attr = qp->remote_ah_attr;
	attr->alt_ah_attr = qp->alt_ah_attr;
	attr->pkey_index = qp->s_pkey_index;
	attr->alt_pkey_index = qp->s_alt_pkey_index;
	attr->en_sqd_async_notify = 0;
	attr->sq_draining = qp->s_draining;
	attr->max_rd_atomic = qp->s_max_rd_atomic;
	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
	attr->min_rnr_timer = qp->r_min_rnr_timer;
	attr->port_num = qp->port_num;
	attr->timeout = qp->timeout;
	attr->retry_cnt = qp->s_retry_cnt;
	attr->rnr_retry = qp->s_rnr_retry_cnt;
	attr->alt_port_num = qp->alt_ah_attr.port_num;
	attr->alt_timeout = qp->alt_timeout;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->cap = attr->cap;
	if (qp->s_flags & QIB_S_SIGNAL_REQ_WR)
		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	else
		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->port_num = qp->port_num;
	return 0;
}

/**
 * qib_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 qib_compute_aeth(struct qib_qp *qp)
{
	u32 aeth = qp->r_msn & QIB_MSN_MASK;

	if (qp->ibqp.srq) {
		/*
		 * Shared receive queues don't generate credits.
		 * Set the credit field to the invalid value.
		 */
		aeth |= QIB_AETH_CREDIT_INVAL << QIB_AETH_CREDIT_SHIFT;
	} else {
		u32 min, max, x;
		u32 credits;
		struct qib_rwq *wq = qp->r_rq.wq;
		u32 head;
		u32 tail;

		/* sanity check pointers before trusting them */
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		/*
		 * Compute the number of credits available (RWQEs).
		 * XXX Not holding the r_rq.lock here so there is a small
		 * chance that the pair of reads are not atomic.
		 */
		credits = head - tail;
		if ((int)credits < 0)
			credits += qp->r_rq.size;
		/*
		 * Binary search the credit table to find the code to
		 * use.
		 */
		min = 0;
		max = 31;
		for (;;) {
			x = (min + max) / 2;
			if (credit_table[x] == credits)
				break;
			if (credit_table[x] > credits)
				max = x;
			else if (min == x)
				break;
			else
				min = x;
		}
		aeth |= x << QIB_AETH_CREDIT_SHIFT;
	}
	return cpu_to_be32(aeth);
}
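
/*
 * Illustrative trace (not part of the driver): for a value with no
 * exact match, the search above settles on the largest code whose
 * table entry does not exceed the available credits.  E.g. with
 * credits == 100:
 *
 *	x = 15 (192 > 100) -> max = 15
 *	x =  7 ( 12 < 100) -> min = 7
 *	x = 11 ( 48 < 100) -> min = 11
 *	x = 13 ( 96 < 100) -> min = 13
 *	x = 14 (128 > 100) -> max = 14
 *	x = 13, min == x   -> break
 *
 * so the AETH advertises code 0xD (96 credits).
 */
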
/**
 * qib_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Returns the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata)
{
	struct qib_qp *qp;
	int err;
	struct qib_swqe *swq = NULL;
	struct qib_ibdev *dev;
	struct qib_devdata *dd;
	size_t sz;
	size_t sg_list_sz;
	struct ib_qp *ret;
	gfp_t gfp;

	if (init_attr->cap.max_send_sge > ib_qib_max_sges ||
	    init_attr->cap.max_send_wr > ib_qib_max_qp_wrs ||
	    init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO))
		return ERR_PTR(-EINVAL);

	/* GFP_NOIO is applicable to RC QPs only */
	if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
	    init_attr->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);

	gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
			GFP_NOIO : GFP_KERNEL;

	/* Check receive queue parameters if no SRQ is specified. */
	if (!init_attr->srq) {
		if (init_attr->cap.max_recv_sge > ib_qib_max_sges ||
		    init_attr->cap.max_recv_wr > ib_qib_max_qp_wrs) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
		if (init_attr->cap.max_send_sge +
		    init_attr->cap.max_send_wr +
		    init_attr->cap.max_recv_sge +
		    init_attr->cap.max_recv_wr == 0) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
	}

	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (init_attr->port_num == 0 ||
		    init_attr->port_num > ibpd->device->phys_port_cnt) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
		/* fall through */
	case IB_QPT_UC:
	case IB_QPT_RC:
	case IB_QPT_UD:
		sz = sizeof(struct qib_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct qib_swqe);
		swq = __vmalloc((init_attr->cap.max_send_wr + 1) * sz,
				gfp, PAGE_KERNEL);
		if (swq == NULL) {
			ret = ERR_PTR(-ENOMEM);
			goto bail;
		}
		sz = sizeof(*qp);
		sg_list_sz = 0;
		if (init_attr->srq) {
			struct qib_srq *srq = to_isrq(init_attr->srq);

			if (srq->rq.max_sge > 1)
				sg_list_sz = sizeof(*qp->r_sg_list) *
					(srq->rq.max_sge - 1);
		} else if (init_attr->cap.max_recv_sge > 1)
			sg_list_sz = sizeof(*qp->r_sg_list) *
				(init_attr->cap.max_recv_sge - 1);
		qp = kzalloc(sz + sg_list_sz, gfp);
		if (!qp) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_swq;
		}
		RCU_INIT_POINTER(qp->next, NULL);
		qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), gfp);
		if (!qp->s_hdr) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_qp;
		}
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
				1000UL);
		if (init_attr->srq)
			sz = 0;
		else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct qib_rwqe);
			if (gfp != GFP_NOIO)
				qp->r_rq.wq = vmalloc_user(
						sizeof(struct qib_rwq) +
						qp->r_rq.size * sz);
			else
				qp->r_rq.wq = __vmalloc(
						sizeof(struct qib_rwq) +
						qp->r_rq.size * sz,
						gfp, PAGE_KERNEL);

			if (!qp->r_rq.wq) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_qp;
			}
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->r_lock);
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		init_waitqueue_head(&qp->wait);
		init_waitqueue_head(&qp->wait_dma);
		init_timer(&qp->s_timer);
		qp->s_timer.data = (unsigned long)qp;
		INIT_WORK(&qp->s_work, qib_do_send);
		INIT_LIST_HEAD(&qp->iowait);
		INIT_LIST_HEAD(&qp->rspwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = init_attr->cap.max_send_wr + 1;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = QIB_S_SIGNAL_REQ_WR;
		dev = to_idev(ibpd->device);
		dd = dd_from_dev(dev);
		err = alloc_qpn(dd, &dev->qpn_table, init_attr->qp_type,
				init_attr->port_num, gfp);
		if (err < 0) {
			ret = ERR_PTR(err);
			vfree(qp->r_rq.wq);
			goto bail_qp;
		}
		qp->ibqp.qp_num = err;
		qp->port_num = init_attr->port_num;
		qib_reset_qp(qp, init_attr->qp_type);
		break;

	default:
		/* Don't support raw QPs */
		ret = ERR_PTR(-ENOSYS);
		goto bail;
	}

	init_attr->cap.max_inline_data = 0;

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See qib_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		if (!qp->r_rq.wq) {
			__u64 offset = 0;

			err = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		} else {
			u32 s = sizeof(struct qib_rwq) + qp->r_rq.size * sz;

			qp->ip = qib_create_mmap_info(dev, s,
						      ibpd->uobject->context,
						      qp->r_rq.wq);
			if (!qp->ip) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_ip;
			}

			err = ib_copy_to_udata(udata, &(qp->ip->offset),
					       sizeof(qp->ip->offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		}
	}

	spin_lock(&dev->n_qps_lock);
	if (dev->n_qps_allocated == ib_qib_max_qps) {
		spin_unlock(&dev->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_qps_allocated++;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = &qp->ibqp;
	goto bail;

bail_ip:
	if (qp->ip)
		kref_put(&qp->ip->ref, qib_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
bail_qp:
	kfree(qp->s_hdr);
	kfree(qp);
bail_swq:
	vfree(swq);
bail:
	return ret;
}
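
/*
 * Illustrative sketch (userspace side, an assumption and not part of
 * this file): the offset copied to udata above is consumed by the
 * provider library, which maps the receive work queue with an
 * ordinary mmap() of the verbs device file; qib_mmap() translates the
 * offset back to the vmalloc'ed RWQ.  Roughly:
 *
 *	wq = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		  context->cmd_fd, offset);
 */
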
/**
 * qib_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int qib_destroy_qp(struct ib_qp *ibqp)
{
	struct qib_qp *qp = to_iqp(ibqp);
	struct qib_ibdev *dev = to_idev(ibqp->device);

	/* Make sure HW and driver activity is stopped. */
	spin_lock_irq(&qp->s_lock);
	if (qp->state != IB_QPS_RESET) {
		qp->state = IB_QPS_RESET;
		spin_lock(&dev->pending_lock);
		if (!list_empty(&qp->iowait))
			list_del_init(&qp->iowait);
		spin_unlock(&dev->pending_lock);
		qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
		spin_unlock_irq(&qp->s_lock);
		cancel_work_sync(&qp->s_work);
		del_timer_sync(&qp->s_timer);
		wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
		if (qp->s_tx) {
			qib_put_txreq(qp->s_tx);
			qp->s_tx = NULL;
		}
		remove_qp(dev, qp);
		wait_event(qp->wait, !atomic_read(&qp->refcount));
		clear_mr_refs(qp, 1);
	} else
		spin_unlock_irq(&qp->s_lock);

	/* all users cleaned up, mark it available */
	free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
	spin_lock(&dev->n_qps_lock);
	dev->n_qps_allocated--;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip)
		kref_put(&qp->ip->ref, qib_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	vfree(qp->s_wq);
	kfree(qp->s_hdr);
	kfree(qp);
	return 0;
}
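
/*
 * Illustrative lifecycle sketch (the init/teardown call sites are
 * elsewhere in the driver and are an assumption here):
 *
 *	qib_init_qpn_table(dd, &dev->qpn_table);   // device init
 *	...
 *	qpn = alloc_qpn(dd, &dev->qpn_table, ...); // qib_create_qp()
 *	free_qpn(&dev->qpn_table, qpn);            // qib_destroy_qp()
 *	...
 *	qib_free_qpn_table(&dev->qpn_table);       // device teardown
 */
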
/**
 * qib_init_qpn_table - initialize the QP number table for a device
 * @dd: the device data
 * @qpt: the QPN table
 */
void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt)
{
	spin_lock_init(&qpt->lock);
	qpt->last = 1;		/* start with QPN 2 */
	qpt->nmaps = 1;
	qpt->mask = dd->qpn_mask;
}

/**
 * qib_free_qpn_table - free the QP number table for a device
 * @qpt: the QPN table
 */
void qib_free_qpn_table(struct qib_qpn_table *qpt)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
		if (qpt->map[i].page)
			free_page((unsigned long) qpt->map[i].page);
}

/**
 * qib_get_credit - handle the credit field of an incoming AETH
 * @qp: the qp whose send credits to update
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void qib_get_credit(struct qib_qp *qp, u32 aeth)
{
	u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;

	/*
	 * If the credit is invalid, we can send
	 * as many packets as we like.  Otherwise, we have to
	 * honor the credit field.
	 */
	if (credit == QIB_AETH_CREDIT_INVAL) {
		if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
			qp->s_flags |= QIB_S_UNLIMITED_CREDIT;
			if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
				qib_schedule_send(qp);
			}
		}
	} else if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
		/* Compute new LSN (i.e., MSN + credit) */
		credit = (aeth + credit_table[credit]) & QIB_MSN_MASK;
		if (qib_cmp24(credit, qp->s_lsn) > 0) {
			qp->s_lsn = credit;
			if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
				qib_schedule_send(qp);
			}
		}
	}
}
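
/*
 * Illustrative arithmetic (not part of the driver): the LSN update
 * above.  Suppose the AETH carries MSN 10 and credit code 0x7
 * (credit_table[0x7] == 12 RWQEs):
 *
 *	credit = (10 + 12) & QIB_MSN_MASK;	// new limit SSN == 22
 *
 * qp->s_lsn only moves forward (qib_cmp24() handles 24-bit wrap), and
 * a sender blocked on QIB_S_WAIT_SSN_CREDIT is rescheduled.
 */
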
#ifdef CONFIG_DEBUG_FS

struct qib_qp_iter {
	struct qib_ibdev *dev;
	struct qib_qp *qp;
	int n;
};

/*
 * Allocate an iterator positioned at the first QP in the device's
 * hash table, or return NULL if there are no QPs (or no memory).
 */
struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev)
{
	struct qib_qp_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	if (qib_qp_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

/*
 * Advance to the next QP; returns 0 on success, non-zero when the
 * table is exhausted.  Must be called under rcu_read_lock().
 */
int qib_qp_iter_next(struct qib_qp_iter *iter)
{
	struct qib_ibdev *dev = iter->dev;
	int n = iter->n;
	int ret = 1;
	struct qib_qp *pqp = iter->qp;
	struct qib_qp *qp;

	for (; n < dev->qp_table_size; n++) {
		if (pqp)
			qp = rcu_dereference(pqp->next);
		else
			qp = rcu_dereference(dev->qp_table[n]);
		pqp = qp;
		if (qp) {
			iter->qp = qp;
			iter->n = n;
			return 0;
		}
	}
	return ret;
}

static const char * const qp_type_str[] = {
	"SMI", "GSI", "RC", "UC", "UD",
};

void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter)
{
	struct qib_swqe *wqe;
	struct qib_qp *qp = iter->qp;

	wqe = get_swqe_ptr(qp, qp->s_last);
	seq_printf(s,
		   "N %d QP%u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x\n",
		   iter->n,
		   qp->ibqp.qp_num,
		   qp_type_str[qp->ibqp.qp_type],
		   qp->state,
		   wqe->wr.opcode,
		   qp->s_hdrwords,
		   qp->s_flags,
		   atomic_read(&qp->s_dma_busy),
		   !list_empty(&qp->iowait),
		   qp->timeout,
		   wqe->ssn,
		   qp->s_lsn,
		   qp->s_last_psn,
		   qp->s_psn, qp->s_next_psn,
		   qp->s_sending_psn, qp->s_sending_hpsn,
		   qp->s_last, qp->s_acked, qp->s_cur,
		   qp->s_tail, qp->s_head, qp->s_size,
		   qp->remote_qpn,
		   qp->remote_ah_attr.dlid);
}

#endif
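
/*
 * Illustrative sketch (not part of this file): roughly how debugfs
 * code would drive the iterator above, under RCU since
 * qib_qp_iter_next() uses rcu_dereference():
 *
 *	rcu_read_lock();
 *	iter = qib_qp_iter_init(dev);
 *	if (iter) {
 *		do {
 *			qib_qp_iter_print(s, iter);
 *		} while (!qib_qp_iter_next(iter));
 *	}
 *	rcu_read_unlock();
 *	kfree(iter);
 */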