/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_cq.c 1369 2004-12-20 16:17:07Z roland $
 */

#include <linux/init.h>
#include <linux/hardirq.h>

#include <ib_pack.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"

enum {
        MTHCA_MAX_DIRECT_CQ_SIZE = 4 * PAGE_SIZE
};

enum {
        MTHCA_CQ_ENTRY_SIZE = 0x20
};
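/*
 * CQ buffers of at most MTHCA_MAX_DIRECT_CQ_SIZE bytes are allocated
 * as one physically contiguous DMA buffer ("direct"); larger CQs are
 * built from a list of separately allocated pages.  See
 * mthca_alloc_cq_buf() below.
 */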
/*
 * Must be packed because start is 64 bits but only aligned to 32 bits.
 */
struct mthca_cq_context {
        u32 flags;
        u64 start;
        u32 logsize_usrpage;
        u32 error_eqn;          /* Tavor only */
        u32 comp_eqn;
        u32 pd;
        u32 lkey;
        u32 last_notified_index;
        u32 solicit_producer_index;
        u32 consumer_index;
        u32 producer_index;
        u32 cqn;
        u32 ci_db;              /* Arbel only */
        u32 state_db;           /* Arbel only */
        u32 reserved;
} __attribute__((packed));

#define MTHCA_CQ_STATUS_OK          ( 0 << 28)
#define MTHCA_CQ_STATUS_OVERFLOW    ( 9 << 28)
#define MTHCA_CQ_STATUS_WRITE_FAIL  (10 << 28)
#define MTHCA_CQ_FLAG_TR            ( 1 << 18)
#define MTHCA_CQ_FLAG_OI            ( 1 << 17)
#define MTHCA_CQ_STATE_DISARMED     ( 0 <<  8)
#define MTHCA_CQ_STATE_ARMED        ( 1 <<  8)
#define MTHCA_CQ_STATE_ARMED_SOL    ( 4 <<  8)
#define MTHCA_EQ_STATE_FIRED        (10 <<  8)

enum {
        MTHCA_ERROR_CQE_OPCODE_MASK = 0xfe
};

enum {
        SYNDROME_LOCAL_LENGTH_ERR        = 0x01,
        SYNDROME_LOCAL_QP_OP_ERR         = 0x02,
        SYNDROME_LOCAL_EEC_OP_ERR        = 0x03,
        SYNDROME_LOCAL_PROT_ERR          = 0x04,
        SYNDROME_WR_FLUSH_ERR            = 0x05,
        SYNDROME_MW_BIND_ERR             = 0x06,
        SYNDROME_BAD_RESP_ERR            = 0x10,
        SYNDROME_LOCAL_ACCESS_ERR        = 0x11,
        SYNDROME_REMOTE_INVAL_REQ_ERR    = 0x12,
        SYNDROME_REMOTE_ACCESS_ERR       = 0x13,
        SYNDROME_REMOTE_OP_ERR           = 0x14,
        SYNDROME_RETRY_EXC_ERR           = 0x15,
        SYNDROME_RNR_RETRY_EXC_ERR       = 0x16,
        SYNDROME_LOCAL_RDD_VIOL_ERR      = 0x20,
        SYNDROME_REMOTE_INVAL_RD_REQ_ERR = 0x21,
        SYNDROME_REMOTE_ABORTED_ERR      = 0x22,
        SYNDROME_INVAL_EECN_ERR          = 0x23,
        SYNDROME_INVAL_EEC_STATE_ERR     = 0x24
};

struct mthca_cqe {
        u32 my_qpn;
        u32 my_ee;
        u32 rqpn;
        u16 sl_g_mlpath;
        u16 rlid;
        u32 imm_etype_pkey_eec;
        u32 byte_cnt;
        u32 wqe;
        u8  opcode;
        u8  is_send;
        u8  reserved;
        u8  owner;
};

struct mthca_err_cqe {
        u32 my_qpn;
        u32 reserved1[3];
        u8  syndrome;
        u8  reserved2;
        u16 db_cnt;
        u32 reserved3;
        u32 wqe;
        u8  opcode;
        u8  reserved4[2];
        u8  owner;
};

#define MTHCA_CQ_ENTRY_OWNER_SW      (0 << 7)
#define MTHCA_CQ_ENTRY_OWNER_HW      (1 << 7)

#define MTHCA_TAVOR_CQ_DB_INC_CI       (1 << 24)
#define MTHCA_TAVOR_CQ_DB_REQ_NOT      (2 << 24)
#define MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL  (3 << 24)
#define MTHCA_TAVOR_CQ_DB_SET_CI       (4 << 24)
#define MTHCA_TAVOR_CQ_DB_REQ_NOT_MULT (5 << 24)

#define MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL  (1 << 24)
#define MTHCA_ARBEL_CQ_DB_REQ_NOT      (2 << 24)
#define MTHCA_ARBEL_CQ_DB_REQ_NOT_MULT (3 << 24)

static inline struct mthca_cqe *get_cqe(struct mthca_cq *cq, int entry)
{
        if (cq->is_direct)
                return cq->queue.direct.buf + (entry * MTHCA_CQ_ENTRY_SIZE);
        else
                return cq->queue.page_list[entry * MTHCA_CQ_ENTRY_SIZE / PAGE_SIZE].buf
                        + (entry * MTHCA_CQ_ENTRY_SIZE) % PAGE_SIZE;
}

static inline struct mthca_cqe *cqe_sw(struct mthca_cq *cq, int i)
{
        struct mthca_cqe *cqe = get_cqe(cq, i);
        return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe;
}

static inline struct mthca_cqe *next_cqe_sw(struct mthca_cq *cq)
{
        return cqe_sw(cq, cq->cons_index & cq->ibcq.cqe);
}

static inline void set_cqe_hw(struct mthca_cqe *cqe)
{
        cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW;
}
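/*
 * Ownership of each CQE alternates between hardware and software:
 * the HCA gives an entry to software when it writes a completion,
 * and set_cqe_hw() gives a polled entry back to the hardware.  Note
 * that cq->cons_index is only ever masked with cq->ibcq.cqe
 * (== nent - 1), which relies on the number of CQ entries being a
 * power of two.
 */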
/*
 * incr is ignored in native Arbel (mem-free) mode, so cq->cons_index
 * should be correct before calling update_cons_index().
 */
static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq,
                                     int incr)
{
        u32 doorbell[2];

        if (dev->hca_type == ARBEL_NATIVE) {
                *cq->set_ci_db = cpu_to_be32(cq->cons_index);
                wmb();
        } else {
                doorbell[0] = cpu_to_be32(MTHCA_TAVOR_CQ_DB_INC_CI | cq->cqn);
                doorbell[1] = cpu_to_be32(incr - 1);

                mthca_write64(doorbell,
                              dev->kar + MTHCA_CQ_DOORBELL,
                              MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
        }
}

void mthca_cq_event(struct mthca_dev *dev, u32 cqn)
{
        struct mthca_cq *cq;

        cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));

        if (!cq) {
                mthca_warn(dev, "Completion event for bogus CQ %08x\n", cqn);
                return;
        }

        ++cq->arm_sn;

        cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}

void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn)
{
        struct mthca_cq *cq;
        struct mthca_cqe *cqe;
        int prod_index;
        int nfreed = 0;

        spin_lock_irq(&dev->cq_table.lock);
        cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
        if (cq)
                atomic_inc(&cq->refcount);
        spin_unlock_irq(&dev->cq_table.lock);

        if (!cq)
                return;

        spin_lock_irq(&cq->lock);

        /*
         * First we need to find the current producer index, so we
         * know where to start cleaning from.  It doesn't matter if HW
         * adds new entries after this loop -- the QP we're worried
         * about is already in RESET, so the new entries won't come
         * from our QP and therefore don't need to be checked.
         */
        for (prod_index = cq->cons_index;
             cqe_sw(cq, prod_index & cq->ibcq.cqe);
             ++prod_index)
                if (prod_index == cq->cons_index + cq->ibcq.cqe)
                        break;

        if (0)
                mthca_dbg(dev, "Cleaning QPN %06x from CQN %06x; ci %d, pi %d\n",
                          qpn, cqn, cq->cons_index, prod_index);

        /*
         * Now sweep backwards through the CQ, removing CQ entries
         * that match our QP by copying older entries on top of them.
         */
        while (prod_index > cq->cons_index) {
                cqe = get_cqe(cq, (prod_index - 1) & cq->ibcq.cqe);
                if (cqe->my_qpn == cpu_to_be32(qpn))
                        ++nfreed;
                else if (nfreed)
                        memcpy(get_cqe(cq, (prod_index - 1 + nfreed) &
                                       cq->ibcq.cqe),
                               cqe,
                               MTHCA_CQ_ENTRY_SIZE);
                --prod_index;
        }

        if (nfreed) {
                wmb();
                cq->cons_index += nfreed;
                update_cons_index(dev, cq, nfreed);
        }

        spin_unlock_irq(&cq->lock);
        if (atomic_dec_and_test(&cq->refcount))
                wake_up(&cq->wait);
}

static void dump_cqe(struct mthca_cqe *cqe)
{
        int j;

        for (j = 0; j < 8; ++j)
                printk(KERN_DEBUG "  [%2x] %08x\n",
                       j * 4, be32_to_cpu(((u32 *) cqe)[j]));
}
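/*
 * handle_error_cqe() maps a hardware error syndrome onto an IB work
 * completion status.  If the failed WQE is not the last one in its
 * chain and the doorbell count is not yet used up, the CQE is
 * rewritten as a flush error and left in place (*free_cqe = 0) so
 * that the next poll reports the next flushed WQE.
 */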
static int handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
                            struct mthca_qp *qp, int wqe_index, int is_send,
                            struct mthca_err_cqe *cqe,
                            struct ib_wc *entry, int *free_cqe)
{
        int err;
        int dbd;
        u32 new_wqe;

        if (cqe->syndrome != SYNDROME_WR_FLUSH_ERR) {
                mthca_dbg(dev, "%x/%d: error CQE -> QPN %06x, WQE @ %08x\n",
                          cq->cqn, cq->cons_index, be32_to_cpu(cqe->my_qpn),
                          be32_to_cpu(cqe->wqe));
                dump_cqe((struct mthca_cqe *) cqe);
        }

        /*
         * For completions in error, only work request ID, status (and
         * freed resource count for RD) have to be set.
         */
        switch (cqe->syndrome) {
        case SYNDROME_LOCAL_LENGTH_ERR:
                entry->status = IB_WC_LOC_LEN_ERR;
                break;
        case SYNDROME_LOCAL_QP_OP_ERR:
                entry->status = IB_WC_LOC_QP_OP_ERR;
                break;
        case SYNDROME_LOCAL_EEC_OP_ERR:
                entry->status = IB_WC_LOC_EEC_OP_ERR;
                break;
        case SYNDROME_LOCAL_PROT_ERR:
                entry->status = IB_WC_LOC_PROT_ERR;
                break;
        case SYNDROME_WR_FLUSH_ERR:
                entry->status = IB_WC_WR_FLUSH_ERR;
                break;
        case SYNDROME_MW_BIND_ERR:
                entry->status = IB_WC_MW_BIND_ERR;
                break;
        case SYNDROME_BAD_RESP_ERR:
                entry->status = IB_WC_BAD_RESP_ERR;
                break;
        case SYNDROME_LOCAL_ACCESS_ERR:
                entry->status = IB_WC_LOC_ACCESS_ERR;
                break;
        case SYNDROME_REMOTE_INVAL_REQ_ERR:
                entry->status = IB_WC_REM_INV_REQ_ERR;
                break;
        case SYNDROME_REMOTE_ACCESS_ERR:
                entry->status = IB_WC_REM_ACCESS_ERR;
                break;
        case SYNDROME_REMOTE_OP_ERR:
                entry->status = IB_WC_REM_OP_ERR;
                break;
        case SYNDROME_RETRY_EXC_ERR:
                entry->status = IB_WC_RETRY_EXC_ERR;
                break;
        case SYNDROME_RNR_RETRY_EXC_ERR:
                entry->status = IB_WC_RNR_RETRY_EXC_ERR;
                break;
        case SYNDROME_LOCAL_RDD_VIOL_ERR:
                entry->status = IB_WC_LOC_RDD_VIOL_ERR;
                break;
        case SYNDROME_REMOTE_INVAL_RD_REQ_ERR:
                entry->status = IB_WC_REM_INV_RD_REQ_ERR;
                break;
        case SYNDROME_REMOTE_ABORTED_ERR:
                entry->status = IB_WC_REM_ABORT_ERR;
                break;
        case SYNDROME_INVAL_EECN_ERR:
                entry->status = IB_WC_INV_EECN_ERR;
                break;
        case SYNDROME_INVAL_EEC_STATE_ERR:
                entry->status = IB_WC_INV_EEC_STATE_ERR;
                break;
        default:
                entry->status = IB_WC_GENERAL_ERR;
                break;
        }

        err = mthca_free_err_wqe(dev, qp, is_send, wqe_index, &dbd, &new_wqe);
        if (err)
                return err;

        /*
         * If we're at the end of the WQE chain, or we've used up our
         * doorbell count, free the CQE.  Otherwise just update it for
         * the next poll operation.
         */
        if (!(new_wqe & cpu_to_be32(0x3f)) || (!cqe->db_cnt && dbd))
                return 0;

        cqe->db_cnt   = cpu_to_be16(be16_to_cpu(cqe->db_cnt) - dbd);
        cqe->wqe      = new_wqe;
        cqe->syndrome = SYNDROME_WR_FLUSH_ERR;

        *free_cqe = 0;

        return 0;
}
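/*
 * mthca_poll_one() consumes at most one software-owned CQE, filling
 * in *entry; it returns -EAGAIN when the CQ is empty.  The QP found
 * for the previous entry is cached in *cur_qp, so a burst of
 * completions for the same QP needs only one table lookup.  Callers
 * must hold cq->lock.
 */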
static inline int mthca_poll_one(struct mthca_dev *dev,
                                 struct mthca_cq *cq,
                                 struct mthca_qp **cur_qp,
                                 int *freed,
                                 struct ib_wc *entry)
{
        struct mthca_wq *wq;
        struct mthca_cqe *cqe;
        int wqe_index;
        int is_error;
        int is_send;
        int free_cqe = 1;
        int err = 0;

        cqe = next_cqe_sw(cq);
        if (!cqe)
                return -EAGAIN;

        /*
         * Make sure we read CQ entry contents after we've checked the
         * ownership bit.
         */
        rmb();

        if (0) {
                mthca_dbg(dev, "%x/%d: CQE -> QPN %06x, WQE @ %08x\n",
                          cq->cqn, cq->cons_index, be32_to_cpu(cqe->my_qpn),
                          be32_to_cpu(cqe->wqe));

                dump_cqe(cqe);
        }

        is_error = (cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) ==
                MTHCA_ERROR_CQE_OPCODE_MASK;
        is_send  = is_error ? cqe->opcode & 0x01 : cqe->is_send & 0x80;

        if (!*cur_qp || be32_to_cpu(cqe->my_qpn) != (*cur_qp)->qpn) {
                /*
                 * We do not have to take the QP table lock here,
                 * because CQs will be locked while QPs are removed
                 * from the table.
                 */
                *cur_qp = mthca_array_get(&dev->qp_table.qp,
                                          be32_to_cpu(cqe->my_qpn) &
                                          (dev->limits.num_qps - 1));
                if (!*cur_qp) {
                        mthca_warn(dev, "CQ entry for unknown QP %06x\n",
                                   be32_to_cpu(cqe->my_qpn) & 0xffffff);
                        err = -EINVAL;
                        goto out;
                }
        }

        entry->qp_num = (*cur_qp)->qpn;

        if (is_send) {
                wq = &(*cur_qp)->sq;
                wqe_index = ((be32_to_cpu(cqe->wqe) - (*cur_qp)->send_wqe_offset)
                             >> wq->wqe_shift);
                entry->wr_id = (*cur_qp)->wrid[wqe_index +
                                               (*cur_qp)->rq.max];
        } else {
                wq = &(*cur_qp)->rq;
                wqe_index = be32_to_cpu(cqe->wqe) >> wq->wqe_shift;
                entry->wr_id = (*cur_qp)->wrid[wqe_index];
        }

        if (wq->last_comp < wqe_index)
                wq->tail += wqe_index - wq->last_comp;
        else
                wq->tail += wqe_index + wq->max - wq->last_comp;

        wq->last_comp = wqe_index;

        if (0)
                mthca_dbg(dev, "%s completion for QP %06x, index %d (nr %d)\n",
                          is_send ? "Send" : "Receive",
                          (*cur_qp)->qpn, wqe_index, wq->max);

        if (is_error) {
                err = handle_error_cqe(dev, cq, *cur_qp, wqe_index, is_send,
                                       (struct mthca_err_cqe *) cqe,
                                       entry, &free_cqe);
                goto out;
        }

        if (is_send) {
                entry->opcode = IB_WC_SEND; /* XXX */
        } else {
                entry->byte_len = be32_to_cpu(cqe->byte_cnt);
                switch (cqe->opcode & 0x1f) {
                case IB_OPCODE_SEND_LAST_WITH_IMMEDIATE:
                case IB_OPCODE_SEND_ONLY_WITH_IMMEDIATE:
                        entry->wc_flags = IB_WC_WITH_IMM;
                        entry->imm_data = cqe->imm_etype_pkey_eec;
                        entry->opcode = IB_WC_RECV;
                        break;
                case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE:
                case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE:
                        entry->wc_flags = IB_WC_WITH_IMM;
                        entry->imm_data = cqe->imm_etype_pkey_eec;
                        entry->opcode = IB_WC_RECV_RDMA_WITH_IMM;
                        break;
                default:
                        entry->wc_flags = 0;
                        entry->opcode = IB_WC_RECV;
                        break;
                }
                entry->slid           = be16_to_cpu(cqe->rlid);
                entry->sl             = be16_to_cpu(cqe->sl_g_mlpath) >> 12;
                entry->src_qp         = be32_to_cpu(cqe->rqpn) & 0xffffff;
                entry->dlid_path_bits = be16_to_cpu(cqe->sl_g_mlpath) & 0x7f;
                entry->pkey_index     = be32_to_cpu(cqe->imm_etype_pkey_eec) >> 16;
                entry->wc_flags      |= be16_to_cpu(cqe->sl_g_mlpath) & 0x80 ?
                                        IB_WC_GRH : 0;
        }

        entry->status = IB_WC_SUCCESS;

 out:
        if (likely(free_cqe)) {
                set_cqe_hw(cqe);
                ++(*freed);
                ++cq->cons_index;
        }

        return err;
}

int mthca_poll_cq(struct ib_cq *ibcq, int num_entries,
                  struct ib_wc *entry)
{
        struct mthca_dev *dev = to_mdev(ibcq->device);
        struct mthca_cq *cq = to_mcq(ibcq);
        struct mthca_qp *qp = NULL;
        unsigned long flags;
        int err = 0;
        int freed = 0;
        int npolled;

        spin_lock_irqsave(&cq->lock, flags);

        for (npolled = 0; npolled < num_entries; ++npolled) {
                err = mthca_poll_one(dev, cq, &qp,
                                     &freed, entry + npolled);
                if (err)
                        break;
        }

        if (freed) {
                wmb();
                update_cons_index(dev, cq, freed);
        }

        spin_unlock_irqrestore(&cq->lock, flags);

        return err == 0 || err == -EAGAIN ? npolled : err;
}
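/*
 * Arming a CQ asks the HCA to generate a completion event.  On Tavor
 * a single doorbell write suffices; on Arbel (mem-free) the consumer
 * index and arm sequence number are written to the doorbell record in
 * host memory first and then echoed through the MMIO doorbell, as the
 * two functions below show.
 */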
int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify)
{
        u32 doorbell[2];

        doorbell[0] = cpu_to_be32((notify == IB_CQ_SOLICITED ?
                                   MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL :
                                   MTHCA_TAVOR_CQ_DB_REQ_NOT)      |
                                  to_mcq(cq)->cqn);
        doorbell[1] = 0xffffffff;

        mthca_write64(doorbell,
                      to_mdev(cq->device)->kar + MTHCA_CQ_DOORBELL,
                      MTHCA_GET_DOORBELL_LOCK(&to_mdev(cq->device)->doorbell_lock));

        return 0;
}

int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
{
        struct mthca_cq *cq = to_mcq(ibcq);
        u32 doorbell[2];
        u32 sn;
        u32 ci;

        sn = cq->arm_sn & 3;
        ci = cpu_to_be32(cq->cons_index);

        doorbell[0] = ci;
        doorbell[1] = cpu_to_be32((cq->cqn << 8) | (2 << 5) | (sn << 3) |
                                  (notify == IB_CQ_SOLICITED ? 1 : 2));

        mthca_write_db_rec(doorbell, cq->arm_db);

        /*
         * Make sure that the doorbell record in host memory is
         * written before ringing the doorbell via PCI MMIO.
         */
        wmb();

        doorbell[0] = cpu_to_be32((sn << 28)                       |
                                  (notify == IB_CQ_SOLICITED ?
                                   MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL :
                                   MTHCA_ARBEL_CQ_DB_REQ_NOT)      |
                                  cq->cqn);
        doorbell[1] = ci;

        mthca_write64(doorbell,
                      to_mdev(ibcq->device)->kar + MTHCA_CQ_DOORBELL,
                      MTHCA_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->doorbell_lock));

        return 0;
}

static void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq *cq)
{
        int i;
        int size;

        if (cq->is_direct)
                pci_free_consistent(dev->pdev,
                                    (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE,
                                    cq->queue.direct.buf,
                                    pci_unmap_addr(&cq->queue.direct,
                                                   mapping));
        else {
                size = (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE;
                for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
                        if (cq->queue.page_list[i].buf)
                                pci_free_consistent(dev->pdev, PAGE_SIZE,
                                                    cq->queue.page_list[i].buf,
                                                    pci_unmap_addr(&cq->queue.page_list[i],
                                                                   mapping));

                kfree(cq->queue.page_list);
        }
}
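/*
 * Allocate and map the CQE ring.  For a direct buffer, if the DMA
 * address the allocator returns is less aligned than the buffer's
 * natural order, the buffer is described to the HCA as a larger
 * number of smaller naturally aligned chunks (the shift/npages
 * fix-up below).  Either way the ring is registered as one memory
 * region, cq->mr.
 */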
static int mthca_alloc_cq_buf(struct mthca_dev *dev, int size,
                              struct mthca_cq *cq)
{
        int err = -ENOMEM;
        int npages, shift;
        u64 *dma_list = NULL;
        dma_addr_t t;
        int i;

        if (size <= MTHCA_MAX_DIRECT_CQ_SIZE) {
                cq->is_direct = 1;
                npages        = 1;
                shift         = get_order(size) + PAGE_SHIFT;

                cq->queue.direct.buf = pci_alloc_consistent(dev->pdev,
                                                            size, &t);
                if (!cq->queue.direct.buf)
                        return -ENOMEM;

                pci_unmap_addr_set(&cq->queue.direct, mapping, t);

                memset(cq->queue.direct.buf, 0, size);

                while (t & ((1 << shift) - 1)) {
                        --shift;
                        npages *= 2;
                }

                dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
                if (!dma_list)
                        goto err_free;

                for (i = 0; i < npages; ++i)
                        dma_list[i] = t + i * (1 << shift);
        } else {
                cq->is_direct = 0;
                npages        = (size + PAGE_SIZE - 1) / PAGE_SIZE;
                shift         = PAGE_SHIFT;

                dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
                if (!dma_list)
                        return -ENOMEM;

                cq->queue.page_list = kmalloc(npages * sizeof *cq->queue.page_list,
                                              GFP_KERNEL);
                if (!cq->queue.page_list)
                        goto err_out;

                for (i = 0; i < npages; ++i)
                        cq->queue.page_list[i].buf = NULL;

                for (i = 0; i < npages; ++i) {
                        cq->queue.page_list[i].buf =
                                pci_alloc_consistent(dev->pdev, PAGE_SIZE, &t);
                        if (!cq->queue.page_list[i].buf)
                                goto err_free;

                        dma_list[i] = t;
                        pci_unmap_addr_set(&cq->queue.page_list[i], mapping, t);

                        memset(cq->queue.page_list[i].buf, 0, PAGE_SIZE);
                }
        }

        err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num,
                                  dma_list, shift, npages,
                                  0, size,
                                  MTHCA_MPT_FLAG_LOCAL_WRITE |
                                  MTHCA_MPT_FLAG_LOCAL_READ,
                                  &cq->mr);
        if (err)
                goto err_free;

        kfree(dma_list);

        return 0;

err_free:
        mthca_free_cq_buf(dev, cq);

err_out:
        kfree(dma_list);

        return err;
}

int mthca_init_cq(struct mthca_dev *dev, int nent,
                  struct mthca_cq *cq)
{
        int size = nent * MTHCA_CQ_ENTRY_SIZE;
        void *mailbox = NULL;
        struct mthca_cq_context *cq_context;
        int err = -ENOMEM;
        u8 status;
        int i;

        might_sleep();

        cq->ibcq.cqe = nent - 1;

        cq->cqn = mthca_alloc(&dev->cq_table.alloc);
        if (cq->cqn == -1)
                return -ENOMEM;

        if (dev->hca_type == ARBEL_NATIVE) {
                cq->arm_sn = 1;

                err = mthca_table_get(dev, dev->cq_table.table, cq->cqn);
                if (err)
                        goto err_out;

                err = -ENOMEM;

                cq->set_ci_db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_CQ_SET_CI,
                                                     cq->cqn, &cq->set_ci_db);
                if (cq->set_ci_db_index < 0)
                        goto err_out_icm;

                cq->arm_db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_CQ_ARM,
                                                  cq->cqn, &cq->arm_db);
                if (cq->arm_db_index < 0)
                        goto err_out_ci;
        }

        mailbox = kmalloc(sizeof (struct mthca_cq_context) + MTHCA_CMD_MAILBOX_EXTRA,
                          GFP_KERNEL);
        if (!mailbox)
                goto err_out_mailbox;

        cq_context = MAILBOX_ALIGN(mailbox);

        err = mthca_alloc_cq_buf(dev, size, cq);
        if (err)
                goto err_out_mailbox;

        for (i = 0; i < nent; ++i)
                set_cqe_hw(get_cqe(cq, i));

        spin_lock_init(&cq->lock);
        atomic_set(&cq->refcount, 1);
        init_waitqueue_head(&cq->wait);

        memset(cq_context, 0, sizeof *cq_context);
        cq_context->flags           = cpu_to_be32(MTHCA_CQ_STATUS_OK      |
                                                  MTHCA_CQ_STATE_DISARMED |
                                                  MTHCA_CQ_FLAG_TR);
        cq_context->start           = cpu_to_be64(0);
        cq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24 |
                                                  dev->driver_uar.index);
        cq_context->error_eqn       = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn);
        cq_context->comp_eqn        = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_COMP].eqn);
        cq_context->pd              = cpu_to_be32(dev->driver_pd.pd_num);
        cq_context->lkey            = cpu_to_be32(cq->mr.ibmr.lkey);
        cq_context->cqn             = cpu_to_be32(cq->cqn);

        if (dev->hca_type == ARBEL_NATIVE) {
                cq_context->ci_db    = cpu_to_be32(cq->set_ci_db_index);
                cq_context->state_db = cpu_to_be32(cq->arm_db_index);
        }

        err = mthca_SW2HW_CQ(dev, cq_context, cq->cqn, &status);
        if (err) {
                mthca_warn(dev, "SW2HW_CQ failed (%d)\n", err);
                goto err_out_free_mr;
        }

        if (status) {
                mthca_warn(dev, "SW2HW_CQ returned status 0x%02x\n",
                           status);
                err = -EINVAL;
                goto err_out_free_mr;
        }

        spin_lock_irq(&dev->cq_table.lock);
        err = mthca_array_set(&dev->cq_table.cq,
                              cq->cqn & (dev->limits.num_cqs - 1),
                              cq);
        if (err) {
                spin_unlock_irq(&dev->cq_table.lock);
                goto err_out_free_mr;
        }
        spin_unlock_irq(&dev->cq_table.lock);

        cq->cons_index = 0;

        kfree(mailbox);

        return 0;

err_out_free_mr:
        mthca_free_mr(dev, &cq->mr);
        mthca_free_cq_buf(dev, cq);

err_out_mailbox:
        kfree(mailbox);

        /*
         * Doorbell records and the ICM table entry only exist in
         * mem-free (Arbel) mode; don't try to free them on Tavor.
         */
        if (dev->hca_type != ARBEL_NATIVE)
                goto err_out;

        mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index);

err_out_ci:
        mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index);

err_out_icm:
        mthca_table_put(dev, dev->cq_table.table, cq->cqn);

err_out:
        mthca_free(&dev->cq_table.alloc, cq->cqn);

        return err;
}
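/*
 * Tear-down order in mthca_free_cq() matters: move the CQ back to
 * software ownership (HW2SW_CQ), drop it from the table so no new
 * events can find it, synchronize with any interrupt handler that
 * may still hold a pointer to it, wait for the reference count to
 * drop, and only then free the buffers.
 */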
void mthca_free_cq(struct mthca_dev *dev,
                   struct mthca_cq *cq)
{
        void *mailbox;
        int err;
        u8 status;

        might_sleep();

        mailbox = kmalloc(sizeof (struct mthca_cq_context) + MTHCA_CMD_MAILBOX_EXTRA,
                          GFP_KERNEL);
        if (!mailbox) {
                mthca_warn(dev, "No memory for mailbox to free CQ.\n");
                return;
        }

        err = mthca_HW2SW_CQ(dev, MAILBOX_ALIGN(mailbox), cq->cqn, &status);
        if (err)
                mthca_warn(dev, "HW2SW_CQ failed (%d)\n", err);
        else if (status)
                mthca_warn(dev, "HW2SW_CQ returned status 0x%02x\n",
                           status);

        if (0) {
                u32 *ctx = MAILBOX_ALIGN(mailbox);
                int j;

                printk(KERN_ERR "context for CQN %x (cons index %x, next sw %d)\n",
                       cq->cqn, cq->cons_index, !!next_cqe_sw(cq));
                for (j = 0; j < 16; ++j)
                        printk(KERN_ERR "[%2x] %08x\n", j * 4, be32_to_cpu(ctx[j]));
        }

        spin_lock_irq(&dev->cq_table.lock);
        mthca_array_clear(&dev->cq_table.cq,
                          cq->cqn & (dev->limits.num_cqs - 1));
        spin_unlock_irq(&dev->cq_table.lock);

        if (dev->mthca_flags & MTHCA_FLAG_MSI_X)
                synchronize_irq(dev->eq_table.eq[MTHCA_EQ_COMP].msi_x_vector);
        else
                synchronize_irq(dev->pdev->irq);

        atomic_dec(&cq->refcount);
        wait_event(cq->wait, !atomic_read(&cq->refcount));

        mthca_free_mr(dev, &cq->mr);
        mthca_free_cq_buf(dev, cq);

        if (dev->hca_type == ARBEL_NATIVE) {
                mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM,    cq->arm_db_index);
                mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index);
                mthca_table_put(dev, dev->cq_table.table, cq->cqn);
        }

        mthca_free(&dev->cq_table.alloc, cq->cqn);
        kfree(mailbox);
}

int __devinit mthca_init_cq_table(struct mthca_dev *dev)
{
        int err;

        spin_lock_init(&dev->cq_table.lock);

        err = mthca_alloc_init(&dev->cq_table.alloc,
                               dev->limits.num_cqs,
                               (1 << 24) - 1,
                               dev->limits.reserved_cqs);
        if (err)
                return err;

        err = mthca_array_init(&dev->cq_table.cq,
                               dev->limits.num_cqs);
        if (err)
                mthca_alloc_cleanup(&dev->cq_table.alloc);

        return err;
}

void __devexit mthca_cleanup_cq_table(struct mthca_dev *dev)
{
        mthca_array_cleanup(&dev->cq_table.cq, dev->limits.num_cqs);
        mthca_alloc_cleanup(&dev->cq_table.alloc);
}