1 /* 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 */ 32 33 #include <linux/kref.h> 34 #include <rdma/ib_umem.h> 35 #include <rdma/ib_user_verbs.h> 36 #include <rdma/ib_cache.h> 37 #include "mlx5_ib.h" 38 #include "user.h" 39 40 static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq) 41 { 42 struct ib_cq *ibcq = &to_mibcq(cq)->ibcq; 43 44 ibcq->comp_handler(ibcq, ibcq->cq_context); 45 } 46 47 static void mlx5_ib_cq_event(struct mlx5_core_cq *mcq, enum mlx5_event type) 48 { 49 struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq); 50 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); 51 struct ib_cq *ibcq = &cq->ibcq; 52 struct ib_event event; 53 54 if (type != MLX5_EVENT_TYPE_CQ_ERROR) { 55 mlx5_ib_warn(dev, "Unexpected event type %d on CQ %06x\n", 56 type, mcq->cqn); 57 return; 58 } 59 60 if (ibcq->event_handler) { 61 event.device = &dev->ib_dev; 62 event.event = IB_EVENT_CQ_ERR; 63 event.element.cq = ibcq; 64 ibcq->event_handler(&event, ibcq->cq_context); 65 } 66 } 67 68 static void *get_cqe_from_buf(struct mlx5_ib_cq_buf *buf, int n, int size) 69 { 70 return mlx5_buf_offset(&buf->buf, n * size); 71 } 72 73 static void *get_cqe(struct mlx5_ib_cq *cq, int n) 74 { 75 return get_cqe_from_buf(&cq->buf, n, cq->mcq.cqe_sz); 76 } 77 78 static u8 sw_ownership_bit(int n, int nent) 79 { 80 return (n & nent) ? 1 : 0; 81 } 82 83 static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n) 84 { 85 void *cqe = get_cqe(cq, n & cq->ibcq.cqe); 86 struct mlx5_cqe64 *cqe64; 87 88 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; 89 90 if (likely((cqe64->op_own) >> 4 != MLX5_CQE_INVALID) && 91 !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) { 92 return cqe; 93 } else { 94 return NULL; 95 } 96 } 97 98 static void *next_cqe_sw(struct mlx5_ib_cq *cq) 99 { 100 return get_sw_cqe(cq, cq->mcq.cons_index); 101 } 102 103 static enum ib_wc_opcode get_umr_comp(struct mlx5_ib_wq *wq, int idx) 104 { 105 switch (wq->wr_data[idx]) { 106 case MLX5_IB_WR_UMR: 107 return 0; 108 109 case IB_WR_LOCAL_INV: 110 return IB_WC_LOCAL_INV; 111 112 case IB_WR_REG_MR: 113 return IB_WC_REG_MR; 114 115 default: 116 pr_warn("unknown completion status\n"); 117 return 0; 118 } 119 } 120 121 static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe, 122 struct mlx5_ib_wq *wq, int idx) 123 { 124 wc->wc_flags = 0; 125 switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) { 126 case MLX5_OPCODE_RDMA_WRITE_IMM: 127 wc->wc_flags |= IB_WC_WITH_IMM; 128 case MLX5_OPCODE_RDMA_WRITE: 129 wc->opcode = IB_WC_RDMA_WRITE; 130 break; 131 case MLX5_OPCODE_SEND_IMM: 132 wc->wc_flags |= IB_WC_WITH_IMM; 133 case MLX5_OPCODE_SEND: 134 case MLX5_OPCODE_SEND_INVAL: 135 wc->opcode = IB_WC_SEND; 136 break; 137 case MLX5_OPCODE_RDMA_READ: 138 wc->opcode = IB_WC_RDMA_READ; 139 wc->byte_len = be32_to_cpu(cqe->byte_cnt); 140 break; 141 case MLX5_OPCODE_ATOMIC_CS: 142 wc->opcode = IB_WC_COMP_SWAP; 143 wc->byte_len = 8; 144 break; 145 case MLX5_OPCODE_ATOMIC_FA: 146 wc->opcode = IB_WC_FETCH_ADD; 147 wc->byte_len = 8; 148 break; 149 case MLX5_OPCODE_ATOMIC_MASKED_CS: 150 wc->opcode = IB_WC_MASKED_COMP_SWAP; 151 wc->byte_len = 8; 152 break; 153 case MLX5_OPCODE_ATOMIC_MASKED_FA: 154 wc->opcode = IB_WC_MASKED_FETCH_ADD; 155 wc->byte_len = 8; 156 break; 157 case MLX5_OPCODE_UMR: 158 wc->opcode = get_umr_comp(wq, idx); 159 break; 160 } 161 } 162 163 enum { 164 MLX5_GRH_IN_BUFFER = 1, 165 MLX5_GRH_IN_CQE = 2, 166 }; 167 168 static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe, 169 struct mlx5_ib_qp *qp) 170 { 171 enum rdma_link_layer ll = rdma_port_get_link_layer(qp->ibqp.device, 1); 172 struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device); 173 struct mlx5_ib_srq *srq; 174 struct mlx5_ib_wq *wq; 175 u16 wqe_ctr; 176 u8 g; 177 178 if (qp->ibqp.srq || qp->ibqp.xrcd) { 179 struct mlx5_core_srq *msrq = NULL; 180 181 if (qp->ibqp.xrcd) { 182 msrq = mlx5_core_get_srq(dev->mdev, 183 be32_to_cpu(cqe->srqn)); 184 srq = to_mibsrq(msrq); 185 } else { 186 srq = to_msrq(qp->ibqp.srq); 187 } 188 if (srq) { 189 wqe_ctr = be16_to_cpu(cqe->wqe_counter); 190 wc->wr_id = srq->wrid[wqe_ctr]; 191 mlx5_ib_free_srq_wqe(srq, wqe_ctr); 192 if (msrq && atomic_dec_and_test(&msrq->refcount)) 193 complete(&msrq->free); 194 } 195 } else { 196 wq = &qp->rq; 197 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; 198 ++wq->tail; 199 } 200 wc->byte_len = be32_to_cpu(cqe->byte_cnt); 201 202 switch (cqe->op_own >> 4) { 203 case MLX5_CQE_RESP_WR_IMM: 204 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM; 205 wc->wc_flags = IB_WC_WITH_IMM; 206 wc->ex.imm_data = cqe->imm_inval_pkey; 207 break; 208 case MLX5_CQE_RESP_SEND: 209 wc->opcode = IB_WC_RECV; 210 wc->wc_flags = IB_WC_IP_CSUM_OK; 211 if (unlikely(!((cqe->hds_ip_ext & CQE_L3_OK) && 212 (cqe->hds_ip_ext & CQE_L4_OK)))) 213 wc->wc_flags = 0; 214 break; 215 case MLX5_CQE_RESP_SEND_IMM: 216 wc->opcode = IB_WC_RECV; 217 wc->wc_flags = IB_WC_WITH_IMM; 218 wc->ex.imm_data = cqe->imm_inval_pkey; 219 break; 220 case MLX5_CQE_RESP_SEND_INV: 221 wc->opcode = IB_WC_RECV; 222 wc->wc_flags = IB_WC_WITH_INVALIDATE; 223 wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey); 224 break; 225 } 226 wc->slid = be16_to_cpu(cqe->slid); 227 wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf; 228 wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff; 229 wc->dlid_path_bits = cqe->ml_path; 230 g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3; 231 wc->wc_flags |= g ? IB_WC_GRH : 0; 232 if (unlikely(is_qp1(qp->ibqp.qp_type))) { 233 u16 pkey = be32_to_cpu(cqe->imm_inval_pkey) & 0xffff; 234 235 ib_find_cached_pkey(&dev->ib_dev, qp->port, pkey, 236 &wc->pkey_index); 237 } else { 238 wc->pkey_index = 0; 239 } 240 241 if (ll != IB_LINK_LAYER_ETHERNET) 242 return; 243 244 switch (wc->sl & 0x3) { 245 case MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH: 246 wc->network_hdr_type = RDMA_NETWORK_IB; 247 break; 248 case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6: 249 wc->network_hdr_type = RDMA_NETWORK_IPV6; 250 break; 251 case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4: 252 wc->network_hdr_type = RDMA_NETWORK_IPV4; 253 break; 254 } 255 wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE; 256 } 257 258 static void dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe) 259 { 260 __be32 *p = (__be32 *)cqe; 261 int i; 262 263 mlx5_ib_warn(dev, "dump error cqe\n"); 264 for (i = 0; i < sizeof(*cqe) / 16; i++, p += 4) 265 pr_info("%08x %08x %08x %08x\n", be32_to_cpu(p[0]), 266 be32_to_cpu(p[1]), be32_to_cpu(p[2]), 267 be32_to_cpu(p[3])); 268 } 269 270 static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev, 271 struct mlx5_err_cqe *cqe, 272 struct ib_wc *wc) 273 { 274 int dump = 1; 275 276 switch (cqe->syndrome) { 277 case MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR: 278 wc->status = IB_WC_LOC_LEN_ERR; 279 break; 280 case MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR: 281 wc->status = IB_WC_LOC_QP_OP_ERR; 282 break; 283 case MLX5_CQE_SYNDROME_LOCAL_PROT_ERR: 284 wc->status = IB_WC_LOC_PROT_ERR; 285 break; 286 case MLX5_CQE_SYNDROME_WR_FLUSH_ERR: 287 dump = 0; 288 wc->status = IB_WC_WR_FLUSH_ERR; 289 break; 290 case MLX5_CQE_SYNDROME_MW_BIND_ERR: 291 wc->status = IB_WC_MW_BIND_ERR; 292 break; 293 case MLX5_CQE_SYNDROME_BAD_RESP_ERR: 294 wc->status = IB_WC_BAD_RESP_ERR; 295 break; 296 case MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR: 297 wc->status = IB_WC_LOC_ACCESS_ERR; 298 break; 299 case MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR: 300 wc->status = IB_WC_REM_INV_REQ_ERR; 301 break; 302 case MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR: 303 wc->status = IB_WC_REM_ACCESS_ERR; 304 break; 305 case MLX5_CQE_SYNDROME_REMOTE_OP_ERR: 306 wc->status = IB_WC_REM_OP_ERR; 307 break; 308 case MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR: 309 wc->status = IB_WC_RETRY_EXC_ERR; 310 dump = 0; 311 break; 312 case MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR: 313 wc->status = IB_WC_RNR_RETRY_EXC_ERR; 314 dump = 0; 315 break; 316 case MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR: 317 wc->status = IB_WC_REM_ABORT_ERR; 318 break; 319 default: 320 wc->status = IB_WC_GENERAL_ERR; 321 break; 322 } 323 324 wc->vendor_err = cqe->vendor_err_synd; 325 if (dump) 326 dump_cqe(dev, cqe); 327 } 328 329 static int is_atomic_response(struct mlx5_ib_qp *qp, uint16_t idx) 330 { 331 /* TBD: waiting decision 332 */ 333 return 0; 334 } 335 336 static void *mlx5_get_atomic_laddr(struct mlx5_ib_qp *qp, uint16_t idx) 337 { 338 struct mlx5_wqe_data_seg *dpseg; 339 void *addr; 340 341 dpseg = mlx5_get_send_wqe(qp, idx) + sizeof(struct mlx5_wqe_ctrl_seg) + 342 sizeof(struct mlx5_wqe_raddr_seg) + 343 sizeof(struct mlx5_wqe_atomic_seg); 344 addr = (void *)(unsigned long)be64_to_cpu(dpseg->addr); 345 return addr; 346 } 347 348 static void handle_atomic(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64, 349 uint16_t idx) 350 { 351 void *addr; 352 int byte_count; 353 int i; 354 355 if (!is_atomic_response(qp, idx)) 356 return; 357 358 byte_count = be32_to_cpu(cqe64->byte_cnt); 359 addr = mlx5_get_atomic_laddr(qp, idx); 360 361 if (byte_count == 4) { 362 *(uint32_t *)addr = be32_to_cpu(*((__be32 *)addr)); 363 } else { 364 for (i = 0; i < byte_count; i += 8) { 365 *(uint64_t *)addr = be64_to_cpu(*((__be64 *)addr)); 366 addr += 8; 367 } 368 } 369 370 return; 371 } 372 373 static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64, 374 u16 tail, u16 head) 375 { 376 u16 idx; 377 378 do { 379 idx = tail & (qp->sq.wqe_cnt - 1); 380 handle_atomic(qp, cqe64, idx); 381 if (idx == head) 382 break; 383 384 tail = qp->sq.w_list[idx].next; 385 } while (1); 386 tail = qp->sq.w_list[idx].next; 387 qp->sq.last_poll = tail; 388 } 389 390 static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf) 391 { 392 mlx5_buf_free(dev->mdev, &buf->buf); 393 } 394 395 static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe, 396 struct ib_sig_err *item) 397 { 398 u16 syndrome = be16_to_cpu(cqe->syndrome); 399 400 #define GUARD_ERR (1 << 13) 401 #define APPTAG_ERR (1 << 12) 402 #define REFTAG_ERR (1 << 11) 403 404 if (syndrome & GUARD_ERR) { 405 item->err_type = IB_SIG_BAD_GUARD; 406 item->expected = be32_to_cpu(cqe->expected_trans_sig) >> 16; 407 item->actual = be32_to_cpu(cqe->actual_trans_sig) >> 16; 408 } else 409 if (syndrome & REFTAG_ERR) { 410 item->err_type = IB_SIG_BAD_REFTAG; 411 item->expected = be32_to_cpu(cqe->expected_reftag); 412 item->actual = be32_to_cpu(cqe->actual_reftag); 413 } else 414 if (syndrome & APPTAG_ERR) { 415 item->err_type = IB_SIG_BAD_APPTAG; 416 item->expected = be32_to_cpu(cqe->expected_trans_sig) & 0xffff; 417 item->actual = be32_to_cpu(cqe->actual_trans_sig) & 0xffff; 418 } else { 419 pr_err("Got signature completion error with bad syndrome %04x\n", 420 syndrome); 421 } 422 423 item->sig_err_offset = be64_to_cpu(cqe->err_offset); 424 item->key = be32_to_cpu(cqe->mkey); 425 } 426 427 static void sw_send_comp(struct mlx5_ib_qp *qp, int num_entries, 428 struct ib_wc *wc, int *npolled) 429 { 430 struct mlx5_ib_wq *wq; 431 unsigned int cur; 432 unsigned int idx; 433 int np; 434 int i; 435 436 wq = &qp->sq; 437 cur = wq->head - wq->tail; 438 np = *npolled; 439 440 if (cur == 0) 441 return; 442 443 for (i = 0; i < cur && np < num_entries; i++) { 444 idx = wq->last_poll & (wq->wqe_cnt - 1); 445 wc->wr_id = wq->wrid[idx]; 446 wc->status = IB_WC_WR_FLUSH_ERR; 447 wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR; 448 wq->tail++; 449 np++; 450 wc->qp = &qp->ibqp; 451 wc++; 452 wq->last_poll = wq->w_list[idx].next; 453 } 454 *npolled = np; 455 } 456 457 static void sw_recv_comp(struct mlx5_ib_qp *qp, int num_entries, 458 struct ib_wc *wc, int *npolled) 459 { 460 struct mlx5_ib_wq *wq; 461 unsigned int cur; 462 int np; 463 int i; 464 465 wq = &qp->rq; 466 cur = wq->head - wq->tail; 467 np = *npolled; 468 469 if (cur == 0) 470 return; 471 472 for (i = 0; i < cur && np < num_entries; i++) { 473 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; 474 wc->status = IB_WC_WR_FLUSH_ERR; 475 wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR; 476 wq->tail++; 477 np++; 478 wc->qp = &qp->ibqp; 479 wc++; 480 } 481 *npolled = np; 482 } 483 484 static void mlx5_ib_poll_sw_comp(struct mlx5_ib_cq *cq, int num_entries, 485 struct ib_wc *wc, int *npolled) 486 { 487 struct mlx5_ib_qp *qp; 488 489 *npolled = 0; 490 /* Find uncompleted WQEs belonging to that cq and retrun mmics ones */ 491 list_for_each_entry(qp, &cq->list_send_qp, cq_send_list) { 492 sw_send_comp(qp, num_entries, wc + *npolled, npolled); 493 if (*npolled >= num_entries) 494 return; 495 } 496 497 list_for_each_entry(qp, &cq->list_recv_qp, cq_recv_list) { 498 sw_recv_comp(qp, num_entries, wc + *npolled, npolled); 499 if (*npolled >= num_entries) 500 return; 501 } 502 } 503 504 static int mlx5_poll_one(struct mlx5_ib_cq *cq, 505 struct mlx5_ib_qp **cur_qp, 506 struct ib_wc *wc) 507 { 508 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); 509 struct mlx5_err_cqe *err_cqe; 510 struct mlx5_cqe64 *cqe64; 511 struct mlx5_core_qp *mqp; 512 struct mlx5_ib_wq *wq; 513 struct mlx5_sig_err_cqe *sig_err_cqe; 514 struct mlx5_core_mkey *mmkey; 515 struct mlx5_ib_mr *mr; 516 uint8_t opcode; 517 uint32_t qpn; 518 u16 wqe_ctr; 519 void *cqe; 520 int idx; 521 522 repoll: 523 cqe = next_cqe_sw(cq); 524 if (!cqe) 525 return -EAGAIN; 526 527 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; 528 529 ++cq->mcq.cons_index; 530 531 /* Make sure we read CQ entry contents after we've checked the 532 * ownership bit. 533 */ 534 rmb(); 535 536 opcode = cqe64->op_own >> 4; 537 if (unlikely(opcode == MLX5_CQE_RESIZE_CQ)) { 538 if (likely(cq->resize_buf)) { 539 free_cq_buf(dev, &cq->buf); 540 cq->buf = *cq->resize_buf; 541 kfree(cq->resize_buf); 542 cq->resize_buf = NULL; 543 goto repoll; 544 } else { 545 mlx5_ib_warn(dev, "unexpected resize cqe\n"); 546 } 547 } 548 549 qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff; 550 if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) { 551 /* We do not have to take the QP table lock here, 552 * because CQs will be locked while QPs are removed 553 * from the table. 554 */ 555 mqp = __mlx5_qp_lookup(dev->mdev, qpn); 556 if (unlikely(!mqp)) { 557 mlx5_ib_warn(dev, "CQE@CQ %06x for unknown QPN %6x\n", 558 cq->mcq.cqn, qpn); 559 return -EINVAL; 560 } 561 562 *cur_qp = to_mibqp(mqp); 563 } 564 565 wc->qp = &(*cur_qp)->ibqp; 566 switch (opcode) { 567 case MLX5_CQE_REQ: 568 wq = &(*cur_qp)->sq; 569 wqe_ctr = be16_to_cpu(cqe64->wqe_counter); 570 idx = wqe_ctr & (wq->wqe_cnt - 1); 571 handle_good_req(wc, cqe64, wq, idx); 572 handle_atomics(*cur_qp, cqe64, wq->last_poll, idx); 573 wc->wr_id = wq->wrid[idx]; 574 wq->tail = wq->wqe_head[idx] + 1; 575 wc->status = IB_WC_SUCCESS; 576 break; 577 case MLX5_CQE_RESP_WR_IMM: 578 case MLX5_CQE_RESP_SEND: 579 case MLX5_CQE_RESP_SEND_IMM: 580 case MLX5_CQE_RESP_SEND_INV: 581 handle_responder(wc, cqe64, *cur_qp); 582 wc->status = IB_WC_SUCCESS; 583 break; 584 case MLX5_CQE_RESIZE_CQ: 585 break; 586 case MLX5_CQE_REQ_ERR: 587 case MLX5_CQE_RESP_ERR: 588 err_cqe = (struct mlx5_err_cqe *)cqe64; 589 mlx5_handle_error_cqe(dev, err_cqe, wc); 590 mlx5_ib_dbg(dev, "%s error cqe on cqn 0x%x:\n", 591 opcode == MLX5_CQE_REQ_ERR ? 592 "Requestor" : "Responder", cq->mcq.cqn); 593 mlx5_ib_dbg(dev, "syndrome 0x%x, vendor syndrome 0x%x\n", 594 err_cqe->syndrome, err_cqe->vendor_err_synd); 595 if (opcode == MLX5_CQE_REQ_ERR) { 596 wq = &(*cur_qp)->sq; 597 wqe_ctr = be16_to_cpu(cqe64->wqe_counter); 598 idx = wqe_ctr & (wq->wqe_cnt - 1); 599 wc->wr_id = wq->wrid[idx]; 600 wq->tail = wq->wqe_head[idx] + 1; 601 } else { 602 struct mlx5_ib_srq *srq; 603 604 if ((*cur_qp)->ibqp.srq) { 605 srq = to_msrq((*cur_qp)->ibqp.srq); 606 wqe_ctr = be16_to_cpu(cqe64->wqe_counter); 607 wc->wr_id = srq->wrid[wqe_ctr]; 608 mlx5_ib_free_srq_wqe(srq, wqe_ctr); 609 } else { 610 wq = &(*cur_qp)->rq; 611 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; 612 ++wq->tail; 613 } 614 } 615 break; 616 case MLX5_CQE_SIG_ERR: 617 sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64; 618 619 read_lock(&dev->mdev->priv.mkey_table.lock); 620 mmkey = __mlx5_mr_lookup(dev->mdev, 621 mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey))); 622 if (unlikely(!mmkey)) { 623 read_unlock(&dev->mdev->priv.mkey_table.lock); 624 mlx5_ib_warn(dev, "CQE@CQ %06x for unknown MR %6x\n", 625 cq->mcq.cqn, be32_to_cpu(sig_err_cqe->mkey)); 626 return -EINVAL; 627 } 628 629 mr = to_mibmr(mmkey); 630 get_sig_err_item(sig_err_cqe, &mr->sig->err_item); 631 mr->sig->sig_err_exists = true; 632 mr->sig->sigerr_count++; 633 634 mlx5_ib_warn(dev, "CQN: 0x%x Got SIGERR on key: 0x%x err_type %x err_offset %llx expected %x actual %x\n", 635 cq->mcq.cqn, mr->sig->err_item.key, 636 mr->sig->err_item.err_type, 637 mr->sig->err_item.sig_err_offset, 638 mr->sig->err_item.expected, 639 mr->sig->err_item.actual); 640 641 read_unlock(&dev->mdev->priv.mkey_table.lock); 642 goto repoll; 643 } 644 645 return 0; 646 } 647 648 static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries, 649 struct ib_wc *wc) 650 { 651 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); 652 struct mlx5_ib_wc *soft_wc, *next; 653 int npolled = 0; 654 655 list_for_each_entry_safe(soft_wc, next, &cq->wc_list, list) { 656 if (npolled >= num_entries) 657 break; 658 659 mlx5_ib_dbg(dev, "polled software generated completion on CQ 0x%x\n", 660 cq->mcq.cqn); 661 662 wc[npolled++] = soft_wc->wc; 663 list_del(&soft_wc->list); 664 kfree(soft_wc); 665 } 666 667 return npolled; 668 } 669 670 int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) 671 { 672 struct mlx5_ib_cq *cq = to_mcq(ibcq); 673 struct mlx5_ib_qp *cur_qp = NULL; 674 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); 675 struct mlx5_core_dev *mdev = dev->mdev; 676 unsigned long flags; 677 int soft_polled = 0; 678 int npolled; 679 int err = 0; 680 681 spin_lock_irqsave(&cq->lock, flags); 682 if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { 683 mlx5_ib_poll_sw_comp(cq, num_entries, wc, &npolled); 684 goto out; 685 } 686 687 if (unlikely(!list_empty(&cq->wc_list))) 688 soft_polled = poll_soft_wc(cq, num_entries, wc); 689 690 for (npolled = 0; npolled < num_entries - soft_polled; npolled++) { 691 err = mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled); 692 if (err) 693 break; 694 } 695 696 if (npolled) 697 mlx5_cq_set_ci(&cq->mcq); 698 out: 699 spin_unlock_irqrestore(&cq->lock, flags); 700 701 if (err == 0 || err == -EAGAIN) 702 return soft_polled + npolled; 703 else 704 return err; 705 } 706 707 int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) 708 { 709 struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev; 710 struct mlx5_ib_cq *cq = to_mcq(ibcq); 711 void __iomem *uar_page = mdev->priv.uuari.uars[0].map; 712 unsigned long irq_flags; 713 int ret = 0; 714 715 spin_lock_irqsave(&cq->lock, irq_flags); 716 if (cq->notify_flags != IB_CQ_NEXT_COMP) 717 cq->notify_flags = flags & IB_CQ_SOLICITED_MASK; 718 719 if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !list_empty(&cq->wc_list)) 720 ret = 1; 721 spin_unlock_irqrestore(&cq->lock, irq_flags); 722 723 mlx5_cq_arm(&cq->mcq, 724 (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ? 725 MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT, 726 uar_page, 727 MLX5_GET_DOORBELL_LOCK(&mdev->priv.cq_uar_lock), 728 to_mcq(ibcq)->mcq.cons_index); 729 730 return ret; 731 } 732 733 static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf, 734 int nent, int cqe_size) 735 { 736 int err; 737 738 err = mlx5_buf_alloc(dev->mdev, nent * cqe_size, &buf->buf); 739 if (err) 740 return err; 741 742 buf->cqe_size = cqe_size; 743 buf->nent = nent; 744 745 return 0; 746 } 747 748 static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata, 749 struct ib_ucontext *context, struct mlx5_ib_cq *cq, 750 int entries, struct mlx5_create_cq_mbox_in **cqb, 751 int *cqe_size, int *index, int *inlen) 752 { 753 struct mlx5_ib_create_cq ucmd; 754 size_t ucmdlen; 755 int page_shift; 756 int npages; 757 int ncont; 758 int err; 759 760 ucmdlen = 761 (udata->inlen - sizeof(struct ib_uverbs_cmd_hdr) < 762 sizeof(ucmd)) ? (sizeof(ucmd) - 763 sizeof(ucmd.reserved)) : sizeof(ucmd); 764 765 if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) 766 return -EFAULT; 767 768 if (ucmdlen == sizeof(ucmd) && 769 ucmd.reserved != 0) 770 return -EINVAL; 771 772 if (ucmd.cqe_size != 64 && ucmd.cqe_size != 128) 773 return -EINVAL; 774 775 *cqe_size = ucmd.cqe_size; 776 777 cq->buf.umem = ib_umem_get(context, ucmd.buf_addr, 778 entries * ucmd.cqe_size, 779 IB_ACCESS_LOCAL_WRITE, 1); 780 if (IS_ERR(cq->buf.umem)) { 781 err = PTR_ERR(cq->buf.umem); 782 return err; 783 } 784 785 err = mlx5_ib_db_map_user(to_mucontext(context), ucmd.db_addr, 786 &cq->db); 787 if (err) 788 goto err_umem; 789 790 mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, &npages, &page_shift, 791 &ncont, NULL); 792 mlx5_ib_dbg(dev, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n", 793 ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont); 794 795 *inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * ncont; 796 *cqb = mlx5_vzalloc(*inlen); 797 if (!*cqb) { 798 err = -ENOMEM; 799 goto err_db; 800 } 801 mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, (*cqb)->pas, 0); 802 (*cqb)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT; 803 804 *index = to_mucontext(context)->uuari.uars[0].index; 805 806 return 0; 807 808 err_db: 809 mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db); 810 811 err_umem: 812 ib_umem_release(cq->buf.umem); 813 return err; 814 } 815 816 static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_ucontext *context) 817 { 818 mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db); 819 ib_umem_release(cq->buf.umem); 820 } 821 822 static void init_cq_buf(struct mlx5_ib_cq *cq, struct mlx5_ib_cq_buf *buf) 823 { 824 int i; 825 void *cqe; 826 struct mlx5_cqe64 *cqe64; 827 828 for (i = 0; i < buf->nent; i++) { 829 cqe = get_cqe_from_buf(buf, i, buf->cqe_size); 830 cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64; 831 cqe64->op_own = MLX5_CQE_INVALID << 4; 832 } 833 } 834 835 static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, 836 int entries, int cqe_size, 837 struct mlx5_create_cq_mbox_in **cqb, 838 int *index, int *inlen) 839 { 840 int err; 841 842 err = mlx5_db_alloc(dev->mdev, &cq->db); 843 if (err) 844 return err; 845 846 cq->mcq.set_ci_db = cq->db.db; 847 cq->mcq.arm_db = cq->db.db + 1; 848 cq->mcq.cqe_sz = cqe_size; 849 850 err = alloc_cq_buf(dev, &cq->buf, entries, cqe_size); 851 if (err) 852 goto err_db; 853 854 init_cq_buf(cq, &cq->buf); 855 856 *inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * cq->buf.buf.npages; 857 *cqb = mlx5_vzalloc(*inlen); 858 if (!*cqb) { 859 err = -ENOMEM; 860 goto err_buf; 861 } 862 mlx5_fill_page_array(&cq->buf.buf, (*cqb)->pas); 863 864 (*cqb)->ctx.log_pg_sz = cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT; 865 *index = dev->mdev->priv.uuari.uars[0].index; 866 867 return 0; 868 869 err_buf: 870 free_cq_buf(dev, &cq->buf); 871 872 err_db: 873 mlx5_db_free(dev->mdev, &cq->db); 874 return err; 875 } 876 877 static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq) 878 { 879 free_cq_buf(dev, &cq->buf); 880 mlx5_db_free(dev->mdev, &cq->db); 881 } 882 883 static void notify_soft_wc_handler(struct work_struct *work) 884 { 885 struct mlx5_ib_cq *cq = container_of(work, struct mlx5_ib_cq, 886 notify_work); 887 888 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); 889 } 890 891 struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, 892 const struct ib_cq_init_attr *attr, 893 struct ib_ucontext *context, 894 struct ib_udata *udata) 895 { 896 int entries = attr->cqe; 897 int vector = attr->comp_vector; 898 struct mlx5_create_cq_mbox_in *cqb = NULL; 899 struct mlx5_ib_dev *dev = to_mdev(ibdev); 900 struct mlx5_ib_cq *cq; 901 int uninitialized_var(index); 902 int uninitialized_var(inlen); 903 int cqe_size; 904 unsigned int irqn; 905 int eqn; 906 int err; 907 908 if (entries < 0 || 909 (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)))) 910 return ERR_PTR(-EINVAL); 911 912 if (check_cq_create_flags(attr->flags)) 913 return ERR_PTR(-EOPNOTSUPP); 914 915 entries = roundup_pow_of_two(entries + 1); 916 if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) 917 return ERR_PTR(-EINVAL); 918 919 cq = kzalloc(sizeof(*cq), GFP_KERNEL); 920 if (!cq) 921 return ERR_PTR(-ENOMEM); 922 923 cq->ibcq.cqe = entries - 1; 924 mutex_init(&cq->resize_mutex); 925 spin_lock_init(&cq->lock); 926 cq->resize_buf = NULL; 927 cq->resize_umem = NULL; 928 cq->create_flags = attr->flags; 929 INIT_LIST_HEAD(&cq->list_send_qp); 930 INIT_LIST_HEAD(&cq->list_recv_qp); 931 932 if (context) { 933 err = create_cq_user(dev, udata, context, cq, entries, 934 &cqb, &cqe_size, &index, &inlen); 935 if (err) 936 goto err_create; 937 } else { 938 /* for now choose 64 bytes till we have a proper interface */ 939 cqe_size = 64; 940 err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb, 941 &index, &inlen); 942 if (err) 943 goto err_create; 944 945 INIT_WORK(&cq->notify_work, notify_soft_wc_handler); 946 } 947 948 cq->cqe_size = cqe_size; 949 cqb->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5; 950 951 if (cq->create_flags & IB_CQ_FLAGS_IGNORE_OVERRUN) 952 cqb->ctx.cqe_sz_flags |= (1 << 1); 953 954 cqb->ctx.log_sz_usr_page = cpu_to_be32((ilog2(entries) << 24) | index); 955 err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn); 956 if (err) 957 goto err_cqb; 958 959 cqb->ctx.c_eqn = cpu_to_be16(eqn); 960 cqb->ctx.db_record_addr = cpu_to_be64(cq->db.dma); 961 962 err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen); 963 if (err) 964 goto err_cqb; 965 966 mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn); 967 cq->mcq.irqn = irqn; 968 if (context) 969 cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp; 970 else 971 cq->mcq.comp = mlx5_ib_cq_comp; 972 cq->mcq.event = mlx5_ib_cq_event; 973 974 INIT_LIST_HEAD(&cq->wc_list); 975 976 if (context) 977 if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) { 978 err = -EFAULT; 979 goto err_cmd; 980 } 981 982 983 kvfree(cqb); 984 return &cq->ibcq; 985 986 err_cmd: 987 mlx5_core_destroy_cq(dev->mdev, &cq->mcq); 988 989 err_cqb: 990 kvfree(cqb); 991 if (context) 992 destroy_cq_user(cq, context); 993 else 994 destroy_cq_kernel(dev, cq); 995 996 err_create: 997 kfree(cq); 998 999 return ERR_PTR(err); 1000 } 1001 1002 1003 int mlx5_ib_destroy_cq(struct ib_cq *cq) 1004 { 1005 struct mlx5_ib_dev *dev = to_mdev(cq->device); 1006 struct mlx5_ib_cq *mcq = to_mcq(cq); 1007 struct ib_ucontext *context = NULL; 1008 1009 if (cq->uobject) 1010 context = cq->uobject->context; 1011 1012 mlx5_core_destroy_cq(dev->mdev, &mcq->mcq); 1013 if (context) 1014 destroy_cq_user(mcq, context); 1015 else 1016 destroy_cq_kernel(dev, mcq); 1017 1018 kfree(mcq); 1019 1020 return 0; 1021 } 1022 1023 static int is_equal_rsn(struct mlx5_cqe64 *cqe64, u32 rsn) 1024 { 1025 return rsn == (ntohl(cqe64->sop_drop_qpn) & 0xffffff); 1026 } 1027 1028 void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 rsn, struct mlx5_ib_srq *srq) 1029 { 1030 struct mlx5_cqe64 *cqe64, *dest64; 1031 void *cqe, *dest; 1032 u32 prod_index; 1033 int nfreed = 0; 1034 u8 owner_bit; 1035 1036 if (!cq) 1037 return; 1038 1039 /* First we need to find the current producer index, so we 1040 * know where to start cleaning from. It doesn't matter if HW 1041 * adds new entries after this loop -- the QP we're worried 1042 * about is already in RESET, so the new entries won't come 1043 * from our QP and therefore don't need to be checked. 1044 */ 1045 for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); prod_index++) 1046 if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe) 1047 break; 1048 1049 /* Now sweep backwards through the CQ, removing CQ entries 1050 * that match our QP by copying older entries on top of them. 1051 */ 1052 while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) { 1053 cqe = get_cqe(cq, prod_index & cq->ibcq.cqe); 1054 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; 1055 if (is_equal_rsn(cqe64, rsn)) { 1056 if (srq && (ntohl(cqe64->srqn) & 0xffffff)) 1057 mlx5_ib_free_srq_wqe(srq, be16_to_cpu(cqe64->wqe_counter)); 1058 ++nfreed; 1059 } else if (nfreed) { 1060 dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe); 1061 dest64 = (cq->mcq.cqe_sz == 64) ? dest : dest + 64; 1062 owner_bit = dest64->op_own & MLX5_CQE_OWNER_MASK; 1063 memcpy(dest, cqe, cq->mcq.cqe_sz); 1064 dest64->op_own = owner_bit | 1065 (dest64->op_own & ~MLX5_CQE_OWNER_MASK); 1066 } 1067 } 1068 1069 if (nfreed) { 1070 cq->mcq.cons_index += nfreed; 1071 /* Make sure update of buffer contents is done before 1072 * updating consumer index. 1073 */ 1074 wmb(); 1075 mlx5_cq_set_ci(&cq->mcq); 1076 } 1077 } 1078 1079 void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq) 1080 { 1081 if (!cq) 1082 return; 1083 1084 spin_lock_irq(&cq->lock); 1085 __mlx5_ib_cq_clean(cq, qpn, srq); 1086 spin_unlock_irq(&cq->lock); 1087 } 1088 1089 int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) 1090 { 1091 struct mlx5_modify_cq_mbox_in *in; 1092 struct mlx5_ib_dev *dev = to_mdev(cq->device); 1093 struct mlx5_ib_cq *mcq = to_mcq(cq); 1094 int err; 1095 u32 fsel; 1096 1097 if (!MLX5_CAP_GEN(dev->mdev, cq_moderation)) 1098 return -ENOSYS; 1099 1100 in = kzalloc(sizeof(*in), GFP_KERNEL); 1101 if (!in) 1102 return -ENOMEM; 1103 1104 in->cqn = cpu_to_be32(mcq->mcq.cqn); 1105 fsel = (MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT); 1106 in->ctx.cq_period = cpu_to_be16(cq_period); 1107 in->ctx.cq_max_count = cpu_to_be16(cq_count); 1108 in->field_select = cpu_to_be32(fsel); 1109 err = mlx5_core_modify_cq(dev->mdev, &mcq->mcq, in, sizeof(*in)); 1110 kfree(in); 1111 1112 if (err) 1113 mlx5_ib_warn(dev, "modify cq 0x%x failed\n", mcq->mcq.cqn); 1114 1115 return err; 1116 } 1117 1118 static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, 1119 int entries, struct ib_udata *udata, int *npas, 1120 int *page_shift, int *cqe_size) 1121 { 1122 struct mlx5_ib_resize_cq ucmd; 1123 struct ib_umem *umem; 1124 int err; 1125 int npages; 1126 struct ib_ucontext *context = cq->buf.umem->context; 1127 1128 err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)); 1129 if (err) 1130 return err; 1131 1132 if (ucmd.reserved0 || ucmd.reserved1) 1133 return -EINVAL; 1134 1135 umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size, 1136 IB_ACCESS_LOCAL_WRITE, 1); 1137 if (IS_ERR(umem)) { 1138 err = PTR_ERR(umem); 1139 return err; 1140 } 1141 1142 mlx5_ib_cont_pages(umem, ucmd.buf_addr, &npages, page_shift, 1143 npas, NULL); 1144 1145 cq->resize_umem = umem; 1146 *cqe_size = ucmd.cqe_size; 1147 1148 return 0; 1149 } 1150 1151 static void un_resize_user(struct mlx5_ib_cq *cq) 1152 { 1153 ib_umem_release(cq->resize_umem); 1154 } 1155 1156 static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, 1157 int entries, int cqe_size) 1158 { 1159 int err; 1160 1161 cq->resize_buf = kzalloc(sizeof(*cq->resize_buf), GFP_KERNEL); 1162 if (!cq->resize_buf) 1163 return -ENOMEM; 1164 1165 err = alloc_cq_buf(dev, cq->resize_buf, entries, cqe_size); 1166 if (err) 1167 goto ex; 1168 1169 init_cq_buf(cq, cq->resize_buf); 1170 1171 return 0; 1172 1173 ex: 1174 kfree(cq->resize_buf); 1175 return err; 1176 } 1177 1178 static void un_resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq) 1179 { 1180 free_cq_buf(dev, cq->resize_buf); 1181 cq->resize_buf = NULL; 1182 } 1183 1184 static int copy_resize_cqes(struct mlx5_ib_cq *cq) 1185 { 1186 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); 1187 struct mlx5_cqe64 *scqe64; 1188 struct mlx5_cqe64 *dcqe64; 1189 void *start_cqe; 1190 void *scqe; 1191 void *dcqe; 1192 int ssize; 1193 int dsize; 1194 int i; 1195 u8 sw_own; 1196 1197 ssize = cq->buf.cqe_size; 1198 dsize = cq->resize_buf->cqe_size; 1199 if (ssize != dsize) { 1200 mlx5_ib_warn(dev, "resize from different cqe size is not supported\n"); 1201 return -EINVAL; 1202 } 1203 1204 i = cq->mcq.cons_index; 1205 scqe = get_sw_cqe(cq, i); 1206 scqe64 = ssize == 64 ? scqe : scqe + 64; 1207 start_cqe = scqe; 1208 if (!scqe) { 1209 mlx5_ib_warn(dev, "expected cqe in sw ownership\n"); 1210 return -EINVAL; 1211 } 1212 1213 while ((scqe64->op_own >> 4) != MLX5_CQE_RESIZE_CQ) { 1214 dcqe = get_cqe_from_buf(cq->resize_buf, 1215 (i + 1) & (cq->resize_buf->nent), 1216 dsize); 1217 dcqe64 = dsize == 64 ? dcqe : dcqe + 64; 1218 sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent); 1219 memcpy(dcqe, scqe, dsize); 1220 dcqe64->op_own = (dcqe64->op_own & ~MLX5_CQE_OWNER_MASK) | sw_own; 1221 1222 ++i; 1223 scqe = get_sw_cqe(cq, i); 1224 scqe64 = ssize == 64 ? scqe : scqe + 64; 1225 if (!scqe) { 1226 mlx5_ib_warn(dev, "expected cqe in sw ownership\n"); 1227 return -EINVAL; 1228 } 1229 1230 if (scqe == start_cqe) { 1231 pr_warn("resize CQ failed to get resize CQE, CQN 0x%x\n", 1232 cq->mcq.cqn); 1233 return -ENOMEM; 1234 } 1235 } 1236 ++cq->mcq.cons_index; 1237 return 0; 1238 } 1239 1240 int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) 1241 { 1242 struct mlx5_ib_dev *dev = to_mdev(ibcq->device); 1243 struct mlx5_ib_cq *cq = to_mcq(ibcq); 1244 struct mlx5_modify_cq_mbox_in *in; 1245 int err; 1246 int npas; 1247 int page_shift; 1248 int inlen; 1249 int uninitialized_var(cqe_size); 1250 unsigned long flags; 1251 1252 if (!MLX5_CAP_GEN(dev->mdev, cq_resize)) { 1253 pr_info("Firmware does not support resize CQ\n"); 1254 return -ENOSYS; 1255 } 1256 1257 if (entries < 1 || 1258 entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) { 1259 mlx5_ib_warn(dev, "wrong entries number %d, max %d\n", 1260 entries, 1261 1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)); 1262 return -EINVAL; 1263 } 1264 1265 entries = roundup_pow_of_two(entries + 1); 1266 if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1) 1267 return -EINVAL; 1268 1269 if (entries == ibcq->cqe + 1) 1270 return 0; 1271 1272 mutex_lock(&cq->resize_mutex); 1273 if (udata) { 1274 err = resize_user(dev, cq, entries, udata, &npas, &page_shift, 1275 &cqe_size); 1276 } else { 1277 cqe_size = 64; 1278 err = resize_kernel(dev, cq, entries, cqe_size); 1279 if (!err) { 1280 npas = cq->resize_buf->buf.npages; 1281 page_shift = cq->resize_buf->buf.page_shift; 1282 } 1283 } 1284 1285 if (err) 1286 goto ex; 1287 1288 inlen = sizeof(*in) + npas * sizeof(in->pas[0]); 1289 in = mlx5_vzalloc(inlen); 1290 if (!in) { 1291 err = -ENOMEM; 1292 goto ex_resize; 1293 } 1294 1295 if (udata) 1296 mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift, 1297 in->pas, 0); 1298 else 1299 mlx5_fill_page_array(&cq->resize_buf->buf, in->pas); 1300 1301 in->field_select = cpu_to_be32(MLX5_MODIFY_CQ_MASK_LOG_SIZE | 1302 MLX5_MODIFY_CQ_MASK_PG_OFFSET | 1303 MLX5_MODIFY_CQ_MASK_PG_SIZE); 1304 in->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT; 1305 in->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5; 1306 in->ctx.page_offset = 0; 1307 in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(entries) << 24); 1308 in->hdr.opmod = cpu_to_be16(MLX5_CQ_OPMOD_RESIZE); 1309 in->cqn = cpu_to_be32(cq->mcq.cqn); 1310 1311 err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen); 1312 if (err) 1313 goto ex_alloc; 1314 1315 if (udata) { 1316 cq->ibcq.cqe = entries - 1; 1317 ib_umem_release(cq->buf.umem); 1318 cq->buf.umem = cq->resize_umem; 1319 cq->resize_umem = NULL; 1320 } else { 1321 struct mlx5_ib_cq_buf tbuf; 1322 int resized = 0; 1323 1324 spin_lock_irqsave(&cq->lock, flags); 1325 if (cq->resize_buf) { 1326 err = copy_resize_cqes(cq); 1327 if (!err) { 1328 tbuf = cq->buf; 1329 cq->buf = *cq->resize_buf; 1330 kfree(cq->resize_buf); 1331 cq->resize_buf = NULL; 1332 resized = 1; 1333 } 1334 } 1335 cq->ibcq.cqe = entries - 1; 1336 spin_unlock_irqrestore(&cq->lock, flags); 1337 if (resized) 1338 free_cq_buf(dev, &tbuf); 1339 } 1340 mutex_unlock(&cq->resize_mutex); 1341 1342 kvfree(in); 1343 return 0; 1344 1345 ex_alloc: 1346 kvfree(in); 1347 1348 ex_resize: 1349 if (udata) 1350 un_resize_user(cq); 1351 else 1352 un_resize_kernel(dev, cq); 1353 ex: 1354 mutex_unlock(&cq->resize_mutex); 1355 return err; 1356 } 1357 1358 int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq) 1359 { 1360 struct mlx5_ib_cq *cq; 1361 1362 if (!ibcq) 1363 return 128; 1364 1365 cq = to_mcq(ibcq); 1366 return cq->cqe_size; 1367 } 1368 1369 /* Called from atomic context */ 1370 int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc) 1371 { 1372 struct mlx5_ib_wc *soft_wc; 1373 struct mlx5_ib_cq *cq = to_mcq(ibcq); 1374 unsigned long flags; 1375 1376 soft_wc = kmalloc(sizeof(*soft_wc), GFP_ATOMIC); 1377 if (!soft_wc) 1378 return -ENOMEM; 1379 1380 soft_wc->wc = *wc; 1381 spin_lock_irqsave(&cq->lock, flags); 1382 list_add_tail(&soft_wc->list, &cq->wc_list); 1383 if (cq->notify_flags == IB_CQ_NEXT_COMP || 1384 wc->status != IB_WC_SUCCESS) { 1385 cq->notify_flags = 0; 1386 schedule_work(&cq->notify_work); 1387 } 1388 spin_unlock_irqrestore(&cq->lock, flags); 1389 1390 return 0; 1391 } 1392