/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kref.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h>
#include "mlx5_ib.h"

static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq)
{
	struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;

	ibcq->comp_handler(ibcq, ibcq->cq_context);
}

static void mlx5_ib_cq_event(struct mlx5_core_cq *mcq, enum mlx5_event type)
{
	struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq);
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct ib_cq *ibcq = &cq->ibcq;
	struct ib_event event;

	if (type != MLX5_EVENT_TYPE_CQ_ERROR) {
		mlx5_ib_warn(dev, "Unexpected event type %d on CQ %06x\n",
			     type, mcq->cqn);
		return;
	}

	if (ibcq->event_handler) {
		event.device = &dev->ib_dev;
		event.event = IB_EVENT_CQ_ERR;
		event.element.cq = ibcq;
		ibcq->event_handler(&event, ibcq->cq_context);
	}
}

static void *get_cqe_from_buf(struct mlx5_ib_cq_buf *buf, int n, int size)
{
	return mlx5_buf_offset(&buf->buf, n * size);
}

static void *get_cqe(struct mlx5_ib_cq *cq, int n)
{
	return get_cqe_from_buf(&cq->buf, n, cq->mcq.cqe_sz);
}

static u8 sw_ownership_bit(int n, int nent)
{
	return (n & nent) ? 1 : 0;
}

static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n)
{
	void *cqe = get_cqe(cq, n & cq->ibcq.cqe);
	struct mlx5_cqe64 *cqe64;

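	/* With 128-byte CQEs the 64-byte completion data sits in the
	 * second half of the entry, so point cqe64 there.
	 */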
	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;

	if (likely((cqe64->op_own) >> 4 != MLX5_CQE_INVALID) &&
	    !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {
		return cqe;
	} else {
		return NULL;
	}
}

static void *next_cqe_sw(struct mlx5_ib_cq *cq)
{
	return get_sw_cqe(cq, cq->mcq.cons_index);
}

static enum ib_wc_opcode get_umr_comp(struct mlx5_ib_wq *wq, int idx)
{
	switch (wq->wr_data[idx]) {
	case MLX5_IB_WR_UMR:
		return 0;

	case IB_WR_LOCAL_INV:
		return IB_WC_LOCAL_INV;

	case IB_WR_REG_MR:
		return IB_WC_REG_MR;

	default:
		pr_warn("unknown completion status\n");
		return 0;
	}
}

static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
			    struct mlx5_ib_wq *wq, int idx)
{
	wc->wc_flags = 0;
	switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) {
	case MLX5_OPCODE_RDMA_WRITE_IMM:
		wc->wc_flags |= IB_WC_WITH_IMM;
	case MLX5_OPCODE_RDMA_WRITE:
		wc->opcode = IB_WC_RDMA_WRITE;
		break;
	case MLX5_OPCODE_SEND_IMM:
		wc->wc_flags |= IB_WC_WITH_IMM;
	case MLX5_OPCODE_SEND:
	case MLX5_OPCODE_SEND_INVAL:
		wc->opcode = IB_WC_SEND;
		break;
	case MLX5_OPCODE_RDMA_READ:
		wc->opcode = IB_WC_RDMA_READ;
		wc->byte_len = be32_to_cpu(cqe->byte_cnt);
		break;
	case MLX5_OPCODE_ATOMIC_CS:
		wc->opcode = IB_WC_COMP_SWAP;
		wc->byte_len = 8;
		break;
	case MLX5_OPCODE_ATOMIC_FA:
		wc->opcode = IB_WC_FETCH_ADD;
		wc->byte_len = 8;
		break;
	case MLX5_OPCODE_ATOMIC_MASKED_CS:
		wc->opcode = IB_WC_MASKED_COMP_SWAP;
		wc->byte_len = 8;
		break;
	case MLX5_OPCODE_ATOMIC_MASKED_FA:
		wc->opcode = IB_WC_MASKED_FETCH_ADD;
		wc->byte_len = 8;
		break;
	case MLX5_OPCODE_UMR:
		wc->opcode = get_umr_comp(wq, idx);
		break;
	}
}

enum {
	MLX5_GRH_IN_BUFFER = 1,
	MLX5_GRH_IN_CQE = 2,
};

static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
			     struct mlx5_ib_qp *qp)
{
	enum rdma_link_layer ll = rdma_port_get_link_layer(qp->ibqp.device, 1);
	struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
	struct mlx5_ib_srq *srq;
	struct mlx5_ib_wq *wq;
	u16 wqe_ctr;
	u8 roce_packet_type;
	bool vlan_present;
	u8 g;

	if (qp->ibqp.srq || qp->ibqp.xrcd) {
		struct mlx5_core_srq *msrq = NULL;

		if (qp->ibqp.xrcd) {
			msrq = mlx5_core_get_srq(dev->mdev,
						 be32_to_cpu(cqe->srqn));
			srq = to_mibsrq(msrq);
		} else {
			srq = to_msrq(qp->ibqp.srq);
		}
		if (srq) {
			wqe_ctr = be16_to_cpu(cqe->wqe_counter);
			wc->wr_id = srq->wrid[wqe_ctr];
			mlx5_ib_free_srq_wqe(srq, wqe_ctr);
			if (msrq && atomic_dec_and_test(&msrq->refcount))
				complete(&msrq->free);
		}
	} else {
		wq = &qp->rq;
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	}
	wc->byte_len = be32_to_cpu(cqe->byte_cnt);

	switch (cqe->op_own >> 4) {
	case MLX5_CQE_RESP_WR_IMM:
		wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
		wc->wc_flags = IB_WC_WITH_IMM;
		wc->ex.imm_data = cqe->imm_inval_pkey;
		break;
	case MLX5_CQE_RESP_SEND:
		wc->opcode = IB_WC_RECV;
		wc->wc_flags = IB_WC_IP_CSUM_OK;
		if (unlikely(!((cqe->hds_ip_ext & CQE_L3_OK) &&
			       (cqe->hds_ip_ext & CQE_L4_OK))))
			wc->wc_flags = 0;
		break;
	case MLX5_CQE_RESP_SEND_IMM:
		wc->opcode = IB_WC_RECV;
		wc->wc_flags = IB_WC_WITH_IMM;
		wc->ex.imm_data = cqe->imm_inval_pkey;
		break;
	case MLX5_CQE_RESP_SEND_INV:
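		/* Send with Invalidate: report the rkey the remote peer
		 * invalidated so the consumer stops using it.
		 */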
		wc->opcode = IB_WC_RECV;
		wc->wc_flags = IB_WC_WITH_INVALIDATE;
		wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey);
		break;
	}
	wc->slid = be16_to_cpu(cqe->slid);
	wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
	wc->dlid_path_bits = cqe->ml_path;
	g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
	wc->wc_flags |= g ? IB_WC_GRH : 0;
	if (unlikely(is_qp1(qp->ibqp.qp_type))) {
		u16 pkey = be32_to_cpu(cqe->imm_inval_pkey) & 0xffff;

		ib_find_cached_pkey(&dev->ib_dev, qp->port, pkey,
				    &wc->pkey_index);
	} else {
		wc->pkey_index = 0;
	}

	if (ll != IB_LINK_LAYER_ETHERNET) {
		wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
		return;
	}

	vlan_present = cqe->l4_l3_hdr_type & 0x1;
	roce_packet_type = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3;
	if (vlan_present) {
		wc->vlan_id = (be16_to_cpu(cqe->vlan_info)) & 0xfff;
		wc->sl = (be16_to_cpu(cqe->vlan_info) >> 13) & 0x7;
		wc->wc_flags |= IB_WC_WITH_VLAN;
	} else {
		wc->sl = 0;
	}

	switch (roce_packet_type) {
	case MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH:
		wc->network_hdr_type = RDMA_NETWORK_IB;
		break;
	case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6:
		wc->network_hdr_type = RDMA_NETWORK_IPV6;
		break;
	case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4:
		wc->network_hdr_type = RDMA_NETWORK_IPV4;
		break;
	}
	wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
}

static void dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe)
{
	__be32 *p = (__be32 *)cqe;
	int i;

	mlx5_ib_warn(dev, "dump error cqe\n");
	for (i = 0; i < sizeof(*cqe) / 16; i++, p += 4)
		pr_info("%08x %08x %08x %08x\n", be32_to_cpu(p[0]),
			be32_to_cpu(p[1]), be32_to_cpu(p[2]),
			be32_to_cpu(p[3]));
}

static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev,
				  struct mlx5_err_cqe *cqe,
				  struct ib_wc *wc)
{
	int dump = 1;

	switch (cqe->syndrome) {
	case MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_PROT_ERR:
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case MLX5_CQE_SYNDROME_WR_FLUSH_ERR:
		dump = 0;
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case MLX5_CQE_SYNDROME_MW_BIND_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case MLX5_CQE_SYNDROME_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR:
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_OP_ERR:
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IB_WC_RETRY_EXC_ERR;
		dump = 0;
		break;
	case MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		dump = 0;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR:
		wc->status = IB_WC_REM_ABORT_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

	wc->vendor_err = cqe->vendor_err_synd;
	if (dump)
		dump_cqe(dev, cqe);
}

static int is_atomic_response(struct mlx5_ib_qp *qp, uint16_t idx)
{
	/* TBD: waiting decision
	 */
	return 0;
}

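/* Return the local address targeted by the atomic WQE at @idx: skip the
 * control, remote-address and atomic segments to reach the data segment.
 */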
static void *mlx5_get_atomic_laddr(struct mlx5_ib_qp *qp, uint16_t idx)
{
	struct mlx5_wqe_data_seg *dpseg;
	void *addr;

	dpseg = mlx5_get_send_wqe(qp, idx) + sizeof(struct mlx5_wqe_ctrl_seg) +
		sizeof(struct mlx5_wqe_raddr_seg) +
		sizeof(struct mlx5_wqe_atomic_seg);
	addr = (void *)(unsigned long)be64_to_cpu(dpseg->addr);
	return addr;
}

static void handle_atomic(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
			  uint16_t idx)
{
	void *addr;
	int byte_count;
	int i;

	if (!is_atomic_response(qp, idx))
		return;

	byte_count = be32_to_cpu(cqe64->byte_cnt);
	addr = mlx5_get_atomic_laddr(qp, idx);

	if (byte_count == 4) {
		*(uint32_t *)addr = be32_to_cpu(*((__be32 *)addr));
	} else {
		for (i = 0; i < byte_count; i += 8) {
			*(uint64_t *)addr = be64_to_cpu(*((__be64 *)addr));
			addr += 8;
		}
	}

	return;
}

static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
			   u16 tail, u16 head)
{
	u16 idx;

	do {
		idx = tail & (qp->sq.wqe_cnt - 1);
		handle_atomic(qp, cqe64, idx);
		if (idx == head)
			break;

		tail = qp->sq.w_list[idx].next;
	} while (1);
	tail = qp->sq.w_list[idx].next;
	qp->sq.last_poll = tail;
}

static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
{
	mlx5_buf_free(dev->mdev, &buf->buf);
}

static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
			     struct ib_sig_err *item)
{
	u16 syndrome = be16_to_cpu(cqe->syndrome);

#define GUARD_ERR  (1 << 13)
#define APPTAG_ERR (1 << 12)
#define REFTAG_ERR (1 << 11)

	if (syndrome & GUARD_ERR) {
		item->err_type = IB_SIG_BAD_GUARD;
		item->expected = be32_to_cpu(cqe->expected_trans_sig) >> 16;
		item->actual = be32_to_cpu(cqe->actual_trans_sig) >> 16;
	} else
	if (syndrome & REFTAG_ERR) {
		item->err_type = IB_SIG_BAD_REFTAG;
		item->expected = be32_to_cpu(cqe->expected_reftag);
		item->actual = be32_to_cpu(cqe->actual_reftag);
	} else
	if (syndrome & APPTAG_ERR) {
		item->err_type = IB_SIG_BAD_APPTAG;
		item->expected = be32_to_cpu(cqe->expected_trans_sig) & 0xffff;
		item->actual = be32_to_cpu(cqe->actual_trans_sig) & 0xffff;
	} else {
		pr_err("Got signature completion error with bad syndrome %04x\n",
		       syndrome);
	}

	item->sig_err_offset = be64_to_cpu(cqe->err_offset);
	item->key = be32_to_cpu(cqe->mkey);
}

static void sw_send_comp(struct mlx5_ib_qp *qp, int num_entries,
			 struct ib_wc *wc, int *npolled)
{
	struct mlx5_ib_wq *wq;
	unsigned int cur;
	unsigned int idx;
	int np;
	int i;

	wq = &qp->sq;
	cur = wq->head - wq->tail;
	np = *npolled;

	if (cur == 0)
		return;

	for (i = 0; i < cur && np < num_entries; i++) {
		idx = wq->last_poll & (wq->wqe_cnt - 1);
		wc->wr_id = wq->wrid[idx];
		wc->status = IB_WC_WR_FLUSH_ERR;
		wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
		wq->tail++;
		np++;
		wc->qp = &qp->ibqp;
		wc++;
		wq->last_poll = wq->w_list[idx].next;
	}
	*npolled = np;
}

static void sw_recv_comp(struct mlx5_ib_qp *qp, int num_entries,
			 struct ib_wc *wc, int *npolled)
{
	struct mlx5_ib_wq *wq;
	unsigned int cur;
	int np;
	int i;

	wq = &qp->rq;
	cur = wq->head - wq->tail;
	np = *npolled;

	if (cur == 0)
		return;

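	/* Generate a flush-error completion for every receive WQE that was
	 * posted but never consumed by hardware.
	 */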
	for (i = 0; i < cur && np < num_entries; i++) {
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		wc->status = IB_WC_WR_FLUSH_ERR;
		wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
		wq->tail++;
		np++;
		wc->qp = &qp->ibqp;
		wc++;
	}
	*npolled = np;
}

static void mlx5_ib_poll_sw_comp(struct mlx5_ib_cq *cq, int num_entries,
				 struct ib_wc *wc, int *npolled)
{
	struct mlx5_ib_qp *qp;

	*npolled = 0;
	/* Find uncompleted WQEs belonging to that cq and return mimicked
	 * completions for them.
	 */
	list_for_each_entry(qp, &cq->list_send_qp, cq_send_list) {
		sw_send_comp(qp, num_entries, wc + *npolled, npolled);
		if (*npolled >= num_entries)
			return;
	}

	list_for_each_entry(qp, &cq->list_recv_qp, cq_recv_list) {
		sw_recv_comp(qp, num_entries, wc + *npolled, npolled);
		if (*npolled >= num_entries)
			return;
	}
}

static int mlx5_poll_one(struct mlx5_ib_cq *cq,
			 struct mlx5_ib_qp **cur_qp,
			 struct ib_wc *wc)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_err_cqe *err_cqe;
	struct mlx5_cqe64 *cqe64;
	struct mlx5_core_qp *mqp;
	struct mlx5_ib_wq *wq;
	struct mlx5_sig_err_cqe *sig_err_cqe;
	struct mlx5_core_mkey *mmkey;
	struct mlx5_ib_mr *mr;
	uint8_t opcode;
	uint32_t qpn;
	u16 wqe_ctr;
	void *cqe;
	int idx;

repoll:
	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;

	++cq->mcq.cons_index;

	/* Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	opcode = cqe64->op_own >> 4;
	if (unlikely(opcode == MLX5_CQE_RESIZE_CQ)) {
		if (likely(cq->resize_buf)) {
			free_cq_buf(dev, &cq->buf);
			cq->buf = *cq->resize_buf;
			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
			goto repoll;
		} else {
			mlx5_ib_warn(dev, "unexpected resize cqe\n");
		}
	}

	qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff;
	if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) {
		/* We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		mqp = __mlx5_qp_lookup(dev->mdev, qpn);
		*cur_qp = to_mibqp(mqp);
	}

	wc->qp = &(*cur_qp)->ibqp;
	switch (opcode) {
	case MLX5_CQE_REQ:
		wq = &(*cur_qp)->sq;
		wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
		idx = wqe_ctr & (wq->wqe_cnt - 1);
		handle_good_req(wc, cqe64, wq, idx);
		handle_atomics(*cur_qp, cqe64, wq->last_poll, idx);
		wc->wr_id = wq->wrid[idx];
		wq->tail = wq->wqe_head[idx] + 1;
		wc->status = IB_WC_SUCCESS;
		break;
	case MLX5_CQE_RESP_WR_IMM:
	case MLX5_CQE_RESP_SEND:
	case MLX5_CQE_RESP_SEND_IMM:
	case MLX5_CQE_RESP_SEND_INV:
		handle_responder(wc, cqe64, *cur_qp);
		wc->status = IB_WC_SUCCESS;
		break;
	case MLX5_CQE_RESIZE_CQ:
		break;
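	/* Error CQEs carry a syndrome instead of normal completion data;
	 * translate it into an ib_wc status and still consume the WQE.
	 */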
	case MLX5_CQE_REQ_ERR:
	case MLX5_CQE_RESP_ERR:
		err_cqe = (struct mlx5_err_cqe *)cqe64;
		mlx5_handle_error_cqe(dev, err_cqe, wc);
		mlx5_ib_dbg(dev, "%s error cqe on cqn 0x%x:\n",
			    opcode == MLX5_CQE_REQ_ERR ?
			    "Requestor" : "Responder", cq->mcq.cqn);
		mlx5_ib_dbg(dev, "syndrome 0x%x, vendor syndrome 0x%x\n",
			    err_cqe->syndrome, err_cqe->vendor_err_synd);
		if (opcode == MLX5_CQE_REQ_ERR) {
			wq = &(*cur_qp)->sq;
			wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
			idx = wqe_ctr & (wq->wqe_cnt - 1);
			wc->wr_id = wq->wrid[idx];
			wq->tail = wq->wqe_head[idx] + 1;
		} else {
			struct mlx5_ib_srq *srq;

			if ((*cur_qp)->ibqp.srq) {
				srq = to_msrq((*cur_qp)->ibqp.srq);
				wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
				wc->wr_id = srq->wrid[wqe_ctr];
				mlx5_ib_free_srq_wqe(srq, wqe_ctr);
			} else {
				wq = &(*cur_qp)->rq;
				wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
				++wq->tail;
			}
		}
		break;
	case MLX5_CQE_SIG_ERR:
		sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64;

		read_lock(&dev->mdev->priv.mkey_table.lock);
		mmkey = __mlx5_mr_lookup(dev->mdev,
					 mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
		mr = to_mibmr(mmkey);
		get_sig_err_item(sig_err_cqe, &mr->sig->err_item);
		mr->sig->sig_err_exists = true;
		mr->sig->sigerr_count++;

		mlx5_ib_warn(dev, "CQN: 0x%x Got SIGERR on key: 0x%x err_type %x err_offset %llx expected %x actual %x\n",
			     cq->mcq.cqn, mr->sig->err_item.key,
			     mr->sig->err_item.err_type,
			     mr->sig->err_item.sig_err_offset,
			     mr->sig->err_item.expected,
			     mr->sig->err_item.actual);

		read_unlock(&dev->mdev->priv.mkey_table.lock);
		goto repoll;
	}

	return 0;
}

static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
			struct ib_wc *wc)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_ib_wc *soft_wc, *next;
	int npolled = 0;

	list_for_each_entry_safe(soft_wc, next, &cq->wc_list, list) {
		if (npolled >= num_entries)
			break;

		mlx5_ib_dbg(dev, "polled software generated completion on CQ 0x%x\n",
			    cq->mcq.cqn);

		wc[npolled++] = soft_wc->wc;
		list_del(&soft_wc->list);
		kfree(soft_wc);
	}

	return npolled;
}

int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	struct mlx5_ib_qp *cur_qp = NULL;
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_core_dev *mdev = dev->mdev;
	unsigned long flags;
	int soft_polled = 0;
	int npolled;

	spin_lock_irqsave(&cq->lock, flags);
	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mlx5_ib_poll_sw_comp(cq, num_entries, wc, &npolled);
		goto out;
	}

	if (unlikely(!list_empty(&cq->wc_list)))
		soft_polled = poll_soft_wc(cq, num_entries, wc);

	for (npolled = 0; npolled < num_entries - soft_polled; npolled++) {
		if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled))
			break;
	}

	if (npolled)
		mlx5_cq_set_ci(&cq->mcq);
out:
	spin_unlock_irqrestore(&cq->lock, flags);

	return soft_polled + npolled;
}

int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev;
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	void __iomem *uar_page = mdev->priv.uar->map;
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->lock, irq_flags);
	if (cq->notify_flags != IB_CQ_NEXT_COMP)
		cq->notify_flags = flags & IB_CQ_SOLICITED_MASK;

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !list_empty(&cq->wc_list))
		ret = 1;
	spin_unlock_irqrestore(&cq->lock, irq_flags);

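	/* Ring the arm doorbell so the HCA raises a completion event for
	 * the next (solicited or any) CQE written to this CQ.
	 */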
	mlx5_cq_arm(&cq->mcq,
		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		    MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
		    uar_page, to_mcq(ibcq)->mcq.cons_index);

	return ret;
}

static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
			int nent, int cqe_size)
{
	int err;

	err = mlx5_buf_alloc(dev->mdev, nent * cqe_size, &buf->buf);
	if (err)
		return err;

	buf->cqe_size = cqe_size;
	buf->nent = nent;

	return 0;
}

static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
			  struct ib_ucontext *context, struct mlx5_ib_cq *cq,
			  int entries, u32 **cqb,
			  int *cqe_size, int *index, int *inlen)
{
	struct mlx5_ib_create_cq ucmd = {};
	size_t ucmdlen;
	int page_shift;
	__be64 *pas;
	int npages;
	int ncont;
	void *cqc;
	int err;

	ucmdlen =
		(udata->inlen - sizeof(struct ib_uverbs_cmd_hdr) <
		 sizeof(ucmd)) ? (sizeof(ucmd) -
				  sizeof(ucmd.reserved)) : sizeof(ucmd);

	if (ib_copy_from_udata(&ucmd, udata, ucmdlen))
		return -EFAULT;

	if (ucmdlen == sizeof(ucmd) &&
	    ucmd.reserved != 0)
		return -EINVAL;

	if (ucmd.cqe_size != 64 && ucmd.cqe_size != 128)
		return -EINVAL;

	*cqe_size = ucmd.cqe_size;

	cq->buf.umem = ib_umem_get(context, ucmd.buf_addr,
				   entries * ucmd.cqe_size,
				   IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(cq->buf.umem)) {
		err = PTR_ERR(cq->buf.umem);
		return err;
	}

	err = mlx5_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
				  &cq->db);
	if (err)
		goto err_umem;

	mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, 0, &npages, &page_shift,
			   &ncont, NULL);
	mlx5_ib_dbg(dev, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n",
		    ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont);

	*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * ncont;
	*cqb = kvzalloc(*inlen, GFP_KERNEL);
	if (!*cqb) {
		err = -ENOMEM;
		goto err_db;
	}

	pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
	mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, pas, 0);

	cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
	MLX5_SET(cqc, cqc, log_page_size,
		 page_shift - MLX5_ADAPTER_PAGE_SHIFT);

	*index = to_mucontext(context)->bfregi.sys_pages[0];

	if (ucmd.cqe_comp_en == 1) {
		if (unlikely((*cqe_size != 64) ||
			     !MLX5_CAP_GEN(dev->mdev, cqe_compression))) {
			err = -EOPNOTSUPP;
			mlx5_ib_warn(dev, "CQE compression is not supported for size %d!\n",
				     *cqe_size);
			goto err_cqb;
		}

		if (unlikely(!ucmd.cqe_comp_res_format ||
			     !(ucmd.cqe_comp_res_format <
			       MLX5_IB_CQE_RES_RESERVED) ||
			     (ucmd.cqe_comp_res_format &
			      (ucmd.cqe_comp_res_format - 1)))) {
			err = -EOPNOTSUPP;
			mlx5_ib_warn(dev, "CQE compression res format %d is not supported!\n",
				     ucmd.cqe_comp_res_format);
			goto err_cqb;
		}

		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
		MLX5_SET(cqc, cqc, mini_cqe_res_format,
			 ilog2(ucmd.cqe_comp_res_format));
	}

	return 0;

err_cqb:
	kvfree(*cqb);

err_db:
	mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);

err_umem:
	ib_umem_release(cq->buf.umem);
	return err;
}

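/* Tear down a user-space CQ: unmap the doorbell record and release the
 * pinned user memory backing the CQE buffer.
 */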
static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_ucontext *context)
{
	mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);
	ib_umem_release(cq->buf.umem);
}

static void init_cq_buf(struct mlx5_ib_cq *cq, struct mlx5_ib_cq_buf *buf)
{
	int i;
	void *cqe;
	struct mlx5_cqe64 *cqe64;

	for (i = 0; i < buf->nent; i++) {
		cqe = get_cqe_from_buf(buf, i, buf->cqe_size);
		cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
		cqe64->op_own = MLX5_CQE_INVALID << 4;
	}
}

static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
			    int entries, int cqe_size,
			    u32 **cqb, int *index, int *inlen)
{
	__be64 *pas;
	void *cqc;
	int err;

	err = mlx5_db_alloc(dev->mdev, &cq->db);
	if (err)
		return err;

	cq->mcq.set_ci_db = cq->db.db;
	cq->mcq.arm_db = cq->db.db + 1;
	cq->mcq.cqe_sz = cqe_size;

	err = alloc_cq_buf(dev, &cq->buf, entries, cqe_size);
	if (err)
		goto err_db;

	init_cq_buf(cq, &cq->buf);

	*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * cq->buf.buf.npages;
	*cqb = kvzalloc(*inlen, GFP_KERNEL);
	if (!*cqb) {
		err = -ENOMEM;
		goto err_buf;
	}

	pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
	mlx5_fill_page_array(&cq->buf.buf, pas);

	cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
	MLX5_SET(cqc, cqc, log_page_size,
		 cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

	*index = dev->mdev->priv.uar->index;

	return 0;

err_buf:
	free_cq_buf(dev, &cq->buf);

err_db:
	mlx5_db_free(dev->mdev, &cq->db);
	return err;
}

static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
{
	free_cq_buf(dev, &cq->buf);
	mlx5_db_free(dev->mdev, &cq->db);
}

static void notify_soft_wc_handler(struct work_struct *work)
{
	struct mlx5_ib_cq *cq = container_of(work, struct mlx5_ib_cq,
					     notify_work);

	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}

struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
				const struct ib_cq_init_attr *attr,
				struct ib_ucontext *context,
				struct ib_udata *udata)
{
	int entries = attr->cqe;
	int vector = attr->comp_vector;
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_cq *cq;
	int uninitialized_var(index);
	int uninitialized_var(inlen);
	u32 *cqb = NULL;
	void *cqc;
	int cqe_size;
	unsigned int irqn;
	int eqn;
	int err;

	if (entries < 0 ||
	    (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))))
		return ERR_PTR(-EINVAL);

	if (check_cq_create_flags(attr->flags))
		return ERR_PTR(-EOPNOTSUPP);

	entries = roundup_pow_of_two(entries + 1);
	if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)))
		return ERR_PTR(-EINVAL);

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	cq->ibcq.cqe = entries - 1;
	mutex_init(&cq->resize_mutex);
	spin_lock_init(&cq->lock);
	cq->resize_buf = NULL;
	cq->resize_umem = NULL;
	cq->create_flags = attr->flags;
	INIT_LIST_HEAD(&cq->list_send_qp);
	INIT_LIST_HEAD(&cq->list_recv_qp);

	if (context) {
		err = create_cq_user(dev, udata, context, cq, entries,
				     &cqb, &cqe_size, &index, &inlen);
		if (err)
			goto err_create;
	} else {
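		/* Kernel CQ: pick a CQE size that matches the CPU cache
		 * line (128-byte CQEs on 128-byte lines, 64 otherwise).
		 */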
		cqe_size = cache_line_size() == 128 ? 128 : 64;
		err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
				       &index, &inlen);
		if (err)
			goto err_create;

		INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
	}

	err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn);
	if (err)
		goto err_cqb;

	cq->cqe_size = cqe_size;

	cqc = MLX5_ADDR_OF(create_cq_in, cqb, cq_context);
	MLX5_SET(cqc, cqc, cqe_sz, cqe_sz_to_mlx_sz(cqe_size));
	MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
	MLX5_SET(cqc, cqc, uar_page, index);
	MLX5_SET(cqc, cqc, c_eqn, eqn);
	MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma);
	if (cq->create_flags & IB_CQ_FLAGS_IGNORE_OVERRUN)
		MLX5_SET(cqc, cqc, oi, 1);

	err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen);
	if (err)
		goto err_cqb;

	mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
	cq->mcq.irqn = irqn;
	if (context)
		cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp;
	else
		cq->mcq.comp = mlx5_ib_cq_comp;
	cq->mcq.event = mlx5_ib_cq_event;

	INIT_LIST_HEAD(&cq->wc_list);

	if (context)
		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
			err = -EFAULT;
			goto err_cmd;
		}

	kvfree(cqb);
	return &cq->ibcq;

err_cmd:
	mlx5_core_destroy_cq(dev->mdev, &cq->mcq);

err_cqb:
	kvfree(cqb);
	if (context)
		destroy_cq_user(cq, context);
	else
		destroy_cq_kernel(dev, cq);

err_create:
	kfree(cq);

	return ERR_PTR(err);
}

int mlx5_ib_destroy_cq(struct ib_cq *cq)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->device);
	struct mlx5_ib_cq *mcq = to_mcq(cq);
	struct ib_ucontext *context = NULL;

	if (cq->uobject)
		context = cq->uobject->context;

	mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
	if (context)
		destroy_cq_user(mcq, context);
	else
		destroy_cq_kernel(dev, mcq);

	kfree(mcq);

	return 0;
}

static int is_equal_rsn(struct mlx5_cqe64 *cqe64, u32 rsn)
{
	return rsn == (ntohl(cqe64->sop_drop_qpn) & 0xffffff);
}

void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 rsn, struct mlx5_ib_srq *srq)
{
	struct mlx5_cqe64 *cqe64, *dest64;
	void *cqe, *dest;
	u32 prod_index;
	int nfreed = 0;
	u8 owner_bit;

	if (!cq)
		return;

	/* First we need to find the current producer index, so we
	 * know where to start cleaning from. It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); prod_index++)
		if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
			break;

	/* Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
		cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
		if (is_equal_rsn(cqe64, rsn)) {
			if (srq && (ntohl(cqe64->srqn) & 0xffffff))
				mlx5_ib_free_srq_wqe(srq, be16_to_cpu(cqe64->wqe_counter));
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
			dest64 = (cq->mcq.cqe_sz == 64) ? dest : dest + 64;
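			/* Keep the destination slot's ownership bit intact
			 * while overwriting it with the older entry.
			 */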
			owner_bit = dest64->op_own & MLX5_CQE_OWNER_MASK;
			memcpy(dest, cqe, cq->mcq.cqe_sz);
			dest64->op_own = owner_bit |
				(dest64->op_own & ~MLX5_CQE_OWNER_MASK);
		}
	}

	if (nfreed) {
		cq->mcq.cons_index += nfreed;
		/* Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		mlx5_cq_set_ci(&cq->mcq);
	}
}

void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq)
{
	if (!cq)
		return;

	spin_lock_irq(&cq->lock);
	__mlx5_ib_cq_clean(cq, qpn, srq);
	spin_unlock_irq(&cq->lock);
}

int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->device);
	struct mlx5_ib_cq *mcq = to_mcq(cq);
	int err;

	if (!MLX5_CAP_GEN(dev->mdev, cq_moderation))
		return -ENOSYS;

	err = mlx5_core_modify_cq_moderation(dev->mdev, &mcq->mcq,
					     cq_period, cq_count);
	if (err)
		mlx5_ib_warn(dev, "modify cq 0x%x failed\n", mcq->mcq.cqn);

	return err;
}

static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
		       int entries, struct ib_udata *udata, int *npas,
		       int *page_shift, int *cqe_size)
{
	struct mlx5_ib_resize_cq ucmd;
	struct ib_umem *umem;
	int err;
	int npages;
	struct ib_ucontext *context = cq->buf.umem->context;

	err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
	if (err)
		return err;

	if (ucmd.reserved0 || ucmd.reserved1)
		return -EINVAL;

	umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size,
			   IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(umem)) {
		err = PTR_ERR(umem);
		return err;
	}

	mlx5_ib_cont_pages(umem, ucmd.buf_addr, 0, &npages, page_shift,
			   npas, NULL);

	cq->resize_umem = umem;
	*cqe_size = ucmd.cqe_size;

	return 0;
}

static void un_resize_user(struct mlx5_ib_cq *cq)
{
	ib_umem_release(cq->resize_umem);
}

static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
			 int entries, int cqe_size)
{
	int err;

	cq->resize_buf = kzalloc(sizeof(*cq->resize_buf), GFP_KERNEL);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = alloc_cq_buf(dev, cq->resize_buf, entries, cqe_size);
	if (err)
		goto ex;

	init_cq_buf(cq, cq->resize_buf);

	return 0;

ex:
	kfree(cq->resize_buf);
	return err;
}

static void un_resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
{
	free_cq_buf(dev, cq->resize_buf);
	cq->resize_buf = NULL;
}

static int copy_resize_cqes(struct mlx5_ib_cq *cq)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_cqe64 *scqe64;
	struct mlx5_cqe64 *dcqe64;
	void *start_cqe;
	void *scqe;
	void *dcqe;
	int ssize;
	int dsize;
	int i;
	u8 sw_own;

	ssize = cq->buf.cqe_size;
	dsize = cq->resize_buf->cqe_size;
	if (ssize != dsize) {
		mlx5_ib_warn(dev, "resize from different cqe size is not supported\n");
		return -EINVAL;
	}

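	/* Copy the not-yet-polled CQEs from the old buffer into the resize
	 * buffer, fixing up each entry's software ownership bit, until the
	 * RESIZE_CQ CQE written by hardware is reached.
	 */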
	i = cq->mcq.cons_index;
	scqe = get_sw_cqe(cq, i);
	scqe64 = ssize == 64 ? scqe : scqe + 64;
	start_cqe = scqe;
	if (!scqe) {
		mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
		return -EINVAL;
	}

	while ((scqe64->op_own >> 4) != MLX5_CQE_RESIZE_CQ) {
		dcqe = get_cqe_from_buf(cq->resize_buf,
					(i + 1) & (cq->resize_buf->nent),
					dsize);
		dcqe64 = dsize == 64 ? dcqe : dcqe + 64;
		sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent);
		memcpy(dcqe, scqe, dsize);
		dcqe64->op_own = (dcqe64->op_own & ~MLX5_CQE_OWNER_MASK) | sw_own;

		++i;
		scqe = get_sw_cqe(cq, i);
		scqe64 = ssize == 64 ? scqe : scqe + 64;
		if (!scqe) {
			mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
			return -EINVAL;
		}

		if (scqe == start_cqe) {
			pr_warn("resize CQ failed to get resize CQE, CQN 0x%x\n",
				cq->mcq.cqn);
			return -ENOMEM;
		}
	}
	++cq->mcq.cons_index;
	return 0;
}

int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	void *cqc;
	u32 *in;
	int err;
	int npas;
	__be64 *pas;
	int page_shift;
	int inlen;
	int uninitialized_var(cqe_size);
	unsigned long flags;

	if (!MLX5_CAP_GEN(dev->mdev, cq_resize)) {
		pr_info("Firmware does not support resize CQ\n");
		return -ENOSYS;
	}

	if (entries < 1 ||
	    entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) {
		mlx5_ib_warn(dev, "wrong entries number %d, max %d\n",
			     entries,
			     1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz));
		return -EINVAL;
	}

	entries = roundup_pow_of_two(entries + 1);
	if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
		return -EINVAL;

	if (entries == ibcq->cqe + 1)
		return 0;

	mutex_lock(&cq->resize_mutex);
	if (udata) {
		err = resize_user(dev, cq, entries, udata, &npas, &page_shift,
				  &cqe_size);
	} else {
		cqe_size = 64;
		err = resize_kernel(dev, cq, entries, cqe_size);
		if (!err) {
			npas = cq->resize_buf->buf.npages;
			page_shift = cq->resize_buf->buf.page_shift;
		}
	}

	if (err)
		goto ex;

	inlen = MLX5_ST_SZ_BYTES(modify_cq_in) +
		MLX5_FLD_SZ_BYTES(modify_cq_in, pas[0]) * npas;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto ex_resize;
	}

	pas = (__be64 *)MLX5_ADDR_OF(modify_cq_in, in, pas);
	if (udata)
		mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift,
				     pas, 0);
	else
		mlx5_fill_page_array(&cq->resize_buf->buf, pas);

	MLX5_SET(modify_cq_in, in,
		 modify_field_select_resize_field_select.resize_field_select.resize_field_select,
		 MLX5_MODIFY_CQ_MASK_LOG_SIZE |
		 MLX5_MODIFY_CQ_MASK_PG_OFFSET |
		 MLX5_MODIFY_CQ_MASK_PG_SIZE);

	cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);

	MLX5_SET(cqc, cqc, log_page_size,
		 page_shift - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(cqc, cqc, cqe_sz, cqe_sz_to_mlx_sz(cqe_size));
	MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));

	MLX5_SET(modify_cq_in, in, op_mod, MLX5_CQ_OPMOD_RESIZE);
	MLX5_SET(modify_cq_in, in, cqn, cq->mcq.cqn);

	err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
	if (err)
		goto ex_alloc;

	if (udata) {
		cq->ibcq.cqe = entries - 1;
		ib_umem_release(cq->buf.umem);
		cq->buf.umem = cq->resize_umem;
		cq->resize_umem = NULL;
	} else {
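		/* Kernel CQ: swap in the resize buffer under the CQ lock
		 * after migrating any unpolled CQEs into it.
		 */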
		struct mlx5_ib_cq_buf tbuf;
		int resized = 0;

		spin_lock_irqsave(&cq->lock, flags);
		if (cq->resize_buf) {
			err = copy_resize_cqes(cq);
			if (!err) {
				tbuf = cq->buf;
				cq->buf = *cq->resize_buf;
				kfree(cq->resize_buf);
				cq->resize_buf = NULL;
				resized = 1;
			}
		}
		cq->ibcq.cqe = entries - 1;
		spin_unlock_irqrestore(&cq->lock, flags);
		if (resized)
			free_cq_buf(dev, &tbuf);
	}
	mutex_unlock(&cq->resize_mutex);

	kvfree(in);
	return 0;

ex_alloc:
	kvfree(in);

ex_resize:
	if (udata)
		un_resize_user(cq);
	else
		un_resize_kernel(dev, cq);
ex:
	mutex_unlock(&cq->resize_mutex);
	return err;
}

int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq)
{
	struct mlx5_ib_cq *cq;

	if (!ibcq)
		return 128;

	cq = to_mcq(ibcq);
	return cq->cqe_size;
}

/* Called from atomic context */
int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc)
{
	struct mlx5_ib_wc *soft_wc;
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	unsigned long flags;

	soft_wc = kmalloc(sizeof(*soft_wc), GFP_ATOMIC);
	if (!soft_wc)
		return -ENOMEM;

	soft_wc->wc = *wc;
	spin_lock_irqsave(&cq->lock, flags);
	list_add_tail(&soft_wc->list, &cq->wc_list);
	if (cq->notify_flags == IB_CQ_NEXT_COMP ||
	    wc->status != IB_WC_SUCCESS) {
		cq->notify_flags = 0;
		schedule_work(&cq->notify_work);
	}
	spin_unlock_irqrestore(&cq->lock, flags);

	return 0;
}