/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kref.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h>
#include "mlx5_ib.h"
#include "srq.h"
#include "qp.h"

static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe)
{
	struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;

	ibcq->comp_handler(ibcq, ibcq->cq_context);
}

static void mlx5_ib_cq_event(struct mlx5_core_cq *mcq, enum mlx5_event type)
{
	struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq);
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct ib_cq *ibcq = &cq->ibcq;
	struct ib_event event;

	if (type != MLX5_EVENT_TYPE_CQ_ERROR) {
		mlx5_ib_warn(dev, "Unexpected event type %d on CQ %06x\n",
			     type, mcq->cqn);
		return;
	}

	if (ibcq->event_handler) {
		event.device = &dev->ib_dev;
		event.event = IB_EVENT_CQ_ERR;
		event.element.cq = ibcq;
		ibcq->event_handler(&event, ibcq->cq_context);
	}
}

static void *get_cqe(struct mlx5_ib_cq *cq, int n)
{
	return mlx5_frag_buf_get_wqe(&cq->buf.fbc, n);
}

static u8 sw_ownership_bit(int n, int nent)
{
	return (n & nent) ? 1 : 0;
}

static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n)
{
	void *cqe = get_cqe(cq, n & cq->ibcq.cqe);
	struct mlx5_cqe64 *cqe64;

	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;

	if (likely(get_cqe_opcode(cqe64) != MLX5_CQE_INVALID) &&
	    !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {
		return cqe;
	} else {
		return NULL;
	}
}

static void *next_cqe_sw(struct mlx5_ib_cq *cq)
{
	return get_sw_cqe(cq, cq->mcq.cons_index);
}

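/* UMR-class WQEs (driver UMRs, local invalidate, fast registration)
 * complete with the same hardware opcode; recover the consumer-visible
 * work-completion opcode from the wr_data value stashed at post time.
 */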
static enum ib_wc_opcode get_umr_comp(struct mlx5_ib_wq *wq, int idx)
{
	switch (wq->wr_data[idx]) {
	case MLX5_IB_WR_UMR:
		return 0;

	case IB_WR_LOCAL_INV:
		return IB_WC_LOCAL_INV;

	case IB_WR_REG_MR:
		return IB_WC_REG_MR;

	default:
		pr_warn("unknown completion status\n");
		return 0;
	}
}

static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
			    struct mlx5_ib_wq *wq, int idx)
{
	wc->wc_flags = 0;
	switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) {
	case MLX5_OPCODE_RDMA_WRITE_IMM:
		wc->wc_flags |= IB_WC_WITH_IMM;
		fallthrough;
	case MLX5_OPCODE_RDMA_WRITE:
		wc->opcode = IB_WC_RDMA_WRITE;
		break;
	case MLX5_OPCODE_SEND_IMM:
		wc->wc_flags |= IB_WC_WITH_IMM;
		fallthrough;
	case MLX5_OPCODE_SEND:
	case MLX5_OPCODE_SEND_INVAL:
		wc->opcode = IB_WC_SEND;
		break;
	case MLX5_OPCODE_RDMA_READ:
		wc->opcode = IB_WC_RDMA_READ;
		wc->byte_len = be32_to_cpu(cqe->byte_cnt);
		break;
	case MLX5_OPCODE_ATOMIC_CS:
		wc->opcode = IB_WC_COMP_SWAP;
		wc->byte_len = 8;
		break;
	case MLX5_OPCODE_ATOMIC_FA:
		wc->opcode = IB_WC_FETCH_ADD;
		wc->byte_len = 8;
		break;
	case MLX5_OPCODE_ATOMIC_MASKED_CS:
		wc->opcode = IB_WC_MASKED_COMP_SWAP;
		wc->byte_len = 8;
		break;
	case MLX5_OPCODE_ATOMIC_MASKED_FA:
		wc->opcode = IB_WC_MASKED_FETCH_ADD;
		wc->byte_len = 8;
		break;
	case MLX5_OPCODE_UMR:
		wc->opcode = get_umr_comp(wq, idx);
		break;
	}
}

enum {
	MLX5_GRH_IN_BUFFER = 1,
	MLX5_GRH_IN_CQE = 2,
};

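/* Fill in a receive work completion from a responder CQE: reclaim the
 * RQ/SRQ WQE, then decode the opcode, immediate/invalidate data and the
 * link-layer specific fields (LID/SL for IB, VLAN and network header
 * type for RoCE).
 */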
static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
			     struct mlx5_ib_qp *qp)
{
	enum rdma_link_layer ll = rdma_port_get_link_layer(qp->ibqp.device, 1);
	struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
	struct mlx5_ib_srq *srq = NULL;
	struct mlx5_ib_wq *wq;
	u16 wqe_ctr;
	u8 roce_packet_type;
	bool vlan_present;
	u8 g;

	if (qp->ibqp.srq || qp->ibqp.xrcd) {
		struct mlx5_core_srq *msrq = NULL;

		if (qp->ibqp.xrcd) {
			msrq = mlx5_cmd_get_srq(dev, be32_to_cpu(cqe->srqn));
			if (msrq)
				srq = to_mibsrq(msrq);
		} else {
			srq = to_msrq(qp->ibqp.srq);
		}
		if (srq) {
			wqe_ctr = be16_to_cpu(cqe->wqe_counter);
			wc->wr_id = srq->wrid[wqe_ctr];
			mlx5_ib_free_srq_wqe(srq, wqe_ctr);
			if (msrq)
				mlx5_core_res_put(&msrq->common);
		}
	} else {
		wq = &qp->rq;
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	}
	wc->byte_len = be32_to_cpu(cqe->byte_cnt);

	switch (get_cqe_opcode(cqe)) {
	case MLX5_CQE_RESP_WR_IMM:
		wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
		wc->wc_flags = IB_WC_WITH_IMM;
		wc->ex.imm_data = cqe->immediate;
		break;
	case MLX5_CQE_RESP_SEND:
		wc->opcode = IB_WC_RECV;
		wc->wc_flags = IB_WC_IP_CSUM_OK;
		if (unlikely(!((cqe->hds_ip_ext & CQE_L3_OK) &&
			       (cqe->hds_ip_ext & CQE_L4_OK))))
			wc->wc_flags = 0;
		break;
	case MLX5_CQE_RESP_SEND_IMM:
		wc->opcode = IB_WC_RECV;
		wc->wc_flags = IB_WC_WITH_IMM;
		wc->ex.imm_data = cqe->immediate;
		break;
	case MLX5_CQE_RESP_SEND_INV:
		wc->opcode = IB_WC_RECV;
		wc->wc_flags = IB_WC_WITH_INVALIDATE;
		wc->ex.invalidate_rkey = be32_to_cpu(cqe->inval_rkey);
		break;
	}
	wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
	wc->dlid_path_bits = cqe->ml_path;
	g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
	wc->wc_flags |= g ? IB_WC_GRH : 0;
	if (unlikely(is_qp1(qp->ibqp.qp_type))) {
		u16 pkey = be32_to_cpu(cqe->pkey) & 0xffff;

		ib_find_cached_pkey(&dev->ib_dev, qp->port, pkey,
				    &wc->pkey_index);
	} else {
		wc->pkey_index = 0;
	}

	if (ll != IB_LINK_LAYER_ETHERNET) {
		wc->slid = be16_to_cpu(cqe->slid);
		wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
		return;
	}

	wc->slid = 0;
	vlan_present = cqe->l4_l3_hdr_type & 0x1;
	roce_packet_type = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3;
	if (vlan_present) {
		wc->vlan_id = (be16_to_cpu(cqe->vlan_info)) & 0xfff;
		wc->sl = (be16_to_cpu(cqe->vlan_info) >> 13) & 0x7;
		wc->wc_flags |= IB_WC_WITH_VLAN;
	} else {
		wc->sl = 0;
	}

	switch (roce_packet_type) {
	case MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH:
		wc->network_hdr_type = RDMA_NETWORK_ROCE_V1;
		break;
	case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6:
		wc->network_hdr_type = RDMA_NETWORK_IPV6;
		break;
	case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4:
		wc->network_hdr_type = RDMA_NETWORK_IPV4;
		break;
	}
	wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
}

static void dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe)
{
	mlx5_ib_warn(dev, "dump error cqe\n");
	mlx5_dump_err_cqe(dev->mdev, cqe);
}

static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev,
				  struct mlx5_err_cqe *cqe,
				  struct ib_wc *wc)
{
	int dump = 1;

	switch (cqe->syndrome) {
	case MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_PROT_ERR:
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case MLX5_CQE_SYNDROME_WR_FLUSH_ERR:
		dump = 0;
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case MLX5_CQE_SYNDROME_MW_BIND_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case MLX5_CQE_SYNDROME_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR:
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_OP_ERR:
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IB_WC_RETRY_EXC_ERR;
		dump = 0;
		break;
	case MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		dump = 0;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR:
		wc->status = IB_WC_REM_ABORT_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

	wc->vendor_err = cqe->vendor_err_synd;
	if (dump)
		dump_cqe(dev, cqe);
}

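/* Advance sq.last_poll past the WQE reported by this completion by
 * walking the send work-request list from the previous poll position.
 */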
static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
			   u16 tail, u16 head)
{
	u16 idx;

	do {
		idx = tail & (qp->sq.wqe_cnt - 1);
		if (idx == head)
			break;

		tail = qp->sq.w_list[idx].next;
	} while (1);
	tail = qp->sq.w_list[idx].next;
	qp->sq.last_poll = tail;
}

static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
{
	mlx5_frag_buf_free(dev->mdev, &buf->frag_buf);
}

static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
			     struct ib_sig_err *item)
{
	u16 syndrome = be16_to_cpu(cqe->syndrome);

#define GUARD_ERR   (1 << 13)
#define APPTAG_ERR  (1 << 12)
#define REFTAG_ERR  (1 << 11)

	if (syndrome & GUARD_ERR) {
		item->err_type = IB_SIG_BAD_GUARD;
		item->expected = be32_to_cpu(cqe->expected_trans_sig) >> 16;
		item->actual = be32_to_cpu(cqe->actual_trans_sig) >> 16;
	} else if (syndrome & REFTAG_ERR) {
		item->err_type = IB_SIG_BAD_REFTAG;
		item->expected = be32_to_cpu(cqe->expected_reftag);
		item->actual = be32_to_cpu(cqe->actual_reftag);
	} else if (syndrome & APPTAG_ERR) {
		item->err_type = IB_SIG_BAD_APPTAG;
		item->expected = be32_to_cpu(cqe->expected_trans_sig) & 0xffff;
		item->actual = be32_to_cpu(cqe->actual_trans_sig) & 0xffff;
	} else {
		pr_err("Got signature completion error with bad syndrome %04x\n",
		       syndrome);
	}

	item->sig_err_offset = be64_to_cpu(cqe->err_offset);
	item->key = be32_to_cpu(cqe->mkey);
}

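/* Generate IB_WC_WR_FLUSH_ERR completions in software for WQEs still
 * outstanding on a queue; used when the device is in internal error
 * and the hardware can no longer produce CQEs.
 */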
static void sw_comp(struct mlx5_ib_qp *qp, int num_entries, struct ib_wc *wc,
		    int *npolled, bool is_send)
{
	struct mlx5_ib_wq *wq;
	unsigned int cur;
	int np;
	int i;

	wq = (is_send) ? &qp->sq : &qp->rq;
	cur = wq->head - wq->tail;
	np = *npolled;

	if (cur == 0)
		return;

	for (i = 0; i < cur && np < num_entries; i++) {
		unsigned int idx;

		idx = (is_send) ? wq->last_poll : wq->tail;
		idx &= (wq->wqe_cnt - 1);
		wc->wr_id = wq->wrid[idx];
		wc->status = IB_WC_WR_FLUSH_ERR;
		wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
		wq->tail++;
		if (is_send)
			wq->last_poll = wq->w_list[idx].next;
		np++;
		wc->qp = &qp->ibqp;
		wc++;
	}
	*npolled = np;
}

static void mlx5_ib_poll_sw_comp(struct mlx5_ib_cq *cq, int num_entries,
				 struct ib_wc *wc, int *npolled)
{
	struct mlx5_ib_qp *qp;

	*npolled = 0;
	/* Find uncompleted WQEs belonging to that cq and return
	 * simulated flush-error completions for them.
	 */
	list_for_each_entry(qp, &cq->list_send_qp, cq_send_list) {
		sw_comp(qp, num_entries, wc + *npolled, npolled, true);
		if (*npolled >= num_entries)
			return;
	}

	list_for_each_entry(qp, &cq->list_recv_qp, cq_recv_list) {
		sw_comp(qp, num_entries, wc + *npolled, npolled, false);
		if (*npolled >= num_entries)
			return;
	}
}

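/* Poll a single hardware CQE: advance the consumer index, locate the
 * QP it belongs to and translate the CQE into an ib_wc. Returns
 * -EAGAIN when no CQE is ready. Resize and signature-error CQEs are
 * consumed internally and polling continues with the next entry.
 */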
static int mlx5_poll_one(struct mlx5_ib_cq *cq,
			 struct mlx5_ib_qp **cur_qp,
			 struct ib_wc *wc)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_err_cqe *err_cqe;
	struct mlx5_cqe64 *cqe64;
	struct mlx5_core_qp *mqp;
	struct mlx5_ib_wq *wq;
	uint8_t opcode;
	uint32_t qpn;
	u16 wqe_ctr;
	void *cqe;
	int idx;

repoll:
	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;

	++cq->mcq.cons_index;

	/* Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	opcode = get_cqe_opcode(cqe64);
	if (unlikely(opcode == MLX5_CQE_RESIZE_CQ)) {
		if (likely(cq->resize_buf)) {
			free_cq_buf(dev, &cq->buf);
			cq->buf = *cq->resize_buf;
			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
			goto repoll;
		} else {
			mlx5_ib_warn(dev, "unexpected resize cqe\n");
		}
	}

	qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff;
	if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) {
		/* We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		mqp = radix_tree_lookup(&dev->qp_table.tree, qpn);
		*cur_qp = to_mibqp(mqp);
	}

	wc->qp = &(*cur_qp)->ibqp;
	switch (opcode) {
	case MLX5_CQE_REQ:
		wq = &(*cur_qp)->sq;
		wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
		idx = wqe_ctr & (wq->wqe_cnt - 1);
		handle_good_req(wc, cqe64, wq, idx);
		handle_atomics(*cur_qp, cqe64, wq->last_poll, idx);
		wc->wr_id = wq->wrid[idx];
		wq->tail = wq->wqe_head[idx] + 1;
		wc->status = IB_WC_SUCCESS;
		break;
	case MLX5_CQE_RESP_WR_IMM:
	case MLX5_CQE_RESP_SEND:
	case MLX5_CQE_RESP_SEND_IMM:
	case MLX5_CQE_RESP_SEND_INV:
		handle_responder(wc, cqe64, *cur_qp);
		wc->status = IB_WC_SUCCESS;
		break;
	case MLX5_CQE_RESIZE_CQ:
		break;
	case MLX5_CQE_REQ_ERR:
	case MLX5_CQE_RESP_ERR:
		err_cqe = (struct mlx5_err_cqe *)cqe64;
		mlx5_handle_error_cqe(dev, err_cqe, wc);
		mlx5_ib_dbg(dev, "%s error cqe on cqn 0x%x:\n",
			    opcode == MLX5_CQE_REQ_ERR ?
			    "Requestor" : "Responder", cq->mcq.cqn);
		mlx5_ib_dbg(dev, "syndrome 0x%x, vendor syndrome 0x%x\n",
			    err_cqe->syndrome, err_cqe->vendor_err_synd);
		if (opcode == MLX5_CQE_REQ_ERR) {
			wq = &(*cur_qp)->sq;
			wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
			idx = wqe_ctr & (wq->wqe_cnt - 1);
			wc->wr_id = wq->wrid[idx];
			wq->tail = wq->wqe_head[idx] + 1;
		} else {
			struct mlx5_ib_srq *srq;

			if ((*cur_qp)->ibqp.srq) {
				srq = to_msrq((*cur_qp)->ibqp.srq);
				wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
				wc->wr_id = srq->wrid[wqe_ctr];
				mlx5_ib_free_srq_wqe(srq, wqe_ctr);
			} else {
				wq = &(*cur_qp)->rq;
				wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
				++wq->tail;
			}
		}
		break;
	case MLX5_CQE_SIG_ERR: {
		struct mlx5_sig_err_cqe *sig_err_cqe =
			(struct mlx5_sig_err_cqe *)cqe64;
		struct mlx5_core_sig_ctx *sig;

		xa_lock(&dev->sig_mrs);
		sig = xa_load(&dev->sig_mrs,
			      mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
		get_sig_err_item(sig_err_cqe, &sig->err_item);
		sig->sig_err_exists = true;
		sig->sigerr_count++;

		mlx5_ib_warn(dev, "CQN: 0x%x Got SIGERR on key: 0x%x err_type %x err_offset %llx expected %x actual %x\n",
			     cq->mcq.cqn, sig->err_item.key,
			     sig->err_item.err_type,
			     sig->err_item.sig_err_offset,
			     sig->err_item.expected,
			     sig->err_item.actual);

		xa_unlock(&dev->sig_mrs);
		goto repoll;
	}
	}

	return 0;
}

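/* Drain up to num_entries software-generated completions queued on
 * cq->wc_list; when the device is in fatal error they are reported as
 * flush errors.
 */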
static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
			struct ib_wc *wc, bool is_fatal_err)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_ib_wc *soft_wc, *next;
	int npolled = 0;

	list_for_each_entry_safe(soft_wc, next, &cq->wc_list, list) {
		if (npolled >= num_entries)
			break;

		mlx5_ib_dbg(dev, "polled software generated completion on CQ 0x%x\n",
			    cq->mcq.cqn);

		if (unlikely(is_fatal_err)) {
			soft_wc->wc.status = IB_WC_WR_FLUSH_ERR;
			soft_wc->wc.vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
		}
		wc[npolled++] = soft_wc->wc;
		list_del(&soft_wc->list);
		kfree(soft_wc);
	}

	return npolled;
}

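/* Poll entry point: software completions queued by
 * mlx5_ib_generate_wc() are drained first, then hardware CQEs; on
 * internal error only software flush completions are generated.
 */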
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	struct mlx5_ib_qp *cur_qp = NULL;
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_core_dev *mdev = dev->mdev;
	unsigned long flags;
	int soft_polled = 0;
	int npolled;

	spin_lock_irqsave(&cq->lock, flags);
	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		/* make sure no soft wqe's are waiting */
		if (unlikely(!list_empty(&cq->wc_list)))
			soft_polled = poll_soft_wc(cq, num_entries, wc, true);

		mlx5_ib_poll_sw_comp(cq, num_entries - soft_polled,
				     wc + soft_polled, &npolled);
		goto out;
	}

	if (unlikely(!list_empty(&cq->wc_list)))
		soft_polled = poll_soft_wc(cq, num_entries, wc, false);

	for (npolled = 0; npolled < num_entries - soft_polled; npolled++) {
		if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled))
			break;
	}

	if (npolled)
		mlx5_cq_set_ci(&cq->mcq);
out:
	spin_unlock_irqrestore(&cq->lock, flags);

	return soft_polled + npolled;
}

int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev;
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	void __iomem *uar_page = mdev->priv.uar->map;
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->lock, irq_flags);
	if (cq->notify_flags != IB_CQ_NEXT_COMP)
		cq->notify_flags = flags & IB_CQ_SOLICITED_MASK;

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !list_empty(&cq->wc_list))
		ret = 1;
	spin_unlock_irqrestore(&cq->lock, irq_flags);

	mlx5_cq_arm(&cq->mcq,
		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		    MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
		    uar_page, to_mcq(ibcq)->mcq.cons_index);

	return ret;
}

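/* Allocate a fragmented buffer for nent CQEs of cqe_size bytes and set
 * up the frag-buf control structure used for CQE indexing.
 */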
static int alloc_cq_frag_buf(struct mlx5_ib_dev *dev,
			     struct mlx5_ib_cq_buf *buf,
			     int nent,
			     int cqe_size)
{
	struct mlx5_frag_buf *frag_buf = &buf->frag_buf;
	u8 log_wq_stride = 6 + (cqe_size == 128 ? 1 : 0);
	u8 log_wq_sz = ilog2(cqe_size);
	int err;

	err = mlx5_frag_buf_alloc_node(dev->mdev,
				       nent * cqe_size,
				       frag_buf,
				       dev->mdev->priv.numa_node);
	if (err)
		return err;

	mlx5_init_fbc(frag_buf->frags, log_wq_stride, log_wq_sz, &buf->fbc);

	buf->cqe_size = cqe_size;
	buf->nent = nent;

	return 0;
}

enum {
	MLX5_CQE_RES_FORMAT_HASH = 0,
	MLX5_CQE_RES_FORMAT_CSUM = 1,
	MLX5_CQE_RES_FORMAT_CSUM_STRIDX = 3,
};

static int mini_cqe_res_format_to_hw(struct mlx5_ib_dev *dev, u8 format)
{
	switch (format) {
	case MLX5_IB_CQE_RES_FORMAT_HASH:
		return MLX5_CQE_RES_FORMAT_HASH;
	case MLX5_IB_CQE_RES_FORMAT_CSUM:
		return MLX5_CQE_RES_FORMAT_CSUM;
	case MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX:
		if (MLX5_CAP_GEN(dev->mdev, mini_cqe_resp_stride_index))
			return MLX5_CQE_RES_FORMAT_CSUM_STRIDX;
		return -EOPNOTSUPP;
	default:
		return -EINVAL;
	}
}

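/* Create a CQ whose buffer and doorbell record live in user memory:
 * pin the umem, pick the best page size, and build the CREATE_CQ
 * command, validating CQE compression and padding flags against
 * device capabilities.
 */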
static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
			  struct mlx5_ib_cq *cq, int entries, u32 **cqb,
			  int *cqe_size, int *index, int *inlen)
{
	struct mlx5_ib_create_cq ucmd = {};
	unsigned long page_size;
	unsigned int page_offset_quantized;
	size_t ucmdlen;
	__be64 *pas;
	int ncont;
	void *cqc;
	int err;
	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);

	ucmdlen = min(udata->inlen, sizeof(ucmd));
	if (ucmdlen < offsetof(struct mlx5_ib_create_cq, flags))
		return -EINVAL;

	if (ib_copy_from_udata(&ucmd, udata, ucmdlen))
		return -EFAULT;

	if ((ucmd.flags & ~(MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD |
			    MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX)))
		return -EINVAL;

	if ((ucmd.cqe_size != 64 && ucmd.cqe_size != 128) ||
	    ucmd.reserved0 || ucmd.reserved1)
		return -EINVAL;

	*cqe_size = ucmd.cqe_size;

	cq->buf.umem =
		ib_umem_get(&dev->ib_dev, ucmd.buf_addr,
			    entries * ucmd.cqe_size, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(cq->buf.umem)) {
		err = PTR_ERR(cq->buf.umem);
		return err;
	}

	page_size = mlx5_umem_find_best_cq_quantized_pgoff(
		cq->buf.umem, cqc, log_page_size, MLX5_ADAPTER_PAGE_SHIFT,
		page_offset, 64, &page_offset_quantized);
	if (!page_size) {
		err = -EINVAL;
		goto err_umem;
	}

	err = mlx5_ib_db_map_user(context, udata, ucmd.db_addr, &cq->db);
	if (err)
		goto err_umem;

	ncont = ib_umem_num_dma_blocks(cq->buf.umem, page_size);
	mlx5_ib_dbg(
		dev,
		"addr 0x%llx, size %u, npages %zu, page_size %lu, ncont %d\n",
		ucmd.buf_addr, entries * ucmd.cqe_size,
		ib_umem_num_pages(cq->buf.umem), page_size, ncont);

	*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * ncont;
	*cqb = kvzalloc(*inlen, GFP_KERNEL);
	if (!*cqb) {
		err = -ENOMEM;
		goto err_db;
	}

	pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
	mlx5_ib_populate_pas(cq->buf.umem, page_size, pas, 0);

	cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
	MLX5_SET(cqc, cqc, log_page_size,
		 order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(cqc, cqc, page_offset, page_offset_quantized);

	if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX) {
		*index = ucmd.uar_page_index;
	} else if (context->bfregi.lib_uar_dyn) {
		err = -EINVAL;
		goto err_cqb;
	} else {
		*index = context->bfregi.sys_pages[0];
	}

	if (ucmd.cqe_comp_en == 1) {
		int mini_cqe_format;

		if (!((*cqe_size == 128 &&
		       MLX5_CAP_GEN(dev->mdev, cqe_compression_128)) ||
		      (*cqe_size == 64 &&
		       MLX5_CAP_GEN(dev->mdev, cqe_compression)))) {
			err = -EOPNOTSUPP;
			mlx5_ib_warn(dev, "CQE compression is not supported for size %d!\n",
				     *cqe_size);
			goto err_cqb;
		}

		mini_cqe_format =
			mini_cqe_res_format_to_hw(dev,
						  ucmd.cqe_comp_res_format);
		if (mini_cqe_format < 0) {
			err = mini_cqe_format;
			mlx5_ib_dbg(dev, "CQE compression res format %d error: %d\n",
				    ucmd.cqe_comp_res_format, err);
			goto err_cqb;
		}

		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
		MLX5_SET(cqc, cqc, mini_cqe_res_format, mini_cqe_format);
	}

	if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD) {
		if (*cqe_size != 128 ||
		    !MLX5_CAP_GEN(dev->mdev, cqe_128_always)) {
			err = -EOPNOTSUPP;
			mlx5_ib_warn(dev,
				     "CQE padding is not supported for CQE size of %dB!\n",
				     *cqe_size);
			goto err_cqb;
		}

		cq->private_flags |= MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD;
	}

	MLX5_SET(create_cq_in, *cqb, uid, context->devx_uid);
	return 0;

err_cqb:
	kvfree(*cqb);

err_db:
	mlx5_ib_db_unmap_user(context, &cq->db);

err_umem:
	ib_umem_release(cq->buf.umem);
	return err;
}

static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_udata *udata)
{
	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);

	mlx5_ib_db_unmap_user(context, &cq->db);
	ib_umem_release(cq->buf.umem);
}

static void init_cq_frag_buf(struct mlx5_ib_cq_buf *buf)
{
	int i;
	void *cqe;
	struct mlx5_cqe64 *cqe64;

	for (i = 0; i < buf->nent; i++) {
		cqe = mlx5_frag_buf_get_wqe(&buf->fbc, i);
		cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
		cqe64->op_own = MLX5_CQE_INVALID << 4;
	}
}

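/* Kernel-owned CQs allocate the doorbell record and a fragmented CQE
 * buffer in the driver and initialize all CQEs to invalid ownership.
 */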
static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
			    int entries, int cqe_size,
			    u32 **cqb, int *index, int *inlen)
{
	__be64 *pas;
	void *cqc;
	int err;

	err = mlx5_db_alloc(dev->mdev, &cq->db);
	if (err)
		return err;

	cq->mcq.set_ci_db = cq->db.db;
	cq->mcq.arm_db = cq->db.db + 1;
	cq->mcq.cqe_sz = cqe_size;

	err = alloc_cq_frag_buf(dev, &cq->buf, entries, cqe_size);
	if (err)
		goto err_db;

	init_cq_frag_buf(&cq->buf);

	*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) *
		 cq->buf.frag_buf.npages;
	*cqb = kvzalloc(*inlen, GFP_KERNEL);
	if (!*cqb) {
		err = -ENOMEM;
		goto err_buf;
	}

	pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
	mlx5_fill_page_frag_array(&cq->buf.frag_buf, pas);

	cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
	MLX5_SET(cqc, cqc, log_page_size,
		 cq->buf.frag_buf.page_shift -
		 MLX5_ADAPTER_PAGE_SHIFT);

	*index = dev->mdev->priv.uar->index;

	return 0;

err_buf:
	free_cq_buf(dev, &cq->buf);

err_db:
	mlx5_db_free(dev->mdev, &cq->db);
	return err;
}

static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
{
	free_cq_buf(dev, &cq->buf);
	mlx5_db_free(dev->mdev, &cq->db);
}

static void notify_soft_wc_handler(struct work_struct *work)
{
	struct mlx5_ib_cq *cq = container_of(work, struct mlx5_ib_cq,
					     notify_work);

	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}

int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		      struct ib_udata *udata)
{
	struct ib_device *ibdev = ibcq->device;
	int entries = attr->cqe;
	int vector = attr->comp_vector;
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	u32 out[MLX5_ST_SZ_DW(create_cq_out)];
	int index;
	int inlen;
	u32 *cqb = NULL;
	void *cqc;
	int cqe_size;
	unsigned int irqn;
	int eqn;
	int err;

	if (entries < 0 ||
	    (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))))
		return -EINVAL;

	if (check_cq_create_flags(attr->flags))
		return -EOPNOTSUPP;

	entries = roundup_pow_of_two(entries + 1);
	if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)))
		return -EINVAL;

	cq->ibcq.cqe = entries - 1;
	mutex_init(&cq->resize_mutex);
	spin_lock_init(&cq->lock);
	cq->resize_buf = NULL;
	cq->resize_umem = NULL;
	cq->create_flags = attr->flags;
	INIT_LIST_HEAD(&cq->list_send_qp);
	INIT_LIST_HEAD(&cq->list_recv_qp);

	if (udata) {
		err = create_cq_user(dev, udata, cq, entries, &cqb, &cqe_size,
				     &index, &inlen);
		if (err)
			return err;
	} else {
		cqe_size = cache_line_size() == 128 ? 128 : 64;
		err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
				       &index, &inlen);
		if (err)
			return err;

		INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
	}

	err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn);
	if (err)
		goto err_cqb;

	cq->cqe_size = cqe_size;

	cqc = MLX5_ADDR_OF(create_cq_in, cqb, cq_context);
	MLX5_SET(cqc, cqc, cqe_sz,
		 cqe_sz_to_mlx_sz(cqe_size,
				  cq->private_flags &
				  MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD));
	MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
	MLX5_SET(cqc, cqc, uar_page, index);
	MLX5_SET(cqc, cqc, c_eqn, eqn);
	MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma);
	if (cq->create_flags & IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN)
		MLX5_SET(cqc, cqc, oi, 1);

	err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen, out, sizeof(out));
	if (err)
		goto err_cqb;

	mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
	cq->mcq.irqn = irqn;
	if (udata)
		cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp;
	else
		cq->mcq.comp = mlx5_ib_cq_comp;
	cq->mcq.event = mlx5_ib_cq_event;

	INIT_LIST_HEAD(&cq->wc_list);

	if (udata)
		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
			err = -EFAULT;
			goto err_cmd;
		}

	kvfree(cqb);
	return 0;

err_cmd:
	mlx5_core_destroy_cq(dev->mdev, &cq->mcq);

err_cqb:
	kvfree(cqb);
	if (udata)
		destroy_cq_user(cq, udata);
	else
		destroy_cq_kernel(dev, cq);
	return err;
}

int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->device);
	struct mlx5_ib_cq *mcq = to_mcq(cq);
	int ret;

	ret = mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
	if (ret)
		return ret;

	if (udata)
		destroy_cq_user(mcq, udata);
	else
		destroy_cq_kernel(dev, mcq);
	return 0;
}

static int is_equal_rsn(struct mlx5_cqe64 *cqe64, u32 rsn)
{
	return rsn == (ntohl(cqe64->sop_drop_qpn) & 0xffffff);
}

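/* Remove all CQEs whose QP or SRQ number matches rsn, compacting the
 * queue by copying the remaining entries back over them. The caller
 * must already hold the CQ lock; mlx5_ib_cq_clean() below is the
 * locked wrapper.
 */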
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 rsn, struct mlx5_ib_srq *srq)
{
	struct mlx5_cqe64 *cqe64, *dest64;
	void *cqe, *dest;
	u32 prod_index;
	int nfreed = 0;
	u8 owner_bit;

	if (!cq)
		return;

	/* First we need to find the current producer index, so we
	 * know where to start cleaning from. It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); prod_index++)
		if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
			break;

	/* Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
		cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
		if (is_equal_rsn(cqe64, rsn)) {
			if (srq && (ntohl(cqe64->srqn) & 0xffffff))
				mlx5_ib_free_srq_wqe(srq, be16_to_cpu(cqe64->wqe_counter));
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
			dest64 = (cq->mcq.cqe_sz == 64) ? dest : dest + 64;
			owner_bit = dest64->op_own & MLX5_CQE_OWNER_MASK;
			memcpy(dest, cqe, cq->mcq.cqe_sz);
			dest64->op_own = owner_bit |
				(dest64->op_own & ~MLX5_CQE_OWNER_MASK);
		}
	}

	if (nfreed) {
		cq->mcq.cons_index += nfreed;
		/* Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		mlx5_cq_set_ci(&cq->mcq);
	}
}

void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq)
{
	if (!cq)
		return;

	spin_lock_irq(&cq->lock);
	__mlx5_ib_cq_clean(cq, qpn, srq);
	spin_unlock_irq(&cq->lock);
}

int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->device);
	struct mlx5_ib_cq *mcq = to_mcq(cq);
	int err;

	if (!MLX5_CAP_GEN(dev->mdev, cq_moderation))
		return -EOPNOTSUPP;

	if (cq_period > MLX5_MAX_CQ_PERIOD)
		return -EINVAL;

	err = mlx5_core_modify_cq_moderation(dev->mdev, &mcq->mcq,
					     cq_period, cq_count);
	if (err)
		mlx5_ib_warn(dev, "modify cq 0x%x failed\n", mcq->mcq.cqn);

	return err;
}

static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
		       int entries, struct ib_udata *udata,
		       int *cqe_size)
{
	struct mlx5_ib_resize_cq ucmd;
	struct ib_umem *umem;
	int err;

	err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
	if (err)
		return err;

	if (ucmd.reserved0 || ucmd.reserved1)
		return -EINVAL;

	/* check multiplication overflow */
	if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1)
		return -EINVAL;

	umem = ib_umem_get(&dev->ib_dev, ucmd.buf_addr,
			   (size_t)ucmd.cqe_size * entries,
			   IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(umem)) {
		err = PTR_ERR(umem);
		return err;
	}

	cq->resize_umem = umem;
	*cqe_size = ucmd.cqe_size;

	return 0;
}

static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
			 int entries, int cqe_size)
{
	int err;

	cq->resize_buf = kzalloc(sizeof(*cq->resize_buf), GFP_KERNEL);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = alloc_cq_frag_buf(dev, cq->resize_buf, entries, cqe_size);
	if (err)
		goto ex;

	init_cq_frag_buf(cq->resize_buf);

	return 0;

ex:
	kfree(cq->resize_buf);
	return err;
}

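/* After a kernel-mode resize completes, copy any CQEs that landed in
 * the old buffer while the resize was in flight into the new buffer,
 * fixing up the ownership bit of each copied entry.
 */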
static int copy_resize_cqes(struct mlx5_ib_cq *cq)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_cqe64 *scqe64;
	struct mlx5_cqe64 *dcqe64;
	void *start_cqe;
	void *scqe;
	void *dcqe;
	int ssize;
	int dsize;
	int i;
	u8 sw_own;

	ssize = cq->buf.cqe_size;
	dsize = cq->resize_buf->cqe_size;
	if (ssize != dsize) {
		mlx5_ib_warn(dev, "resize from different cqe size is not supported\n");
		return -EINVAL;
	}

	i = cq->mcq.cons_index;
	scqe = get_sw_cqe(cq, i);
	scqe64 = ssize == 64 ? scqe : scqe + 64;
	start_cqe = scqe;
	if (!scqe) {
		mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
		return -EINVAL;
	}

	while (get_cqe_opcode(scqe64) != MLX5_CQE_RESIZE_CQ) {
		dcqe = mlx5_frag_buf_get_wqe(&cq->resize_buf->fbc,
					     (i + 1) & cq->resize_buf->nent);
		dcqe64 = dsize == 64 ? dcqe : dcqe + 64;
		sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent);
		memcpy(dcqe, scqe, dsize);
		dcqe64->op_own = (dcqe64->op_own & ~MLX5_CQE_OWNER_MASK) | sw_own;

		++i;
		scqe = get_sw_cqe(cq, i);
		scqe64 = ssize == 64 ? scqe : scqe + 64;
		if (!scqe) {
			mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
			return -EINVAL;
		}

		if (scqe == start_cqe) {
			pr_warn("resize CQ failed to get resize CQE, CQN 0x%x\n",
				cq->mcq.cqn);
			return -ENOMEM;
		}
	}
	++cq->mcq.cons_index;
	return 0;
}

int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	void *cqc;
	u32 *in;
	int err;
	int npas;
	__be64 *pas;
	unsigned int page_offset_quantized = 0;
	unsigned int page_shift;
	int inlen;
	int cqe_size;
	unsigned long flags;

	if (!MLX5_CAP_GEN(dev->mdev, cq_resize)) {
		pr_info("Firmware does not support resize CQ\n");
		return -ENOSYS;
	}

	if (entries < 1 ||
	    entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) {
		mlx5_ib_warn(dev, "wrong entries number %d, max %d\n",
			     entries,
			     1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz));
		return -EINVAL;
	}

	entries = roundup_pow_of_two(entries + 1);
	if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
		return -EINVAL;

	if (entries == ibcq->cqe + 1)
		return 0;

	mutex_lock(&cq->resize_mutex);
	if (udata) {
		unsigned long page_size;

		err = resize_user(dev, cq, entries, udata, &cqe_size);
		if (err)
			goto ex;

		page_size = mlx5_umem_find_best_cq_quantized_pgoff(
			cq->resize_umem, cqc, log_page_size,
			MLX5_ADAPTER_PAGE_SHIFT, page_offset, 64,
			&page_offset_quantized);
		if (!page_size) {
			err = -EINVAL;
			goto ex_resize;
		}
		npas = ib_umem_num_dma_blocks(cq->resize_umem, page_size);
		page_shift = order_base_2(page_size);
	} else {
		struct mlx5_frag_buf *frag_buf;

		cqe_size = 64;
		err = resize_kernel(dev, cq, entries, cqe_size);
		if (err)
			goto ex;
		frag_buf = &cq->resize_buf->frag_buf;
		npas = frag_buf->npages;
		page_shift = frag_buf->page_shift;
	}

	inlen = MLX5_ST_SZ_BYTES(modify_cq_in) +
		MLX5_FLD_SZ_BYTES(modify_cq_in, pas[0]) * npas;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto ex_resize;
	}

	pas = (__be64 *)MLX5_ADDR_OF(modify_cq_in, in, pas);
	if (udata)
		mlx5_ib_populate_pas(cq->resize_umem, 1UL << page_shift, pas,
				     0);
	else
		mlx5_fill_page_frag_array(&cq->resize_buf->frag_buf, pas);

	MLX5_SET(modify_cq_in, in,
		 modify_field_select_resize_field_select.resize_field_select.resize_field_select,
		 MLX5_MODIFY_CQ_MASK_LOG_SIZE |
		 MLX5_MODIFY_CQ_MASK_PG_OFFSET |
		 MLX5_MODIFY_CQ_MASK_PG_SIZE);

	cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);

	MLX5_SET(cqc, cqc, log_page_size,
		 page_shift - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(cqc, cqc, page_offset, page_offset_quantized);
	MLX5_SET(cqc, cqc, cqe_sz,
		 cqe_sz_to_mlx_sz(cqe_size,
				  cq->private_flags &
				  MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD));
	MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));

	MLX5_SET(modify_cq_in, in, op_mod, MLX5_CQ_OPMOD_RESIZE);
	MLX5_SET(modify_cq_in, in, cqn, cq->mcq.cqn);

	err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
	if (err)
		goto ex_alloc;

	if (udata) {
		cq->ibcq.cqe = entries - 1;
		ib_umem_release(cq->buf.umem);
		cq->buf.umem = cq->resize_umem;
		cq->resize_umem = NULL;
	} else {
		struct mlx5_ib_cq_buf tbuf;
		int resized = 0;

		spin_lock_irqsave(&cq->lock, flags);
		if (cq->resize_buf) {
			err = copy_resize_cqes(cq);
			if (!err) {
				tbuf = cq->buf;
				cq->buf = *cq->resize_buf;
				kfree(cq->resize_buf);
				cq->resize_buf = NULL;
				resized = 1;
			}
		}
		cq->ibcq.cqe = entries - 1;
		spin_unlock_irqrestore(&cq->lock, flags);
		if (resized)
			free_cq_buf(dev, &tbuf);
	}
	mutex_unlock(&cq->resize_mutex);

	kvfree(in);
	return 0;

ex_alloc:
	kvfree(in);

ex_resize:
	ib_umem_release(cq->resize_umem);
	if (!udata) {
		free_cq_buf(dev, cq->resize_buf);
		cq->resize_buf = NULL;
	}
ex:
	mutex_unlock(&cq->resize_mutex);
	return err;
}

int mlx5_ib_get_cqe_size(struct ib_cq *ibcq)
{
	struct mlx5_ib_cq *cq;

	if (!ibcq)
		return 128;

	cq = to_mcq(ibcq);
	return cq->cqe_size;
}

/* Called from atomic context */
int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc)
{
	struct mlx5_ib_wc *soft_wc;
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	unsigned long flags;

	soft_wc = kmalloc(sizeof(*soft_wc), GFP_ATOMIC);
	if (!soft_wc)
		return -ENOMEM;

	soft_wc->wc = *wc;
	spin_lock_irqsave(&cq->lock, flags);
	list_add_tail(&soft_wc->list, &cq->wc_list);
	if (cq->notify_flags == IB_CQ_NEXT_COMP ||
	    wc->status != IB_WC_SUCCESS) {
		cq->notify_flags = 0;
		schedule_work(&cq->notify_work);
	}
	spin_unlock_irqrestore(&cq->lock, flags);

	return 0;
}