/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx4/cq.h>
#include <linux/mlx4/qp.h>
#include <linux/mlx4/srq.h>
#include <linux/slab.h>

#include "mlx4_ib.h"
#include <rdma/mlx4-abi.h>
#include <rdma/uverbs_ioctl.h>

static void mlx4_ib_cq_comp(struct mlx4_cq *cq)
{
	struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;

	ibcq->comp_handler(ibcq, ibcq->cq_context);
}

static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type)
{
	struct ib_event event;
	struct ib_cq *ibcq;

	if (type != MLX4_EVENT_TYPE_CQ_ERROR) {
		pr_warn("Unexpected event type %d on CQ %06x\n",
			type, cq->cqn);
		return;
	}

	ibcq = &to_mibcq(cq)->ibcq;
	if (ibcq->event_handler) {
		event.device     = ibcq->device;
		event.event      = IB_EVENT_CQ_ERR;
		event.element.cq = ibcq;
		ibcq->event_handler(&event, ibcq->cq_context);
	}
}

static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n)
{
	return mlx4_buf_offset(&buf->buf, n * buf->entry_size);
}

static void *get_cqe(struct mlx4_ib_cq *cq, int n)
{
	return get_cqe_from_buf(&cq->buf, n);
}

static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n)
{
	struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);
	struct mlx4_cqe *tcqe = ((cq->buf.entry_size == 64) ? (cqe + 1) : cqe);

	return (!!(tcqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
		!!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
}
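
/*
 * next_cqe_sw() returns the CQE at the current consumer index when
 * software owns it (the ownership bit alternates on every pass through
 * the ring), or NULL when the CQ is empty.
 */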
static struct mlx4_cqe *next_cqe_sw(struct mlx4_ib_cq *cq)
{
	return get_sw_cqe(cq, cq->mcq.cons_index);
}

int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	struct mlx4_ib_cq *mcq = to_mcq(cq);
	struct mlx4_ib_dev *dev = to_mdev(cq->device);

	return mlx4_cq_modify(dev->dev, &mcq->mcq, cq_count, cq_period);
}

static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int nent)
{
	int err;

	err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,
			     PAGE_SIZE * 2, &buf->buf);
	if (err)
		goto out;

	buf->entry_size = dev->dev->caps.cqe_size;
	err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift,
			    &buf->mtt);
	if (err)
		goto err_buf;

	err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
	mlx4_buf_free(dev->dev, nent * buf->entry_size, &buf->buf);

out:
	return err;
}

static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
{
	mlx4_buf_free(dev->dev, (cqe + 1) * buf->entry_size, &buf->buf);
}

static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_udata *udata,
			       struct mlx4_ib_cq_buf *buf,
			       struct ib_umem **umem, u64 buf_addr, int cqe)
{
	int err;
	int cqe_size = dev->dev->caps.cqe_size;
	int shift;
	int n;

	*umem = ib_umem_get(udata, buf_addr, cqe * cqe_size,
			    IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(*umem))
		return PTR_ERR(*umem);

	n = ib_umem_page_count(*umem);
	shift = mlx4_ib_umem_calc_optimal_mtt_size(*umem, 0, &n);
	err = mlx4_mtt_init(dev->dev, n, shift, &buf->mtt);
	if (err)
		goto err_buf;

	err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
	ib_umem_release(*umem);

	return err;
}
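
/*
 * CQ creation takes one of two paths: userspace CQs use a CQE buffer
 * and doorbell record pinned from user memory, while kernel CQs get
 * buffers from the driver's own allocator.  Both paths end in
 * mlx4_cq_alloc(), which programs the CQ context into the HCA.
 */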
#define CQ_CREATE_FLAGS_SUPPORTED IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION
struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
				const struct ib_cq_init_attr *attr,
				struct ib_udata *udata)
{
	int entries = attr->cqe;
	int vector = attr->comp_vector;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_cq *cq;
	struct mlx4_uar *uar;
	void *buf_addr;
	int err;
	struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mlx4_ib_ucontext, ibucontext);

	if (entries < 1 || entries > dev->dev->caps.max_cqes)
		return ERR_PTR(-EINVAL);

	if (attr->flags & ~CQ_CREATE_FLAGS_SUPPORTED)
		return ERR_PTR(-EINVAL);

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	entries      = roundup_pow_of_two(entries + 1);
	cq->ibcq.cqe = entries - 1;
	mutex_init(&cq->resize_mutex);
	spin_lock_init(&cq->lock);
	cq->resize_buf = NULL;
	cq->resize_umem = NULL;
	cq->create_flags = attr->flags;
	INIT_LIST_HEAD(&cq->send_qp_list);
	INIT_LIST_HEAD(&cq->recv_qp_list);

	if (udata) {
		struct mlx4_ib_create_cq ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err_cq;
		}

		buf_addr = (void *)(unsigned long)ucmd.buf_addr;
		err = mlx4_ib_get_cq_umem(dev, udata, &cq->buf, &cq->umem,
					  ucmd.buf_addr, entries);
		if (err)
			goto err_cq;

		err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &cq->db);
		if (err)
			goto err_mtt;

		uar = &context->uar;
		cq->mcq.usage = MLX4_RES_USAGE_USER_VERBS;
	} else {
		err = mlx4_db_alloc(dev->dev, &cq->db, 1);
		if (err)
			goto err_cq;

		cq->mcq.set_ci_db  = cq->db.db;
		cq->mcq.arm_db     = cq->db.db + 1;
		*cq->mcq.set_ci_db = 0;
		*cq->mcq.arm_db    = 0;

		err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries);
		if (err)
			goto err_db;

		buf_addr = &cq->buf.buf;

		uar = &dev->priv_uar;
		cq->mcq.usage = MLX4_RES_USAGE_DRIVER;
	}

	if (dev->eq_table)
		vector = dev->eq_table[vector % ibdev->num_comp_vectors];

	err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar, cq->db.dma,
			    &cq->mcq, vector, 0,
			    !!(cq->create_flags &
			       IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION),
			    buf_addr, !!udata);
	if (err)
		goto err_dbmap;

	if (udata)
		cq->mcq.tasklet_ctx.comp = mlx4_ib_cq_comp;
	else
		cq->mcq.comp = mlx4_ib_cq_comp;
	cq->mcq.event = mlx4_ib_cq_event;

	if (udata)
		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
			err = -EFAULT;
			goto err_cq_free;
		}

	return &cq->ibcq;

err_cq_free:
	mlx4_cq_free(dev->dev, &cq->mcq);

err_dbmap:
	if (udata)
		mlx4_ib_db_unmap_user(context, &cq->db);

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);

	if (udata)
		ib_umem_release(cq->umem);
	else
		mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);

err_db:
	if (!udata)
		mlx4_db_free(dev->dev, &cq->db);

err_cq:
	kfree(cq);

	return ERR_PTR(err);
}

static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
				 int entries)
{
	int err;

	if (cq->resize_buf)
		return -EBUSY;

	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_KERNEL);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = mlx4_ib_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
	if (err) {
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		return err;
	}

	cq->resize_buf->cqe = entries - 1;

	return 0;
}

static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
				  int entries, struct ib_udata *udata)
{
	struct mlx4_ib_resize_cq ucmd;
	int err;

	if (cq->resize_umem)
		return -EBUSY;

	if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
		return -EFAULT;

	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_KERNEL);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = mlx4_ib_get_cq_umem(dev, udata, &cq->resize_buf->buf,
				  &cq->resize_umem, ucmd.buf_addr, entries);
	if (err) {
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		return err;
	}

	cq->resize_buf->cqe = entries - 1;

	return 0;
}

static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq)
{
	u32 i;

	i = cq->mcq.cons_index;
	while (get_sw_cqe(cq, i))
		++i;

	return i - cq->mcq.cons_index;
}
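
/*
 * Copy any CQEs still outstanding in the old buffer into the resize
 * buffer, stopping at the special RESIZE CQE that hardware writes.
 * With 64-byte CQEs the driver-visible part is the second half of the
 * entry, hence the cqe_inc adjustment.
 */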
static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
{
	struct mlx4_cqe *cqe, *new_cqe;
	int i;
	int cqe_size = cq->buf.entry_size;
	int cqe_inc = cqe_size == 64 ? 1 : 0;

	i = cq->mcq.cons_index;
	cqe = get_cqe(cq, i & cq->ibcq.cqe);
	cqe += cqe_inc;

	while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
		new_cqe = get_cqe_from_buf(&cq->resize_buf->buf,
					   (i + 1) & cq->resize_buf->cqe);
		memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), cqe_size);
		new_cqe += cqe_inc;

		new_cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) |
			(((i + 1) & (cq->resize_buf->cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0);
		cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
		cqe += cqe_inc;
	}
	++cq->mcq.cons_index;
}

int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	struct mlx4_mtt mtt;
	int outst_cqe;
	int err;

	mutex_lock(&cq->resize_mutex);
	if (entries < 1 || entries > dev->dev->caps.max_cqes) {
		err = -EINVAL;
		goto out;
	}

	entries = roundup_pow_of_two(entries + 1);
	if (entries == ibcq->cqe + 1) {
		err = 0;
		goto out;
	}

	if (entries > dev->dev->caps.max_cqes + 1) {
		err = -EINVAL;
		goto out;
	}

	if (ibcq->uobject) {
		err = mlx4_alloc_resize_umem(dev, cq, entries, udata);
		if (err)
			goto out;
	} else {
		/* Can't be smaller than the number of outstanding CQEs */
		outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
		if (entries < outst_cqe + 1) {
			err = -EINVAL;
			goto out;
		}

		err = mlx4_alloc_resize_buf(dev, cq, entries);
		if (err)
			goto out;
	}

	mtt = cq->buf.mtt;

	err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt);
	if (err)
		goto err_buf;

	mlx4_mtt_cleanup(dev->dev, &mtt);
	if (ibcq->uobject) {
		cq->buf      = cq->resize_buf->buf;
		cq->ibcq.cqe = cq->resize_buf->cqe;
		ib_umem_release(cq->umem);
		cq->umem     = cq->resize_umem;

		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		cq->resize_umem = NULL;
	} else {
		struct mlx4_ib_cq_buf tmp_buf;
		int tmp_cqe = 0;

		spin_lock_irq(&cq->lock);
		if (cq->resize_buf) {
			mlx4_ib_cq_resize_copy_cqes(cq);
			tmp_buf = cq->buf;
			tmp_cqe = cq->ibcq.cqe;
			cq->buf      = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
		}
		spin_unlock_irq(&cq->lock);

		if (tmp_cqe)
			mlx4_ib_free_cq_buf(dev, &tmp_buf, tmp_cqe);
	}

	goto out;

err_buf:
	mlx4_mtt_cleanup(dev->dev, &cq->resize_buf->buf.mtt);
	if (!ibcq->uobject)
		mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf,
				    cq->resize_buf->cqe);

	kfree(cq->resize_buf);
	cq->resize_buf = NULL;

	if (cq->resize_umem) {
		ib_umem_release(cq->resize_umem);
		cq->resize_umem = NULL;
	}

out:
	mutex_unlock(&cq->resize_mutex);

	return err;
}

int mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(cq->device);
	struct mlx4_ib_cq *mcq = to_mcq(cq);

	mlx4_cq_free(dev->dev, &mcq->mcq);
	mlx4_mtt_cleanup(dev->dev, &mcq->buf.mtt);

	if (udata) {
		mlx4_ib_db_unmap_user(
			rdma_udata_to_drv_context(
				udata,
				struct mlx4_ib_ucontext,
				ibucontext),
			&mcq->db);
		ib_umem_release(mcq->umem);
	} else {
		mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe);
		mlx4_db_free(dev->dev, &mcq->db);
	}

	kfree(mcq);

	return 0;
}
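
/* Dump the 32 driver-visible bytes of a CQE when diagnosing errors. */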
static void dump_cqe(void *cqe)
{
	__be32 *buf = cqe;

	pr_debug("CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
		 be32_to_cpu(buf[0]), be32_to_cpu(buf[1]), be32_to_cpu(buf[2]),
		 be32_to_cpu(buf[3]), be32_to_cpu(buf[4]), be32_to_cpu(buf[5]),
		 be32_to_cpu(buf[6]), be32_to_cpu(buf[7]));
}

static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
				     struct ib_wc *wc)
{
	if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR) {
		pr_debug("local QP operation err (QPN %06x, WQE index %x, vendor syndrome %02x, opcode = %02x)\n",
			 be32_to_cpu(cqe->my_qpn), be16_to_cpu(cqe->wqe_index),
			 cqe->vendor_err_syndrome,
			 cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		dump_cqe(cqe);
	}

	switch (cqe->syndrome) {
	case MLX4_CQE_SYNDROME_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_PROT_ERR:
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case MLX4_CQE_SYNDROME_WR_FLUSH_ERR:
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case MLX4_CQE_SYNDROME_MW_BIND_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case MLX4_CQE_SYNDROME_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ACCESS_ERR:
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_OP_ERR:
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IB_WC_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR:
		wc->status = IB_WC_REM_ABORT_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

	wc->vendor_err = cqe->vendor_err_syndrome;
}

static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum)
{
	return ((status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
				      MLX4_CQE_STATUS_IPV4F |
				      MLX4_CQE_STATUS_IPV4OPT |
				      MLX4_CQE_STATUS_IPV6 |
				      MLX4_CQE_STATUS_IPOK)) ==
		cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
			    MLX4_CQE_STATUS_IPOK)) &&
		(status & cpu_to_be16(MLX4_CQE_STATUS_UDP |
				      MLX4_CQE_STATUS_TCP)) &&
		checksum == cpu_to_be16(0xffff);
}
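
/*
 * Under SR-IOV, completions on proxy QPs carry the real completion
 * metadata (pkey index, source QP, GRH flag, ...) in a tunnel header
 * placed in front of the payload; unpack it into the work completion.
 */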
static void use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc,
			    unsigned tail, struct mlx4_cqe *cqe, int is_eth)
{
	struct mlx4_ib_proxy_sqp_hdr *hdr;

	ib_dma_sync_single_for_cpu(qp->ibqp.device,
				   qp->sqp_proxy_rcv[tail].map,
				   sizeof (struct mlx4_ib_proxy_sqp_hdr),
				   DMA_FROM_DEVICE);
	hdr = (struct mlx4_ib_proxy_sqp_hdr *) (qp->sqp_proxy_rcv[tail].addr);
	wc->pkey_index = be16_to_cpu(hdr->tun.pkey_index);
	wc->src_qp = be32_to_cpu(hdr->tun.flags_src_qp) & 0xFFFFFF;
	wc->wc_flags |= (hdr->tun.g_ml_path & 0x80) ? (IB_WC_GRH) : 0;
	wc->dlid_path_bits = 0;

	if (is_eth) {
		wc->slid = 0;
		wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid);
		memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4);
		memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2);
		wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
	} else {
		wc->slid = be16_to_cpu(hdr->tun.slid_mac_47_32);
		wc->sl = (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12);
	}
}

static void mlx4_ib_qp_sw_comp(struct mlx4_ib_qp *qp, int num_entries,
			       struct ib_wc *wc, int *npolled, int is_send)
{
	struct mlx4_ib_wq *wq;
	unsigned cur;
	int i;

	wq = is_send ? &qp->sq : &qp->rq;
	cur = wq->head - wq->tail;

	if (cur == 0)
		return;

	for (i = 0; i < cur && *npolled < num_entries; i++) {
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		wc->status = IB_WC_WR_FLUSH_ERR;
		wc->vendor_err = MLX4_CQE_SYNDROME_WR_FLUSH_ERR;
		wq->tail++;
		(*npolled)++;
		wc->qp = &qp->ibqp;
		wc++;
	}
}

static void mlx4_ib_poll_sw_comp(struct mlx4_ib_cq *cq, int num_entries,
				 struct ib_wc *wc, int *npolled)
{
	struct mlx4_ib_qp *qp;

	*npolled = 0;
	/* Find uncompleted WQEs belonging to that cq and return
	 * simulated FLUSH_ERR completions
	 */
	list_for_each_entry(qp, &cq->send_qp_list, cq_send_list) {
		mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 1);
		if (*npolled >= num_entries)
			goto out;
	}

	list_for_each_entry(qp, &cq->recv_qp_list, cq_recv_list) {
		mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 0);
		if (*npolled >= num_entries)
			goto out;
	}

out:
	return;
}
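
/*
 * Poll one hardware CQE and fill in @wc.  Returns 0 when a completion
 * was consumed (including error completions) or -EAGAIN when the CQ is
 * empty.  *cur_qp caches the QP of the previous CQE so the QP table
 * lookup can be skipped when consecutive completions belong to the
 * same QP.
 */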
static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
			    struct mlx4_ib_qp **cur_qp,
			    struct ib_wc *wc)
{
	struct mlx4_cqe *cqe;
	struct mlx4_qp *mqp;
	struct mlx4_ib_wq *wq;
	struct mlx4_ib_srq *srq;
	struct mlx4_srq *msrq = NULL;
	int is_send;
	int is_error;
	int is_eth;
	u32 g_mlpath_rqpn;
	u16 wqe_ctr;
	unsigned tail = 0;

repoll:
	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	if (cq->buf.entry_size == 64)
		cqe++;

	++cq->mcq.cons_index;

	/*
	 * Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	is_send = cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK;
	is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
		MLX4_CQE_OPCODE_ERROR;

	/* Resize CQ in progress */
	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
		if (cq->resize_buf) {
			struct mlx4_ib_dev *dev = to_mdev(cq->ibcq.device);

			mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
			cq->buf      = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
		}

		goto repoll;
	}

	if (!*cur_qp ||
	    (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) {
		/*
		 * We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
				       be32_to_cpu(cqe->vlan_my_qpn));
		*cur_qp = to_mibqp(mqp);
	}

	wc->qp = &(*cur_qp)->ibqp;

	if (wc->qp->qp_type == IB_QPT_XRC_TGT) {
		u32 srq_num;
		g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
		srq_num = g_mlpath_rqpn & 0xffffff;
		/* SRQ is also in the radix tree */
		msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev,
				       srq_num);
	}

	if (is_send) {
		wq = &(*cur_qp)->sq;
		if (!(*cur_qp)->sq_signal_bits) {
			wqe_ctr = be16_to_cpu(cqe->wqe_index);
			wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
		}
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	} else if ((*cur_qp)->ibqp.srq) {
		srq = to_msrq((*cur_qp)->ibqp.srq);
		wqe_ctr = be16_to_cpu(cqe->wqe_index);
		wc->wr_id = srq->wrid[wqe_ctr];
		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
	} else if (msrq) {
		srq = to_mibsrq(msrq);
		wqe_ctr = be16_to_cpu(cqe->wqe_index);
		wc->wr_id = srq->wrid[wqe_ctr];
		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
	} else {
		wq = &(*cur_qp)->rq;
		tail = wq->tail & (wq->wqe_cnt - 1);
		wc->wr_id = wq->wrid[tail];
		++wq->tail;
	}

	if (unlikely(is_error)) {
		mlx4_ib_handle_error_cqe((struct mlx4_err_cqe *) cqe, wc);
		return 0;
	}

	wc->status = IB_WC_SUCCESS;

	if (is_send) {
		wc->wc_flags = 0;
		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_OPCODE_RDMA_WRITE_IMM:
			wc->wc_flags |= IB_WC_WITH_IMM;
			/* fall through */
		case MLX4_OPCODE_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case MLX4_OPCODE_SEND_IMM:
			wc->wc_flags |= IB_WC_WITH_IMM;
			/* fall through */
		case MLX4_OPCODE_SEND:
		case MLX4_OPCODE_SEND_INVAL:
			wc->opcode = IB_WC_SEND;
			break;
		case MLX4_OPCODE_RDMA_READ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = be32_to_cpu(cqe->byte_cnt);
			break;
		case MLX4_OPCODE_ATOMIC_CS:
			wc->opcode = IB_WC_COMP_SWAP;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_ATOMIC_FA:
			wc->opcode = IB_WC_FETCH_ADD;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_MASKED_ATOMIC_CS:
			wc->opcode = IB_WC_MASKED_COMP_SWAP;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_MASKED_ATOMIC_FA:
			wc->opcode = IB_WC_MASKED_FETCH_ADD;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_LSO:
			wc->opcode = IB_WC_LSO;
			break;
		case MLX4_OPCODE_FMR:
			wc->opcode = IB_WC_REG_MR;
			break;
		case MLX4_OPCODE_LOCAL_INVAL:
			wc->opcode = IB_WC_LOCAL_INV;
			break;
		}
	} else {
		wc->byte_len = be32_to_cpu(cqe->byte_cnt);

		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_RECV_OPCODE_RDMA_WRITE_IMM:
			wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
			wc->wc_flags = IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immed_rss_invalid;
			break;
		case MLX4_RECV_OPCODE_SEND_INVAL:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = IB_WC_WITH_INVALIDATE;
			wc->ex.invalidate_rkey = be32_to_cpu(cqe->immed_rss_invalid);
			break;
		case MLX4_RECV_OPCODE_SEND:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = 0;
			break;
		case MLX4_RECV_OPCODE_SEND_IMM:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immed_rss_invalid;
			break;
		}

		is_eth = (rdma_port_get_link_layer(wc->qp->device,
						   (*cur_qp)->port) ==
			  IB_LINK_LAYER_ETHERNET);
		if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) {
			if ((*cur_qp)->mlx4_ib_qp_type &
			    (MLX4_IB_QPT_PROXY_SMI_OWNER |
			     MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
				use_tunnel_data(*cur_qp, cq, wc, tail, cqe,
						is_eth);
				return 0;
			}
		}

		g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
		wc->src_qp = g_mlpath_rqpn & 0xffffff;
		wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
		wc->wc_flags |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
		wc->pkey_index = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
		wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status,
				cqe->checksum) ? IB_WC_IP_CSUM_OK : 0;
		if (is_eth) {
			wc->slid = 0;
			wc->sl = be16_to_cpu(cqe->sl_vid) >> 13;
			if (be32_to_cpu(cqe->vlan_my_qpn) &
			    MLX4_CQE_CVLAN_PRESENT_MASK) {
				wc->vlan_id = be16_to_cpu(cqe->sl_vid) &
					MLX4_CQE_VID_MASK;
			} else {
				wc->vlan_id = 0xffff;
			}
			memcpy(wc->smac, cqe->smac, ETH_ALEN);
			wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
		} else {
			wc->slid = be16_to_cpu(cqe->rlid);
			wc->sl = be16_to_cpu(cqe->sl_vid) >> 12;
			wc->vlan_id = 0xffff;
		}
	}

	return 0;
}
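
/*
 * mlx4_ib_poll_cq() backs the ib_poll_cq() verb.  An illustrative
 * consumer loop (a sketch, not part of this driver; process_wc() is a
 * hypothetical consumer-defined handler) might look like:
 *
 *	struct ib_wc wc[16];
 *	int i, n;
 *
 *	while ((n = ib_poll_cq(ibcq, ARRAY_SIZE(wc), wc)) > 0)
 *		for (i = 0; i < n; i++)
 *			process_wc(&wc[i]);
 *
 * If the device is in internal-error state, simulated FLUSH_ERR
 * completions are returned instead of reading the hardware CQ.
 */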
int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	struct mlx4_ib_qp *cur_qp = NULL;
	unsigned long flags;
	int npolled;
	struct mlx4_ib_dev *mdev = to_mdev(cq->ibcq.device);

	spin_lock_irqsave(&cq->lock, flags);
	if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		mlx4_ib_poll_sw_comp(cq, num_entries, wc, &npolled);
		goto out;
	}

	for (npolled = 0; npolled < num_entries; ++npolled) {
		if (mlx4_ib_poll_one(cq, &cur_qp, wc + npolled))
			break;
	}

	mlx4_cq_set_ci(&cq->mcq);

out:
	spin_unlock_irqrestore(&cq->lock, flags);

	return npolled;
}

int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	mlx4_cq_arm(&to_mcq(ibcq)->mcq,
		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		    MLX4_CQ_DB_REQ_NOT_SOL : MLX4_CQ_DB_REQ_NOT,
		    to_mdev(ibcq->device)->uar_map,
		    MLX4_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->uar_lock));

	return 0;
}
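
/*
 * Remove all CQEs for the given QPN from the CQ, freeing any SRQ WQEs
 * they reference.  The caller guarantees the QP is already in RESET,
 * so hardware will not add new entries for it; mlx4_ib_cq_clean()
 * below is the locked wrapper.
 */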
void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
	u32 prod_index;
	int nfreed = 0;
	struct mlx4_cqe *cqe, *dest;
	u8 owner_bit;
	int cqe_inc = cq->buf.entry_size == 64 ? 1 : 0;

	/*
	 * First we need to find the current producer index, so we
	 * know where to start cleaning from.  It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); ++prod_index)
		if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
			break;

	/*
	 * Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
		cqe += cqe_inc;

		if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) {
			if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
				mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
			dest += cqe_inc;

			owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK;
			memcpy(dest, cqe, sizeof *cqe);
			dest->owner_sr_opcode = owner_bit |
				(dest->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		}
	}

	if (nfreed) {
		cq->mcq.cons_index += nfreed;
		/*
		 * Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		mlx4_cq_set_ci(&cq->mcq);
	}
}

void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
	spin_lock_irq(&cq->lock);
	__mlx4_ib_cq_clean(cq, qpn, srq);
	spin_unlock_irq(&cq->lock);
}