/* This file is part of the Emulex RoCE Device Driver for
 * RoCE (RDMA over Converged Ethernet) adapters.
 * Copyright (C) 2012-2015 Emulex. All rights reserved.
 * EMULEX and SLI are trademarks of Emulex.
 * www.emulex.com
 *
 * This software is available to you under a choice of one of two licenses.
 * You may choose to be licensed under the terms of the GNU General Public
 * License (GPL) Version 2, available from the file COPYING in the main
 * directory of this source tree, or the BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 *
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/dma-mapping.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <rdma/uverbs_ioctl.h>

#include "ocrdma.h"
#include "ocrdma_hw.h"
#include "ocrdma_verbs.h"
#include <rdma/ocrdma-abi.h>

int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
	if (index > 0)
		return -EINVAL;

	*pkey = 0xffff;
	return 0;
}

int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
			struct ib_udata *uhw)
{
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	memset(attr, 0, sizeof *attr);
	memcpy(&attr->fw_ver, &dev->attr.fw_ver[0],
	       min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver)));
	ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid);
	attr->max_mr_size = dev->attr.max_mr_size;
	attr->page_size_cap = 0xffff000;
	attr->vendor_id = dev->nic_info.pdev->vendor;
	attr->vendor_part_id = dev->nic_info.pdev->device;
	attr->hw_ver = dev->asic_id;
	attr->max_qp = dev->attr.max_qp;
	attr->max_ah = OCRDMA_MAX_AH;
	attr->max_qp_wr = dev->attr.max_wqe;

	attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
				 IB_DEVICE_RC_RNR_NAK_GEN |
				 IB_DEVICE_SHUTDOWN_PORT |
				 IB_DEVICE_SYS_IMAGE_GUID |
				 IB_DEVICE_LOCAL_DMA_LKEY |
				 IB_DEVICE_MEM_MGT_EXTENSIONS;
	attr->max_send_sge = dev->attr.max_send_sge;
	attr->max_recv_sge = dev->attr.max_recv_sge;
	attr->max_sge_rd = dev->attr.max_rdma_sge;
	attr->max_cq = dev->attr.max_cq;
	attr->max_cqe = dev->attr.max_cqe;
	attr->max_mr = dev->attr.max_mr;
	attr->max_mw = dev->attr.max_mw;
	attr->max_pd = dev->attr.max_pd;
	attr->atomic_cap = 0;
	attr->max_fmr = 0;
	attr->max_map_per_fmr = 0;
	attr->max_qp_rd_atom =
	    min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
	attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
	attr->max_srq = dev->attr.max_srq;
	attr->max_srq_sge = dev->attr.max_srq_sge;
	attr->max_srq_wr = dev->attr.max_rqe;
	attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
	attr->max_fast_reg_page_list_len = dev->attr.max_pages_per_frmr;
	attr->max_pkeys = 1;
	return 0;
}

static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
					    u8 *ib_speed, u8 *ib_width)
{
	int status;
	u8 speed;

	status = ocrdma_mbx_get_link_speed(dev, &speed, NULL);
	if (status)
		speed = OCRDMA_PHYS_LINK_SPEED_ZERO;

	switch (speed) {
	case OCRDMA_PHYS_LINK_SPEED_1GBPS:
		*ib_speed = IB_SPEED_SDR;
		*ib_width = IB_WIDTH_1X;
		break;

	case OCRDMA_PHYS_LINK_SPEED_10GBPS:
		*ib_speed = IB_SPEED_QDR;
		*ib_width = IB_WIDTH_1X;
		break;

	case OCRDMA_PHYS_LINK_SPEED_20GBPS:
		*ib_speed = IB_SPEED_DDR;
		*ib_width = IB_WIDTH_4X;
		break;

	case OCRDMA_PHYS_LINK_SPEED_40GBPS:
		*ib_speed = IB_SPEED_QDR;
		*ib_width = IB_WIDTH_4X;
		break;

	default:
		/* Unsupported */
		*ib_speed = IB_SPEED_SDR;
		*ib_width = IB_WIDTH_1X;
	}
}

int ocrdma_query_port(struct ib_device *ibdev,
		      u8 port, struct ib_port_attr *props)
{
	enum ib_port_state port_state;
	struct ocrdma_dev *dev;
	struct net_device *netdev;

	/* props being zeroed by the caller, avoid zeroing it here */
	dev = get_ocrdma_dev(ibdev);
	netdev = dev->nic_info.netdev;
	if (netif_running(netdev) && netif_oper_up(netdev)) {
		port_state = IB_PORT_ACTIVE;
		props->phys_state = 5;
	} else {
		port_state = IB_PORT_DOWN;
		props->phys_state = 3;
	}
	props->max_mtu = IB_MTU_4096;
	props->active_mtu = iboe_get_mtu(netdev->mtu);
	props->lid = 0;
	props->lmc = 0;
	props->sm_lid = 0;
	props->sm_sl = 0;
	props->state = port_state;
	props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
				IB_PORT_DEVICE_MGMT_SUP |
				IB_PORT_VENDOR_CLASS_SUP;
	props->ip_gids = true;
	props->gid_tbl_len = OCRDMA_MAX_SGID;
	props->pkey_tbl_len = 1;
	props->bad_pkey_cntr = 0;
	props->qkey_viol_cntr = 0;
	get_link_speed_and_width(dev, &props->active_speed,
				 &props->active_width);
	props->max_msg_sz = 0x80000000;
	props->max_vl_num = 4;
	return 0;
}

int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
		       struct ib_port_modify *props)
{
	return 0;
}

static int ocrdma_add_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
			   unsigned long len)
{
	struct ocrdma_mm *mm;

	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
	if (mm == NULL)
		return -ENOMEM;
	mm->key.phy_addr = phy_addr;
	mm->key.len = len;
	INIT_LIST_HEAD(&mm->entry);

	mutex_lock(&uctx->mm_list_lock);
	list_add_tail(&mm->entry, &uctx->mm_head);
	mutex_unlock(&uctx->mm_list_lock);
	return 0;
}

static void ocrdma_del_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
			    unsigned long len)
{
	struct ocrdma_mm *mm, *tmp;

	mutex_lock(&uctx->mm_list_lock);
	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
		if (len != mm->key.len && phy_addr != mm->key.phy_addr)
			continue;

		list_del(&mm->entry);
		kfree(mm);
		break;
	}
	mutex_unlock(&uctx->mm_list_lock);
}

static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
			       unsigned long len)
{
	bool found = false;
	struct ocrdma_mm *mm;

	mutex_lock(&uctx->mm_list_lock);
	list_for_each_entry(mm, &uctx->mm_head, entry) {
		if (len != mm->key.len && phy_addr != mm->key.phy_addr)
			continue;

		found = true;
		break;
	}
	mutex_unlock(&uctx->mm_list_lock);
	return found;
}

static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool)
{
	u16 pd_bitmap_idx = 0;
	const unsigned long *pd_bitmap;

	if (dpp_pool) {
		pd_bitmap = dev->pd_mgr->pd_dpp_bitmap;
		pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
						    dev->pd_mgr->max_dpp_pd);
		__set_bit(pd_bitmap_idx, dev->pd_mgr->pd_dpp_bitmap);
		dev->pd_mgr->pd_dpp_count++;
		if (dev->pd_mgr->pd_dpp_count > dev->pd_mgr->pd_dpp_thrsh)
			dev->pd_mgr->pd_dpp_thrsh = dev->pd_mgr->pd_dpp_count;
	} else {
		pd_bitmap = dev->pd_mgr->pd_norm_bitmap;
		pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
						    dev->pd_mgr->max_normal_pd);
		__set_bit(pd_bitmap_idx, dev->pd_mgr->pd_norm_bitmap);
		dev->pd_mgr->pd_norm_count++;
		if (dev->pd_mgr->pd_norm_count > dev->pd_mgr->pd_norm_thrsh)
			dev->pd_mgr->pd_norm_thrsh = dev->pd_mgr->pd_norm_count;
	}
	return pd_bitmap_idx;
}

static int _ocrdma_pd_mgr_put_bitmap(struct ocrdma_dev *dev, u16 pd_id,
				     bool dpp_pool)
{
	u16 pd_count;
	u16 pd_bit_index;

	pd_count = dpp_pool ? dev->pd_mgr->pd_dpp_count :
			      dev->pd_mgr->pd_norm_count;
	if (pd_count == 0)
		return -EINVAL;

	if (dpp_pool) {
		pd_bit_index = pd_id - dev->pd_mgr->pd_dpp_start;
		if (pd_bit_index >= dev->pd_mgr->max_dpp_pd) {
			return -EINVAL;
		} else {
			__clear_bit(pd_bit_index, dev->pd_mgr->pd_dpp_bitmap);
			dev->pd_mgr->pd_dpp_count--;
		}
	} else {
		pd_bit_index = pd_id - dev->pd_mgr->pd_norm_start;
		if (pd_bit_index >= dev->pd_mgr->max_normal_pd) {
			return -EINVAL;
		} else {
			__clear_bit(pd_bit_index, dev->pd_mgr->pd_norm_bitmap);
			dev->pd_mgr->pd_norm_count--;
		}
	}

	return 0;
}

static int ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id,
			     bool dpp_pool)
{
	int status;

	mutex_lock(&dev->dev_lock);
	status = _ocrdma_pd_mgr_put_bitmap(dev, pd_id, dpp_pool);
	mutex_unlock(&dev->dev_lock);
	return status;
}

static int ocrdma_get_pd_num(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
{
	u16 pd_idx = 0;
	int status = 0;

	mutex_lock(&dev->dev_lock);
	if (pd->dpp_enabled) {
		/* try allocating DPP PD, if not available then normal PD */
		if (dev->pd_mgr->pd_dpp_count < dev->pd_mgr->max_dpp_pd) {
			pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, true);
			pd->id = dev->pd_mgr->pd_dpp_start + pd_idx;
			pd->dpp_page = dev->pd_mgr->dpp_page_index + pd_idx;
		} else if (dev->pd_mgr->pd_norm_count <
			   dev->pd_mgr->max_normal_pd) {
			pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
			pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
			pd->dpp_enabled = false;
		} else {
			status = -EINVAL;
		}
	} else {
		if (dev->pd_mgr->pd_norm_count < dev->pd_mgr->max_normal_pd) {
			pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
			pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
		} else {
			status = -EINVAL;
		}
	}
	mutex_unlock(&dev->dev_lock);
	return status;
}

/*
 * NOTE:
 *
 * ocrdma_ucontext must be used here because this function is also
 * called from ocrdma_alloc_ucontext where ib_udata does not have
 * valid ib_ucontext pointer. ib_uverbs_get_context does not call
 * uobj_{alloc|get_xxx} helpers which are used to store the
 * ib_ucontext in uverbs_attr_bundle wrapping the ib_udata. so
 * ib_udata does NOT imply valid ib_ucontext here!
 */
static int _ocrdma_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
			    struct ocrdma_ucontext *uctx,
			    struct ib_udata *udata)
{
	int status;

	if (udata && uctx && dev->attr.max_dpp_pds) {
		pd->dpp_enabled =
			ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R;
		pd->num_dpp_qp =
			pd->dpp_enabled ? (dev->nic_info.db_page_size /
					   dev->attr.wqe_size) : 0;
	}

	if (dev->pd_mgr->pd_prealloc_valid)
		return ocrdma_get_pd_num(dev, pd);

retry:
	status = ocrdma_mbx_alloc_pd(dev, pd);
	if (status) {
		if (pd->dpp_enabled) {
			pd->dpp_enabled = false;
			pd->num_dpp_qp = 0;
			goto retry;
		}
		return status;
	}

	return 0;
}

static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx,
				 struct ocrdma_pd *pd)
{
	return (uctx->cntxt_pd == pd);
}

static void _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
			       struct ocrdma_pd *pd)
{
	if (dev->pd_mgr->pd_prealloc_valid)
		ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled);
	else
		ocrdma_mbx_dealloc_pd(dev, pd);
}

static int ocrdma_alloc_ucontext_pd(struct ocrdma_dev *dev,
				    struct ocrdma_ucontext *uctx,
				    struct ib_udata *udata)
{
	struct ib_device *ibdev = &dev->ibdev;
	struct ib_pd *pd;
	int status;

	pd = rdma_zalloc_drv_obj(ibdev, ib_pd);
	if (!pd)
		return -ENOMEM;

	pd->device = ibdev;
	uctx->cntxt_pd = get_ocrdma_pd(pd);

	status = _ocrdma_alloc_pd(dev, uctx->cntxt_pd, uctx, udata);
	if (status) {
		kfree(uctx->cntxt_pd);
		goto err;
	}

	uctx->cntxt_pd->uctx = uctx;
	uctx->cntxt_pd->ibpd.device = &dev->ibdev;
err:
	return status;
}

static void ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
{
	struct ocrdma_pd *pd = uctx->cntxt_pd;
	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);

	if (uctx->pd_in_use) {
		pr_err("%s(%d) Freeing in use pdid=0x%x.\n",
		       __func__, dev->id, pd->id);
	}
	kfree(uctx->cntxt_pd);
	uctx->cntxt_pd = NULL;
	_ocrdma_dealloc_pd(dev, pd);
}

static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
{
	struct ocrdma_pd *pd = NULL;

	mutex_lock(&uctx->mm_list_lock);
	if (!uctx->pd_in_use) {
		uctx->pd_in_use = true;
		pd = uctx->cntxt_pd;
	}
	mutex_unlock(&uctx->mm_list_lock);

	return pd;
}

static void ocrdma_release_ucontext_pd(struct ocrdma_ucontext *uctx)
{
	mutex_lock(&uctx->mm_list_lock);
	uctx->pd_in_use = false;
	mutex_unlock(&uctx->mm_list_lock);
}

int ocrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
{
	struct ib_device *ibdev = uctx->device;
	int status;
	struct ocrdma_ucontext *ctx = get_ocrdma_ucontext(uctx);
	struct ocrdma_alloc_ucontext_resp resp = {};
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
	struct pci_dev *pdev = dev->nic_info.pdev;
	u32 map_len = roundup(sizeof(u32) * 2048, PAGE_SIZE);

	if (!udata)
		return -EFAULT;
	INIT_LIST_HEAD(&ctx->mm_head);
	mutex_init(&ctx->mm_list_lock);

	ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
					    &ctx->ah_tbl.pa, GFP_KERNEL);
	if (!ctx->ah_tbl.va)
		return -ENOMEM;

	ctx->ah_tbl.len = map_len;

	resp.ah_tbl_len = ctx->ah_tbl.len;
	resp.ah_tbl_page = virt_to_phys(ctx->ah_tbl.va);

	status = ocrdma_add_mmap(ctx, resp.ah_tbl_page, resp.ah_tbl_len);
	if (status)
		goto map_err;

	status = ocrdma_alloc_ucontext_pd(dev, ctx, udata);
	if (status)
		goto pd_err;

	resp.dev_id = dev->id;
	resp.max_inline_data = dev->attr.max_inline_data;
	resp.wqe_size = dev->attr.wqe_size;
	resp.rqe_size = dev->attr.rqe_size;
	resp.dpp_wqe_size = dev->attr.wqe_size;

	memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver));
	status = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (status)
		goto cpy_err;
	return 0;

cpy_err:
	ocrdma_dealloc_ucontext_pd(ctx);
pd_err:
	ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len);
map_err:
	dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va,
			  ctx->ah_tbl.pa);
	return status;
}

void ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
{
	struct ocrdma_mm *mm, *tmp;
	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibctx->device);
	struct pci_dev *pdev = dev->nic_info.pdev;

	ocrdma_dealloc_ucontext_pd(uctx);

	ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len);
	dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va,
			  uctx->ah_tbl.pa);

	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
		list_del(&mm->entry);
		kfree(mm);
	}
}

int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct ocrdma_ucontext *ucontext = get_ocrdma_ucontext(context);
	struct ocrdma_dev *dev = get_ocrdma_dev(context->device);
	unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
	u64 unmapped_db = (u64) dev->nic_info.unmapped_db;
	unsigned long len = (vma->vm_end - vma->vm_start);
	int status;
	bool found;

	if (vma->vm_start & (PAGE_SIZE - 1))
		return -EINVAL;
	found = ocrdma_search_mmap(ucontext, vma->vm_pgoff << PAGE_SHIFT, len);
	if (!found)
		return -EINVAL;

	if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
		dev->nic_info.db_total_size)) &&
		(len <= dev->nic_info.db_page_size)) {
		if (vma->vm_flags & VM_READ)
			return -EPERM;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
					    len, vma->vm_page_prot);
	} else if (dev->nic_info.dpp_unmapped_len &&
		(vm_page >= (u64) dev->nic_info.dpp_unmapped_addr) &&
		(vm_page <= (u64) (dev->nic_info.dpp_unmapped_addr +
			dev->nic_info.dpp_unmapped_len)) &&
		(len <= dev->nic_info.dpp_unmapped_len)) {
		if (vma->vm_flags & VM_READ)
			return -EPERM;

		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
					    len, vma->vm_page_prot);
	} else {
		status = remap_pfn_range(vma, vma->vm_start,
					 vma->vm_pgoff, len, vma->vm_page_prot);
	}
	return status;
}

static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
				struct ib_udata *udata)
{
	int status;
	u64 db_page_addr;
	u64 dpp_page_addr = 0;
	u32 db_page_size;
	struct ocrdma_alloc_pd_uresp rsp;
	struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
		udata, struct ocrdma_ucontext, ibucontext);

	memset(&rsp, 0, sizeof(rsp));
	rsp.id = pd->id;
	rsp.dpp_enabled = pd->dpp_enabled;
	db_page_addr = ocrdma_get_db_addr(dev, pd->id);
	db_page_size = dev->nic_info.db_page_size;

	status = ocrdma_add_mmap(uctx, db_page_addr, db_page_size);
	if (status)
		return status;

	if (pd->dpp_enabled) {
		dpp_page_addr = dev->nic_info.dpp_unmapped_addr +
				(pd->id * PAGE_SIZE);
		status = ocrdma_add_mmap(uctx, dpp_page_addr,
					 PAGE_SIZE);
		if (status)
			goto dpp_map_err;
		rsp.dpp_page_addr_hi = upper_32_bits(dpp_page_addr);
		rsp.dpp_page_addr_lo = dpp_page_addr;
	}

	status = ib_copy_to_udata(udata, &rsp, sizeof(rsp));
	if (status)
		goto ucopy_err;

	pd->uctx = uctx;
	return 0;

ucopy_err:
	if (pd->dpp_enabled)
		ocrdma_del_mmap(pd->uctx, dpp_page_addr, PAGE_SIZE);
dpp_map_err:
	ocrdma_del_mmap(pd->uctx, db_page_addr, db_page_size);
	return status;
}

int ocrdma_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct ib_device *ibdev = ibpd->device;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
	struct ocrdma_pd *pd;
	int status;
	u8 is_uctx_pd = false;
	struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
		udata, struct ocrdma_ucontext, ibucontext);

	if (udata) {
		pd = ocrdma_get_ucontext_pd(uctx);
		if (pd) {
			is_uctx_pd = true;
			goto pd_mapping;
		}
	}

	pd = get_ocrdma_pd(ibpd);
	status = _ocrdma_alloc_pd(dev, pd, uctx, udata);
	if (status)
		goto exit;

pd_mapping:
	if (udata) {
		status = ocrdma_copy_pd_uresp(dev, pd, udata);
		if (status)
			goto err;
	}
	return 0;

err:
	if (is_uctx_pd)
		ocrdma_release_ucontext_pd(uctx);
	else
		_ocrdma_dealloc_pd(dev, pd);
exit:
	return status;
}

void ocrdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	struct ocrdma_ucontext *uctx = NULL;
	u64 usr_db;

	uctx = pd->uctx;
	if (uctx) {
		u64 dpp_db = dev->nic_info.dpp_unmapped_addr +
			(pd->id * PAGE_SIZE);
		if (pd->dpp_enabled)
			ocrdma_del_mmap(pd->uctx, dpp_db, PAGE_SIZE);
		usr_db = ocrdma_get_db_addr(dev, pd->id);
		ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size);

		if (is_ucontext_pd(uctx, pd)) {
			ocrdma_release_ucontext_pd(uctx);
			return;
		}
	}
	_ocrdma_dealloc_pd(dev, pd);
}

static int ocrdma_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
			     u32 pdid, int acc, u32 num_pbls, u32 addr_check)
{
	int status;

	mr->hwmr.fr_mr = 0;
	mr->hwmr.local_rd = 1;
	mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
	mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
	mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;
	mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
	mr->hwmr.num_pbls = num_pbls;

	status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pdid, addr_check);
	if (status)
		return status;

	mr->ibmr.lkey = mr->hwmr.lkey;
	if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
		mr->ibmr.rkey = mr->hwmr.lkey;
	return 0;
}

struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *ibpd, int acc)
{
	int status;
	struct ocrdma_mr *mr;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);

	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
		pr_err("%s err, invalid access rights\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	status = ocrdma_alloc_lkey(dev, mr, pd->id, acc, 0,
				   OCRDMA_ADDR_CHECK_DISABLE);
	if (status) {
		kfree(mr);
		return ERR_PTR(status);
	}

	return &mr->ibmr;
}

static void ocrdma_free_mr_pbl_tbl(struct ocrdma_dev *dev,
				   struct ocrdma_hw_mr *mr)
{
	struct pci_dev *pdev = dev->nic_info.pdev;
	int i = 0;

	if (mr->pbl_table) {
		for (i = 0; i < mr->num_pbls; i++) {
			if (!mr->pbl_table[i].va)
				continue;
			dma_free_coherent(&pdev->dev, mr->pbl_size,
					  mr->pbl_table[i].va,
					  mr->pbl_table[i].pa);
		}
		kfree(mr->pbl_table);
		mr->pbl_table = NULL;
	}
}

static int ocrdma_get_pbl_info(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
			       u32 num_pbes)
{
	u32 num_pbls = 0;
	u32 idx = 0;
	int status = 0;
	u32 pbl_size;

	do {
		pbl_size = OCRDMA_MIN_HPAGE_SIZE * (1 << idx);
		if (pbl_size > MAX_OCRDMA_PBL_SIZE) {
			status = -EFAULT;
			break;
		}
		num_pbls = roundup(num_pbes, (pbl_size / sizeof(u64)));
		num_pbls = num_pbls / (pbl_size / sizeof(u64));
		idx++;
	} while (num_pbls >= dev->attr.max_num_mr_pbl);

	mr->hwmr.num_pbes = num_pbes;
	mr->hwmr.num_pbls = num_pbls;
	mr->hwmr.pbl_size = pbl_size;
	return status;
}

static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
{
	int status = 0;
	int i;
	u32 dma_len = mr->pbl_size;
	struct pci_dev *pdev = dev->nic_info.pdev;
	void *va;
	dma_addr_t pa;

	mr->pbl_table = kcalloc(mr->num_pbls, sizeof(struct ocrdma_pbl),
				GFP_KERNEL);

	if (!mr->pbl_table)
		return -ENOMEM;

	for (i = 0; i < mr->num_pbls; i++) {
		va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
		if (!va) {
			ocrdma_free_mr_pbl_tbl(dev, mr);
			status = -ENOMEM;
			break;
		}
		mr->pbl_table[i].va = va;
		mr->pbl_table[i].pa = pa;
	}
	return status;
}

static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
			    u32 num_pbes)
{
	struct ocrdma_pbe *pbe;
	struct sg_dma_page_iter sg_iter;
	struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
	struct ib_umem *umem = mr->umem;
	int pbe_cnt, total_num_pbes = 0;
	u64 pg_addr;

	if (!mr->hwmr.num_pbes)
		return;

	pbe = (struct ocrdma_pbe *)pbl_tbl->va;
	pbe_cnt = 0;

	for_each_sg_dma_page (umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
		/* store the page address in pbe */
		pg_addr = sg_page_iter_dma_address(&sg_iter);
		pbe->pa_lo = cpu_to_le32(pg_addr);
		pbe->pa_hi = cpu_to_le32(upper_32_bits(pg_addr));
		pbe_cnt += 1;
		total_num_pbes += 1;
		pbe++;

		/* if done building pbes, issue the mbx cmd. */
		if (total_num_pbes == num_pbes)
			return;

		/* if the given pbl is full storing the pbes,
		 * move to next pbl.
		 */
		if (pbe_cnt == (mr->hwmr.pbl_size / sizeof(u64))) {
			pbl_tbl++;
			pbe = (struct ocrdma_pbe *)pbl_tbl->va;
			pbe_cnt = 0;
		}
	}
}

struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
				 u64 usr_addr, int acc, struct ib_udata *udata)
{
	int status = -ENOMEM;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	struct ocrdma_mr *mr;
	struct ocrdma_pd *pd;
	u32 num_pbes;

	pd = get_ocrdma_pd(ibpd);

	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(status);
	mr->umem = ib_umem_get(udata, start, len, acc, 0);
	if (IS_ERR(mr->umem)) {
		status = -EFAULT;
		goto umem_err;
	}
	num_pbes = ib_umem_page_count(mr->umem);
	status = ocrdma_get_pbl_info(dev, mr, num_pbes);
	if (status)
		goto umem_err;

	mr->hwmr.pbe_size = PAGE_SIZE;
	mr->hwmr.fbo = ib_umem_offset(mr->umem);
	mr->hwmr.va = usr_addr;
	mr->hwmr.len = len;
	mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
	mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
	mr->hwmr.local_rd = 1;
	mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
	status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
	if (status)
		goto umem_err;
	build_user_pbes(dev, mr, num_pbes);
	status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
	if (status)
		goto mbx_err;
	mr->ibmr.lkey = mr->hwmr.lkey;
	if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
		mr->ibmr.rkey = mr->hwmr.lkey;

	return &mr->ibmr;

mbx_err:
	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
umem_err:
	kfree(mr);
	return ERR_PTR(status);
}

int ocrdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
{
	struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
	struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device);

	(void) ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);

	kfree(mr->pages);
	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);

	/* it could be user registered memory. */
	if (mr->umem)
		ib_umem_release(mr->umem);
	kfree(mr);

	/* Don't stop cleanup, in case FW is unresponsive */
	if (dev->mqe_ctx.fw_error_state) {
		pr_err("%s(%d) fw not responding.\n",
		       __func__, dev->id);
	}
	return 0;
}

static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
				struct ib_udata *udata)
{
	int status;
	struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
		udata, struct ocrdma_ucontext, ibucontext);
	struct ocrdma_create_cq_uresp uresp;

	/* this must be user flow! */
	if (!udata)
		return -EINVAL;

	memset(&uresp, 0, sizeof(uresp));
	uresp.cq_id = cq->id;
	uresp.page_size = PAGE_ALIGN(cq->len);
	uresp.num_pages = 1;
	uresp.max_hw_cqe = cq->max_hw_cqe;
	uresp.page_addr[0] = virt_to_phys(cq->va);
	uresp.db_page_addr = ocrdma_get_db_addr(dev, uctx->cntxt_pd->id);
	uresp.db_page_size = dev->nic_info.db_page_size;
	uresp.phase_change = cq->phase_change ? 1 : 0;
	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (status) {
		pr_err("%s(%d) copy error cqid=0x%x.\n",
		       __func__, dev->id, cq->id);
		goto err;
	}
	status = ocrdma_add_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
	if (status)
		goto err;
	status = ocrdma_add_mmap(uctx, uresp.page_addr[0], uresp.page_size);
	if (status) {
		ocrdma_del_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
		goto err;
	}
	cq->ucontext = uctx;
err:
	return status;
}

struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev,
			       const struct ib_cq_init_attr *attr,
			       struct ib_udata *udata)
{
	int entries = attr->cqe;
	struct ocrdma_cq *cq;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
	struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
		udata, struct ocrdma_ucontext, ibucontext);
	u16 pd_id = 0;
	int status;
	struct ocrdma_create_cq_ureq ureq;

	if (attr->flags)
		return ERR_PTR(-EINVAL);

	if (udata) {
		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
			return ERR_PTR(-EFAULT);
	} else
		ureq.dpp_cq = 0;
	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&cq->cq_lock);
	spin_lock_init(&cq->comp_handler_lock);
	INIT_LIST_HEAD(&cq->sq_head);
	INIT_LIST_HEAD(&cq->rq_head);

	if (udata)
		pd_id = uctx->cntxt_pd->id;

	status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq, pd_id);
	if (status) {
		kfree(cq);
		return ERR_PTR(status);
	}
	if (udata) {
		status = ocrdma_copy_cq_uresp(dev, cq, udata);
		if (status)
			goto ctx_err;
	}
	cq->phase = OCRDMA_CQE_VALID;
	dev->cq_tbl[cq->id] = cq;
	return &cq->ibcq;

ctx_err:
	ocrdma_mbx_destroy_cq(dev, cq);
	kfree(cq);
	return ERR_PTR(status);
}

int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt,
		     struct ib_udata *udata)
{
	int status = 0;
	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);

	if (new_cnt < 1 || new_cnt > cq->max_hw_cqe) {
		status = -EINVAL;
		return status;
	}
	ibcq->cqe = new_cnt;
	return status;
}

static void ocrdma_flush_cq(struct ocrdma_cq *cq)
{
	int cqe_cnt;
	int valid_count = 0;
	unsigned long flags;

	struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
	struct ocrdma_cqe *cqe = NULL;

	cqe = cq->va;
	cqe_cnt = cq->cqe_cnt;

	/* Last irq might have scheduled a polling thread
	 * sync-up with it before hard flushing.
	 */
	spin_lock_irqsave(&cq->cq_lock, flags);
	while (cqe_cnt) {
		if (is_cqe_valid(cq, cqe))
			valid_count++;
		cqe++;
		cqe_cnt--;
	}
	ocrdma_ring_cq_db(dev, cq->id, false, false, valid_count);
	spin_unlock_irqrestore(&cq->cq_lock, flags);
}

int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
	struct ocrdma_eq *eq = NULL;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
	int pdid = 0;
	u32 irq, indx;

	dev->cq_tbl[cq->id] = NULL;
	indx = ocrdma_get_eq_table_index(dev, cq->eqn);
	BUG_ON(indx == -EINVAL);

	eq = &dev->eq_tbl[indx];
	irq = ocrdma_get_irq(dev, eq);
	synchronize_irq(irq);
	ocrdma_flush_cq(cq);

	(void)ocrdma_mbx_destroy_cq(dev, cq);
	if (cq->ucontext) {
		pdid = cq->ucontext->cntxt_pd->id;
		ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
				PAGE_ALIGN(cq->len));
		ocrdma_del_mmap(cq->ucontext,
				ocrdma_get_db_addr(dev, pdid),
				dev->nic_info.db_page_size);
	}

	kfree(cq);
	return 0;
}

static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
	int status = -EINVAL;

	if (qp->id < OCRDMA_MAX_QP && dev->qp_tbl[qp->id] == NULL) {
		dev->qp_tbl[qp->id] = qp;
		status = 0;
	}
	return status;
}

static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
	dev->qp_tbl[qp->id] = NULL;
}

static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
				  struct ib_qp_init_attr *attrs,
				  struct ib_udata *udata)
{
	if ((attrs->qp_type != IB_QPT_GSI) &&
	    (attrs->qp_type != IB_QPT_RC) &&
	    (attrs->qp_type != IB_QPT_UC) &&
	    (attrs->qp_type != IB_QPT_UD)) {
		pr_err("%s(%d) unsupported qp type=0x%x requested\n",
		       __func__, dev->id, attrs->qp_type);
		return -EINVAL;
	}
	/* Skip the check for QP1 to support CM size of 128 */
	if ((attrs->qp_type != IB_QPT_GSI) &&
	    (attrs->cap.max_send_wr > dev->attr.max_wqe)) {
		pr_err("%s(%d) unsupported send_wr=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_send_wr);
		pr_err("%s(%d) supported send_wr=0x%x\n",
		       __func__, dev->id, dev->attr.max_wqe);
		return -EINVAL;
	}
	if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) {
		pr_err("%s(%d) unsupported recv_wr=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_recv_wr);
		pr_err("%s(%d) supported recv_wr=0x%x\n",
		       __func__, dev->id, dev->attr.max_rqe);
		return -EINVAL;
	}
	if (attrs->cap.max_inline_data > dev->attr.max_inline_data) {
		pr_err("%s(%d) unsupported inline data size=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_inline_data);
		pr_err("%s(%d) supported inline data size=0x%x\n",
		       __func__, dev->id, dev->attr.max_inline_data);
		return -EINVAL;
	}
	if (attrs->cap.max_send_sge > dev->attr.max_send_sge) {
		pr_err("%s(%d) unsupported send_sge=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_send_sge);
		pr_err("%s(%d) supported send_sge=0x%x\n",
		       __func__, dev->id, dev->attr.max_send_sge);
		return -EINVAL;
	}
	if (attrs->cap.max_recv_sge > dev->attr.max_recv_sge) {
		pr_err("%s(%d) unsupported recv_sge=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_recv_sge);
		pr_err("%s(%d) supported recv_sge=0x%x\n",
		       __func__, dev->id, dev->attr.max_recv_sge);
		return -EINVAL;
	}
	/* unprivileged user space cannot create special QP */
	if (udata && attrs->qp_type == IB_QPT_GSI) {
		pr_err
		    ("%s(%d) Userspace can't create special QPs of type=0x%x\n",
		     __func__, dev->id, attrs->qp_type);
		return -EINVAL;
	}
	/* allow creating only one GSI type of QP */
	if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) {
		pr_err("%s(%d) GSI special QPs already created.\n",
		       __func__, dev->id);
		return -EINVAL;
	}
	/* verify consumer QPs are not trying to use GSI QP's CQ */
	if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) {
		if ((dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq)) ||
		    (dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) {
			pr_err("%s(%d) Consumer QP cannot use GSI CQs.\n",
			       __func__, dev->id);
			return -EINVAL;
		}
	}
	return 0;
}

static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
				struct ib_udata *udata, int dpp_offset,
				int dpp_credit_lmt, int srq)
{
	int status;
	u64 usr_db;
	struct ocrdma_create_qp_uresp uresp;
	struct ocrdma_pd *pd = qp->pd;
	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);

	memset(&uresp, 0, sizeof(uresp));
	usr_db = dev->nic_info.unmapped_db +
		 (pd->id * dev->nic_info.db_page_size);
	uresp.qp_id = qp->id;
	uresp.sq_dbid = qp->sq.dbid;
	uresp.num_sq_pages = 1;
	uresp.sq_page_size = PAGE_ALIGN(qp->sq.len);
	uresp.sq_page_addr[0] = virt_to_phys(qp->sq.va);
	uresp.num_wqe_allocated = qp->sq.max_cnt;
	if (!srq) {
		uresp.rq_dbid = qp->rq.dbid;
		uresp.num_rq_pages = 1;
		uresp.rq_page_size = PAGE_ALIGN(qp->rq.len);
		uresp.rq_page_addr[0] = virt_to_phys(qp->rq.va);
		uresp.num_rqe_allocated = qp->rq.max_cnt;
	}
	uresp.db_page_addr = usr_db;
	uresp.db_page_size = dev->nic_info.db_page_size;
	uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET;
	uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
	uresp.db_shift = OCRDMA_DB_RQ_SHIFT;

	if (qp->dpp_enabled) {
		uresp.dpp_credit = dpp_credit_lmt;
		uresp.dpp_offset = dpp_offset;
	}
	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (status) {
		pr_err("%s(%d) user copy error.\n", __func__, dev->id);
		goto err;
	}
	status = ocrdma_add_mmap(pd->uctx, uresp.sq_page_addr[0],
				 uresp.sq_page_size);
	if (status)
		goto err;

	if (!srq) {
		status = ocrdma_add_mmap(pd->uctx, uresp.rq_page_addr[0],
					 uresp.rq_page_size);
		if (status)
			goto rq_map_err;
	}
	return status;
rq_map_err:
	ocrdma_del_mmap(pd->uctx, uresp.sq_page_addr[0], uresp.sq_page_size);
err:
	return status;
}

static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
			     struct ocrdma_pd *pd)
{
	if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
		qp->sq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_GEN2_SQ_OFFSET;
		qp->rq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_GEN2_RQ_OFFSET;
	} else {
		qp->sq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_SQ_OFFSET;
		qp->rq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_RQ_OFFSET;
	}
}

static int ocrdma_alloc_wr_id_tbl(struct ocrdma_qp *qp)
{
	qp->wqe_wr_id_tbl =
	    kcalloc(qp->sq.max_cnt, sizeof(*(qp->wqe_wr_id_tbl)),
		    GFP_KERNEL);
	if (qp->wqe_wr_id_tbl == NULL)
		return -ENOMEM;
	qp->rqe_wr_id_tbl =
	    kcalloc(qp->rq.max_cnt, sizeof(u64), GFP_KERNEL);
	if (qp->rqe_wr_id_tbl == NULL)
		return -ENOMEM;

	return 0;
}

static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp,
				      struct ocrdma_pd *pd,
				      struct ib_qp_init_attr *attrs)
{
	qp->pd = pd;
	spin_lock_init(&qp->q_lock);
	INIT_LIST_HEAD(&qp->sq_entry);
	INIT_LIST_HEAD(&qp->rq_entry);

	qp->qp_type = attrs->qp_type;
	qp->cap_flags = OCRDMA_QP_INB_RD | OCRDMA_QP_INB_WR;
	qp->max_inline_data = attrs->cap.max_inline_data;
	qp->sq.max_sges = attrs->cap.max_send_sge;
	qp->rq.max_sges = attrs->cap.max_recv_sge;
	qp->state = OCRDMA_QPS_RST;
	qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
}

static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
				   struct ib_qp_init_attr *attrs)
{
	if (attrs->qp_type == IB_QPT_GSI) {
		dev->gsi_qp_created = 1;
		dev->gsi_sqcq = get_ocrdma_cq(attrs->send_cq);
		dev->gsi_rqcq = get_ocrdma_cq(attrs->recv_cq);
	}
}

struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
			       struct ib_qp_init_attr *attrs,
			       struct ib_udata *udata)
{
	int status;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	struct ocrdma_create_qp_ureq ureq;
	u16 dpp_credit_lmt, dpp_offset;

	status = ocrdma_check_qp_params(ibpd, dev, attrs, udata);
	if (status)
		goto gen_err;

	memset(&ureq, 0, sizeof(ureq));
	if (udata) {
		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
			return ERR_PTR(-EFAULT);
	}
	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp) {
		status = -ENOMEM;
		goto gen_err;
	}
	ocrdma_set_qp_init_params(qp, pd, attrs);
	if (udata == NULL)
		qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
				  OCRDMA_QP_FAST_REG);

	mutex_lock(&dev->dev_lock);
	status = ocrdma_mbx_create_qp(qp, attrs, ureq.enable_dpp_cq,
				      ureq.dpp_cq_id,
				      &dpp_offset, &dpp_credit_lmt);
	if (status)
		goto mbx_err;

	/* user space QP's wr_id tables are managed by the library */
	if (udata == NULL) {
		status = ocrdma_alloc_wr_id_tbl(qp);
		if (status)
			goto map_err;
	}

	status = ocrdma_add_qpn_map(dev, qp);
	if (status)
		goto map_err;
	ocrdma_set_qp_db(dev, qp, pd);
	if (udata) {
		status = ocrdma_copy_qp_uresp(qp, udata, dpp_offset,
					      dpp_credit_lmt,
					      (attrs->srq != NULL));
		if (status)
			goto cpy_err;
	}
	ocrdma_store_gsi_qp_cq(dev, attrs);
	qp->ibqp.qp_num = qp->id;
	mutex_unlock(&dev->dev_lock);
	return &qp->ibqp;

cpy_err:
	ocrdma_del_qpn_map(dev, qp);
map_err:
	ocrdma_mbx_destroy_qp(dev, qp);
mbx_err:
	mutex_unlock(&dev->dev_lock);
	kfree(qp->wqe_wr_id_tbl);
	kfree(qp->rqe_wr_id_tbl);
	kfree(qp);
	pr_err("%s(%d) error=%d\n", __func__, dev->id, status);
gen_err:
	return ERR_PTR(status);
}

int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask)
{
	int status = 0;
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev;
	enum ib_qp_state old_qps;

	qp = get_ocrdma_qp(ibqp);
	dev = get_ocrdma_dev(ibqp->device);
	if (attr_mask & IB_QP_STATE)
		status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps);
	/* if new and previous states are same hw doesn't need to
	 * know about it.
	 */
	if (status < 0)
		return status;
	return ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask);
}

int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_udata *udata)
{
	unsigned long flags;
	int status = -EINVAL;
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev;
	enum ib_qp_state old_qps, new_qps;

	qp = get_ocrdma_qp(ibqp);
	dev = get_ocrdma_dev(ibqp->device);

	/* synchronize with multiple contexts trying to change/retrieve qps */
	mutex_lock(&dev->dev_lock);
	/* synchronize with wqe, rqe posting and cqe processing contexts */
	spin_lock_irqsave(&qp->q_lock, flags);
	old_qps = get_ibqp_state(qp->state);
	if (attr_mask & IB_QP_STATE)
		new_qps = attr->qp_state;
	else
		new_qps = old_qps;
	spin_unlock_irqrestore(&qp->q_lock, flags);

	if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask)) {
		pr_err("%s(%d) invalid attribute mask=0x%x specified for\n"
		       "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
		       __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
		       old_qps, new_qps);
		goto param_err;
	}

	status = _ocrdma_modify_qp(ibqp, attr, attr_mask);
	if (status > 0)
		status = 0;
param_err:
	mutex_unlock(&dev->dev_lock);
	return status;
}

static enum ib_mtu ocrdma_mtu_int_to_enum(u16 mtu)
{
	switch (mtu) {
	case 256:
		return IB_MTU_256;
	case 512:
		return IB_MTU_512;
	case 1024:
		return IB_MTU_1024;
	case 2048:
		return IB_MTU_2048;
	case 4096:
		return IB_MTU_4096;
	default:
		return IB_MTU_1024;
	}
}

static int ocrdma_to_ib_qp_acc_flags(int qp_cap_flags)
{
	int ib_qp_acc_flags = 0;

	if (qp_cap_flags & OCRDMA_QP_INB_WR)
		ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
	if (qp_cap_flags & OCRDMA_QP_INB_RD)
		ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
	return ib_qp_acc_flags;
}

int ocrdma_query_qp(struct ib_qp *ibqp,
		    struct ib_qp_attr *qp_attr,
		    int attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	int status;
	u32 qp_state;
	struct ocrdma_qp_params params;
	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibqp->device);

	memset(&params, 0, sizeof(params));
	mutex_lock(&dev->dev_lock);
	status = ocrdma_mbx_query_qp(dev, qp, &params);
	mutex_unlock(&dev->dev_lock);
	if (status)
		goto mbx_err;
	if (qp->qp_type == IB_QPT_UD)
		qp_attr->qkey = params.qkey;
	qp_attr->path_mtu =
		ocrdma_mtu_int_to_enum(params.path_mtu_pkey_indx &
				       OCRDMA_QP_PARAMS_PATH_MTU_MASK) >>
				       OCRDMA_QP_PARAMS_PATH_MTU_SHIFT;
	qp_attr->path_mig_state = IB_MIG_MIGRATED;
	qp_attr->rq_psn = params.hop_lmt_rq_psn & OCRDMA_QP_PARAMS_RQ_PSN_MASK;
	qp_attr->sq_psn = params.tclass_sq_psn & OCRDMA_QP_PARAMS_SQ_PSN_MASK;
	qp_attr->dest_qp_num =
	    params.ack_to_rnr_rtc_dest_qpn & OCRDMA_QP_PARAMS_DEST_QPN_MASK;

	qp_attr->qp_access_flags = ocrdma_to_ib_qp_acc_flags(qp->cap_flags);
	qp_attr->cap.max_send_wr = qp->sq.max_cnt - 1;
	qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1;
	qp_attr->cap.max_send_sge = qp->sq.max_sges;
	qp_attr->cap.max_recv_sge = qp->rq.max_sges;
	qp_attr->cap.max_inline_data = qp->max_inline_data;
	qp_init_attr->cap = qp_attr->cap;
	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;

	rdma_ah_set_grh(&qp_attr->ah_attr, NULL,
			params.rnt_rc_sl_fl &
			  OCRDMA_QP_PARAMS_FLOW_LABEL_MASK,
			qp->sgid_idx,
			(params.hop_lmt_rq_psn &
			 OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
			 OCRDMA_QP_PARAMS_HOP_LMT_SHIFT,
			(params.tclass_sq_psn &
			 OCRDMA_QP_PARAMS_TCLASS_MASK) >>
			 OCRDMA_QP_PARAMS_TCLASS_SHIFT);
	rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid[0]);

	rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
	rdma_ah_set_sl(&qp_attr->ah_attr, (params.rnt_rc_sl_fl &
					   OCRDMA_QP_PARAMS_SL_MASK) >>
					   OCRDMA_QP_PARAMS_SL_SHIFT);
	qp_attr->timeout = (params.ack_to_rnr_rtc_dest_qpn &
			    OCRDMA_QP_PARAMS_ACK_TIMEOUT_MASK) >>
			    OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
	qp_attr->rnr_retry = (params.ack_to_rnr_rtc_dest_qpn &
			      OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK) >>
			      OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT;
	qp_attr->retry_cnt =
	    (params.rnt_rc_sl_fl & OCRDMA_QP_PARAMS_RETRY_CNT_MASK) >>
	    OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT;
	qp_attr->min_rnr_timer = 0;
	qp_attr->pkey_index = 0;
	qp_attr->port_num = 1;
	rdma_ah_set_path_bits(&qp_attr->ah_attr, 0);
	rdma_ah_set_static_rate(&qp_attr->ah_attr, 0);
	qp_attr->alt_pkey_index = 0;
	qp_attr->alt_port_num = 0;
	qp_attr->alt_timeout = 0;
	memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
	qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >>
		    OCRDMA_QP_PARAMS_STATE_SHIFT;
	qp_attr->qp_state = get_ibqp_state(qp_state);
	qp_attr->cur_qp_state = qp_attr->qp_state;
	qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0;
	qp_attr->max_dest_rd_atomic =
	    params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT;
	qp_attr->max_rd_atomic =
	    params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK;
	qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags &
					OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0;
	/* Sync driver QP state with FW */
	ocrdma_qp_state_change(qp, qp_attr->qp_state, NULL);
mbx_err:
	return status;
}

static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, unsigned int idx)
{
	unsigned int i = idx / 32;
	u32 mask = (1U << (idx % 32));

	srq->idx_bit_fields[i] ^= mask;
}

static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
{
	return ((q->max_wqe_idx - q->head) + q->tail) % q->max_cnt;
}

static int is_hw_sq_empty(struct ocrdma_qp *qp)
{
	return (qp->sq.tail == qp->sq.head);
}

static int is_hw_rq_empty(struct ocrdma_qp *qp)
{
	return (qp->rq.tail == qp->rq.head);
}

static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q)
{
	return q->va + (q->head * q->entry_size);
}

static void *ocrdma_hwq_head_from_idx(struct ocrdma_qp_hwq_info *q,
				      u32 idx)
{
	return q->va + (idx * q->entry_size);
}

static void ocrdma_hwq_inc_head(struct ocrdma_qp_hwq_info *q)
{
	q->head = (q->head + 1) & q->max_wqe_idx;
}

static void ocrdma_hwq_inc_tail(struct ocrdma_qp_hwq_info *q)
{
	q->tail = (q->tail + 1) & q->max_wqe_idx;
}

/* discard the cqe for a given QP */
static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
{
	unsigned long cq_flags;
	unsigned long flags;
	int discard_cnt = 0;
	u32 cur_getp, stop_getp;
	struct ocrdma_cqe *cqe;
	u32 qpn = 0, wqe_idx = 0;

	spin_lock_irqsave(&cq->cq_lock, cq_flags);

	/* traverse through the CQEs in the hw CQ,
	 * find the matching CQE for a given qp,
	 * mark the matching one discarded by clearing qpn.
	 * ring the doorbell in the poll_cq() as
	 * we don't complete out of order cqe.
	 */

	cur_getp = cq->getp;
	/* find up to where we reap the cq. */
	stop_getp = cur_getp;
	do {
		if (is_hw_sq_empty(qp) && (!qp->srq && is_hw_rq_empty(qp)))
			break;

		cqe = cq->va + cur_getp;
		/* if (a) done reaping whole hw cq, or
		 * (b) qp_xq becomes empty.
		 * then exit
		 */
		qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK;
		/* if previously discarded cqe found, skip that too. */
		/* check for matching qp */
		if (qpn == 0 || qpn != qp->id)
			goto skip_cqe;

		if (is_cqe_for_sq(cqe)) {
			ocrdma_hwq_inc_tail(&qp->sq);
		} else {
			if (qp->srq) {
				wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
					   OCRDMA_CQE_BUFTAG_SHIFT) &
					   qp->srq->rq.max_wqe_idx;
				BUG_ON(wqe_idx < 1);
				spin_lock_irqsave(&qp->srq->q_lock, flags);
				ocrdma_hwq_inc_tail(&qp->srq->rq);
				ocrdma_srq_toggle_bit(qp->srq, wqe_idx - 1);
				spin_unlock_irqrestore(&qp->srq->q_lock, flags);

			} else {
				ocrdma_hwq_inc_tail(&qp->rq);
			}
		}
		/* mark cqe discarded so that it is not picked up later
		 * in the poll_cq().
		 */
		discard_cnt += 1;
		cqe->cmn.qpn = 0;
skip_cqe:
		cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
	} while (cur_getp != stop_getp);
	spin_unlock_irqrestore(&cq->cq_lock, cq_flags);
}

void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
{
	int found = false;
	unsigned long flags;
	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
	/* sync with any active CQ poll */

	spin_lock_irqsave(&dev->flush_q_lock, flags);
	found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
	if (found)
		list_del(&qp->sq_entry);
	if (!qp->srq) {
		found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
		if (found)
			list_del(&qp->rq_entry);
	}
	spin_unlock_irqrestore(&dev->flush_q_lock, flags);
}

int ocrdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct ocrdma_pd *pd;
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev;
	struct ib_qp_attr attrs;
	int attr_mask;
	unsigned long flags;

	qp = get_ocrdma_qp(ibqp);
	dev = get_ocrdma_dev(ibqp->device);

	pd = qp->pd;

	/* change the QP state to ERROR */
	if (qp->state != OCRDMA_QPS_RST) {
		attrs.qp_state = IB_QPS_ERR;
		attr_mask = IB_QP_STATE;
		_ocrdma_modify_qp(ibqp, &attrs, attr_mask);
	}
	/* ensure that CQEs for a newly created QP (whose id may be the same
	 * as one just getting destroyed) don't get discarded until the old
	 * CQEs are discarded.
	 */
	mutex_lock(&dev->dev_lock);
	(void) ocrdma_mbx_destroy_qp(dev, qp);

	/*
	 * acquire CQ lock while destroy is in progress, in order to
	 * protect against processing in-flight CQEs for this QP.
	 */
	spin_lock_irqsave(&qp->sq_cq->cq_lock, flags);
	if (qp->rq_cq && (qp->rq_cq != qp->sq_cq)) {
		spin_lock(&qp->rq_cq->cq_lock);
		ocrdma_del_qpn_map(dev, qp);
		spin_unlock(&qp->rq_cq->cq_lock);
	} else {
		ocrdma_del_qpn_map(dev, qp);
	}
	spin_unlock_irqrestore(&qp->sq_cq->cq_lock, flags);

	if (!pd->uctx) {
		ocrdma_discard_cqes(qp, qp->sq_cq);
		ocrdma_discard_cqes(qp, qp->rq_cq);
	}
	mutex_unlock(&dev->dev_lock);

	if (pd->uctx) {
		ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa,
				PAGE_ALIGN(qp->sq.len));
		if (!qp->srq)
			ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa,
					PAGE_ALIGN(qp->rq.len));
	}

	ocrdma_del_flush_qp(qp);

	kfree(qp->wqe_wr_id_tbl);
	kfree(qp->rqe_wr_id_tbl);
	kfree(qp);
	return 0;
}

static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
				 struct ib_udata *udata)
{
	int status;
	struct ocrdma_create_srq_uresp uresp;

	memset(&uresp, 0, sizeof(uresp));
	uresp.rq_dbid = srq->rq.dbid;
	uresp.num_rq_pages = 1;
	uresp.rq_page_addr[0] = virt_to_phys(srq->rq.va);
	uresp.rq_page_size = srq->rq.len;
	uresp.db_page_addr = dev->nic_info.unmapped_db +
			     (srq->pd->id * dev->nic_info.db_page_size);
	uresp.db_page_size = dev->nic_info.db_page_size;
	uresp.num_rqe_allocated = srq->rq.max_cnt;
	if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
		uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
		uresp.db_shift = 24;
	} else {
		uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
		uresp.db_shift = 16;
	}

	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (status)
		return status;
	status = ocrdma_add_mmap(srq->pd->uctx, uresp.rq_page_addr[0],
				 uresp.rq_page_size);
	if (status)
		return status;
	return status;
}

int ocrdma_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
		      struct ib_udata *udata)
{
	int status;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibsrq->pd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);
	struct ocrdma_srq *srq = get_ocrdma_srq(ibsrq);

	if (init_attr->attr.max_sge > dev->attr.max_recv_sge)
		return -EINVAL;
	if (init_attr->attr.max_wr > dev->attr.max_rqe)
		return -EINVAL;

	spin_lock_init(&srq->q_lock);
	srq->pd = pd;
	srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size);
	status = ocrdma_mbx_create_srq(dev, srq, init_attr, pd);
	if (status)
		return status;

	if (!udata) {
		srq->rqe_wr_id_tbl = kcalloc(srq->rq.max_cnt, sizeof(u64),
					     GFP_KERNEL);
		if (!srq->rqe_wr_id_tbl) {
			status = -ENOMEM;
			goto arm_err;
		}

		srq->bit_fields_len = (srq->rq.max_cnt / 32) +
		    (srq->rq.max_cnt % 32 ? 1 : 0);
		srq->idx_bit_fields =
		    kmalloc_array(srq->bit_fields_len, sizeof(u32),
				  GFP_KERNEL);
		if (!srq->idx_bit_fields) {
			status = -ENOMEM;
			goto arm_err;
		}
		memset(srq->idx_bit_fields, 0xff,
		       srq->bit_fields_len * sizeof(u32));
	}

	if (init_attr->attr.srq_limit) {
		status = ocrdma_mbx_modify_srq(srq, &init_attr->attr);
		if (status)
			goto arm_err;
	}

	if (udata) {
		status = ocrdma_copy_srq_uresp(dev, srq, udata);
		if (status)
			goto arm_err;
	}

	return 0;

arm_err:
	ocrdma_mbx_destroy_srq(dev, srq);
	kfree(srq->rqe_wr_id_tbl);
	kfree(srq->idx_bit_fields);
	return status;
}

int ocrdma_modify_srq(struct ib_srq *ibsrq,
		      struct ib_srq_attr *srq_attr,
		      enum ib_srq_attr_mask srq_attr_mask,
		      struct ib_udata *udata)
{
	int status;
	struct ocrdma_srq *srq;

	srq = get_ocrdma_srq(ibsrq);
	if (srq_attr_mask & IB_SRQ_MAX_WR)
		status = -EINVAL;
	else
		status = ocrdma_mbx_modify_srq(srq, srq_attr);
	return status;
}

int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	int status;
	struct ocrdma_srq *srq;

	srq = get_ocrdma_srq(ibsrq);
	status = ocrdma_mbx_query_srq(srq, srq_attr);
	return status;
}

void ocrdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
	struct ocrdma_srq *srq;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);

	srq = get_ocrdma_srq(ibsrq);

	ocrdma_mbx_destroy_srq(dev, srq);

	if (srq->pd->uctx)
		ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa,
				PAGE_ALIGN(srq->rq.len));

	kfree(srq->idx_bit_fields);
	kfree(srq->rqe_wr_id_tbl);
}

/* unprivileged verbs and their support functions. */
static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
				struct ocrdma_hdr_wqe *hdr,
				const struct ib_send_wr *wr)
{
	struct ocrdma_ewqe_ud_hdr *ud_hdr =
		(struct ocrdma_ewqe_ud_hdr *)(hdr + 1);
	struct ocrdma_ah *ah = get_ocrdma_ah(ud_wr(wr)->ah);

	ud_hdr->rsvd_dest_qpn = ud_wr(wr)->remote_qpn;
	if (qp->qp_type == IB_QPT_GSI)
		ud_hdr->qkey = qp->qkey;
	else
		ud_hdr->qkey = ud_wr(wr)->remote_qkey;
	ud_hdr->rsvd_ahid = ah->id;
	ud_hdr->hdr_type = ah->hdr_type;
	if (ah->av->valid & OCRDMA_AV_VLAN_VALID)
		hdr->cw |= (OCRDMA_FLAG_AH_VLAN_PR << OCRDMA_WQE_FLAGS_SHIFT);
}

static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr,
			      struct ocrdma_sge *sge, int num_sge,
			      struct ib_sge *sg_list)
{
	int i;

	for (i = 0; i < num_sge; i++) {
		sge[i].lrkey = sg_list[i].lkey;
		sge[i].addr_lo = sg_list[i].addr;
		sge[i].addr_hi = upper_32_bits(sg_list[i].addr);
		sge[i].len = sg_list[i].length;
		hdr->total_len += sg_list[i].length;
	}
	if (num_sge == 0)
		memset(sge, 0, sizeof(*sge));
}

static inline uint32_t ocrdma_sglist_len(struct ib_sge *sg_list, int num_sge)
{
	uint32_t total_len = 0, i;

	for (i = 0; i < num_sge; i++)
		total_len += sg_list[i].length;
	return total_len;
}

static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
				    struct ocrdma_hdr_wqe *hdr,
				    struct ocrdma_sge *sge,
				    const struct ib_send_wr *wr, u32 wqe_size)
{
	int i;
	char *dpp_addr;

	if (wr->send_flags & IB_SEND_INLINE && qp->qp_type != IB_QPT_UD) {
		hdr->total_len = ocrdma_sglist_len(wr->sg_list, wr->num_sge);
		if (unlikely(hdr->total_len > qp->max_inline_data)) {
			pr_err("%s() supported_len=0x%x,\n"
			       " unsupported len req=0x%x\n", __func__,
			       qp->max_inline_data, hdr->total_len);
			return -EINVAL;
		}
		dpp_addr = (char *)sge;
		for (i = 0; i < wr->num_sge; i++) {
			memcpy(dpp_addr,
			       (void *)(unsigned long)wr->sg_list[i].addr,
			       wr->sg_list[i].length);
			dpp_addr += wr->sg_list[i].length;
		}

		wqe_size += roundup(hdr->total_len, OCRDMA_WQE_ALIGN_BYTES);
		if (0 == hdr->total_len)
			wqe_size += sizeof(struct ocrdma_sge);
		hdr->cw |= (OCRDMA_TYPE_INLINE << OCRDMA_WQE_TYPE_SHIFT);
	} else {
		ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
		if (wr->num_sge)
			wqe_size += (wr->num_sge * sizeof(struct ocrdma_sge));
		else
			wqe_size += sizeof(struct ocrdma_sge);
		hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
	}
	hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
	return 0;
}

static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
			     const struct ib_send_wr *wr)
{
	int status;
	struct ocrdma_sge *sge;
	u32 wqe_size = sizeof(*hdr);

	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
		ocrdma_build_ud_hdr(qp, hdr, wr);
		sge = (struct ocrdma_sge *)(hdr + 2);
		wqe_size += sizeof(struct ocrdma_ewqe_ud_hdr);
	} else {
		sge = (struct ocrdma_sge *)(hdr + 1);
	}

	status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
	return status;
}

static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
			      const struct ib_send_wr *wr)
{
	int status;
	struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
			      const struct ib_send_wr *wr)
{
	int status;
	struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
	struct ocrdma_sge *sge = ext_rw + 1;
	u32 wqe_size = sizeof(*hdr) + sizeof(*ext_rw);

	status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
	if (status)
		return status;
	ext_rw->addr_lo = rdma_wr(wr)->remote_addr;
	ext_rw->addr_hi = upper_32_bits(rdma_wr(wr)->remote_addr);
	ext_rw->lrkey = rdma_wr(wr)->rkey;
	ext_rw->len = hdr->total_len;
	return 0;
}

static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
			      const struct ib_send_wr *wr)
{
	struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
	struct ocrdma_sge *sge = ext_rw + 1;
	u32 wqe_size = ((wr->num_sge + 1) * sizeof(struct ocrdma_sge)) +
		sizeof(struct ocrdma_hdr_wqe);

	ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
	hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
	hdr->cw |= (OCRDMA_READ << OCRDMA_WQE_OPCODE_SHIFT);
	hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);

	ext_rw->addr_lo = rdma_wr(wr)->remote_addr;
	ext_rw->addr_hi = upper_32_bits(rdma_wr(wr)->remote_addr);
	ext_rw->lrkey = rdma_wr(wr)->rkey;
	ext_rw->len = hdr->total_len;
}

static int get_encoded_page_size(int pg_sz)
{
	/* Max size is 256M: 4096 << 16 */
	int i = 0;

	for (; i < 17; i++)
		if (pg_sz == (4096 << i))
			break;
	return i;
}
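/*
 * Worked example for get_encoded_page_size() above: a fast-register MR
 * using 64 KB pages has pg_sz == 65536 == 4096 << 4, so the encoding is
 * 4; the default 4 KB page size encodes as 0.  A page size that is not a
 * power-of-two multiple of 4 KB up to 256 MB falls out of the loop and
 * returns 17, i.e. beyond the maximum noted in the comment.
 */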
static int ocrdma_build_reg(struct ocrdma_qp *qp,
			    struct ocrdma_hdr_wqe *hdr,
			    const struct ib_reg_wr *wr)
{
	u64 fbo;
	struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
	struct ocrdma_mr *mr = get_ocrdma_mr(wr->mr);
	struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
	struct ocrdma_pbe *pbe;
	u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr);
	int num_pbes = 0, i;

	wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);

	hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
	hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);

	if (wr->access & IB_ACCESS_LOCAL_WRITE)
		hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_LOCAL_WR;
	if (wr->access & IB_ACCESS_REMOTE_WRITE)
		hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_WR;
	if (wr->access & IB_ACCESS_REMOTE_READ)
		hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_RD;
	hdr->lkey = wr->key;
	hdr->total_len = mr->ibmr.length;

	fbo = mr->ibmr.iova - mr->pages[0];

	fast_reg->va_hi = upper_32_bits(mr->ibmr.iova);
	fast_reg->va_lo = (u32) (mr->ibmr.iova & 0xffffffff);
	fast_reg->fbo_hi = upper_32_bits(fbo);
	fast_reg->fbo_lo = (u32) fbo & 0xffffffff;
	fast_reg->num_sges = mr->npages;
	fast_reg->size_sge = get_encoded_page_size(mr->ibmr.page_size);

	pbe = pbl_tbl->va;
	for (i = 0; i < mr->npages; i++) {
		u64 buf_addr = mr->pages[i];

		pbe->pa_lo = cpu_to_le32((u32) (buf_addr & PAGE_MASK));
		pbe->pa_hi = cpu_to_le32((u32) upper_32_bits(buf_addr));
		num_pbes += 1;
		pbe++;

		/* if the PBL holding the PBEs is full, move to the next PBL */
		if (num_pbes == (mr->hwmr.pbl_size / sizeof(u64))) {
			pbl_tbl++;
			pbe = (struct ocrdma_pbe *)pbl_tbl->va;
		}
	}

	return 0;
}
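/*
 * Note on the fast-register WQE built above: the hardware is given the
 * full virtual address (va_hi/va_lo) plus the byte offset of the iova
 * from the first page address (fbo), and the page list is written as
 * little-endian PBEs into one or more PBLs that were pre-allocated by
 * ocrdma_alloc_mr().  num_sges carries the page count and size_sge the
 * encoded page size from get_encoded_page_size().
 */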
static void ocrdma_ring_sq_db(struct ocrdma_qp *qp)
{
	u32 val = qp->sq.dbid | (1 << OCRDMA_DB_SQ_SHIFT);

	iowrite32(val, qp->sq_db);
}

int ocrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		     const struct ib_send_wr **bad_wr)
{
	int status = 0;
	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
	struct ocrdma_hdr_wqe *hdr;
	unsigned long flags;

	spin_lock_irqsave(&qp->q_lock, flags);
	if (qp->state != OCRDMA_QPS_RTS && qp->state != OCRDMA_QPS_SQD) {
		spin_unlock_irqrestore(&qp->q_lock, flags);
		*bad_wr = wr;
		return -EINVAL;
	}

	while (wr) {
		if (qp->qp_type == IB_QPT_UD &&
		    (wr->opcode != IB_WR_SEND &&
		     wr->opcode != IB_WR_SEND_WITH_IMM)) {
			*bad_wr = wr;
			status = -EINVAL;
			break;
		}
		if (ocrdma_hwq_free_cnt(&qp->sq) == 0 ||
		    wr->num_sge > qp->sq.max_sges) {
			*bad_wr = wr;
			status = -ENOMEM;
			break;
		}
		hdr = ocrdma_hwq_head(&qp->sq);
		hdr->cw = 0;
		if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
			hdr->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
		if (wr->send_flags & IB_SEND_FENCE)
			hdr->cw |=
			    (OCRDMA_FLAG_FENCE_L << OCRDMA_WQE_FLAGS_SHIFT);
		if (wr->send_flags & IB_SEND_SOLICITED)
			hdr->cw |=
			    (OCRDMA_FLAG_SOLICIT << OCRDMA_WQE_FLAGS_SHIFT);
		hdr->total_len = 0;
		switch (wr->opcode) {
		case IB_WR_SEND_WITH_IMM:
			hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
			hdr->immdt = ntohl(wr->ex.imm_data);
			/* fall through */
		case IB_WR_SEND:
			hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
			status = ocrdma_build_send(qp, hdr, wr);
			break;
		case IB_WR_SEND_WITH_INV:
			hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
			hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
			hdr->lkey = wr->ex.invalidate_rkey;
			status = ocrdma_build_send(qp, hdr, wr);
			break;
		case IB_WR_RDMA_WRITE_WITH_IMM:
			hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
			hdr->immdt = ntohl(wr->ex.imm_data);
			/* fall through */
		case IB_WR_RDMA_WRITE:
			hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
			status = ocrdma_build_write(qp, hdr, wr);
			break;
		case IB_WR_RDMA_READ:
			ocrdma_build_read(qp, hdr, wr);
			break;
		case IB_WR_LOCAL_INV:
			hdr->cw |=
			    (OCRDMA_LKEY_INV << OCRDMA_WQE_OPCODE_SHIFT);
			hdr->cw |= ((sizeof(struct ocrdma_hdr_wqe) +
				     sizeof(struct ocrdma_sge)) /
				    OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT;
			hdr->lkey = wr->ex.invalidate_rkey;
			break;
		case IB_WR_REG_MR:
			status = ocrdma_build_reg(qp, hdr, reg_wr(wr));
			break;
		default:
			status = -EINVAL;
			break;
		}
		if (status) {
			*bad_wr = wr;
			break;
		}
		if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
			qp->wqe_wr_id_tbl[qp->sq.head].signaled = 1;
		else
			qp->wqe_wr_id_tbl[qp->sq.head].signaled = 0;
		qp->wqe_wr_id_tbl[qp->sq.head].wrid = wr->wr_id;
		ocrdma_cpu_to_le32(hdr, ((hdr->cw >> OCRDMA_WQE_SIZE_SHIFT) &
					 OCRDMA_WQE_SIZE_MASK) *
				   OCRDMA_WQE_STRIDE);
		/* make sure wqe is written before adapter can access it */
		wmb();
		/* inform hw to start processing it */
		ocrdma_ring_sq_db(qp);

		/* update pointer, counter for next wr */
		ocrdma_hwq_inc_head(&qp->sq);
		wr = wr->next;
	}
	spin_unlock_irqrestore(&qp->q_lock, flags);
	return status;
}

static void ocrdma_ring_rq_db(struct ocrdma_qp *qp)
{
	u32 val = qp->rq.dbid | (1 << OCRDMA_DB_RQ_SHIFT);

	iowrite32(val, qp->rq_db);
}

static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe,
			     const struct ib_recv_wr *wr, u16 tag)
{
	u32 wqe_size = 0;
	struct ocrdma_sge *sge;

	if (wr->num_sge)
		wqe_size = (wr->num_sge * sizeof(*sge)) + sizeof(*rqe);
	else
		wqe_size = sizeof(*sge) + sizeof(*rqe);

	rqe->cw = ((wqe_size / OCRDMA_WQE_STRIDE) <<
		   OCRDMA_WQE_SIZE_SHIFT);
	rqe->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
	rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
	rqe->total_len = 0;
	rqe->rsvd_tag = tag;
	sge = (struct ocrdma_sge *)(rqe + 1);
	ocrdma_build_sges(rqe, sge, wr->num_sge, wr->sg_list);
	ocrdma_cpu_to_le32(rqe, wqe_size);
}

int ocrdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		     const struct ib_recv_wr **bad_wr)
{
	int status = 0;
	unsigned long flags;
	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
	struct ocrdma_hdr_wqe *rqe;

	spin_lock_irqsave(&qp->q_lock, flags);
	if (qp->state == OCRDMA_QPS_RST || qp->state == OCRDMA_QPS_ERR) {
		spin_unlock_irqrestore(&qp->q_lock, flags);
		*bad_wr = wr;
		return -EINVAL;
	}
	while (wr) {
		if (ocrdma_hwq_free_cnt(&qp->rq) == 0 ||
		    wr->num_sge > qp->rq.max_sges) {
			*bad_wr = wr;
			status = -ENOMEM;
			break;
		}
		rqe = ocrdma_hwq_head(&qp->rq);
		ocrdma_build_rqe(rqe, wr, 0);

		qp->rqe_wr_id_tbl[qp->rq.head] = wr->wr_id;
		/* make sure rqe is written before adapter can access it */
		wmb();

		/* inform hw to start processing it */
		ocrdma_ring_rq_db(qp);

		/* update pointer, counter for next wr */
		ocrdma_hwq_inc_head(&qp->rq);
		wr = wr->next;
	}
	spin_unlock_irqrestore(&qp->q_lock, flags);
	return status;
}
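/*
 * Posting pattern used by ocrdma_post_send()/ocrdma_post_recv() above:
 * the WQE/RQE is built in host memory, converted to little endian, and
 * only then is the doorbell written with iowrite32().  The wmb() between
 * the two steps is what guarantees the adapter cannot observe the
 * doorbell before the queue entry itself is globally visible.  Note that
 * the doorbell is rung once per work request rather than once per chain.
 */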
/* CQEs for an SRQ's RQEs can potentially arrive out of order.
 * The index gives the entry in the shadow table where the wr_id is
 * stored; the same tag/index is returned in the CQE to reference
 * back to the RQE it completes.
 */
static int ocrdma_srq_get_idx(struct ocrdma_srq *srq)
{
	int row = 0;
	int indx = 0;

	for (row = 0; row < srq->bit_fields_len; row++) {
		if (srq->idx_bit_fields[row]) {
			indx = ffs(srq->idx_bit_fields[row]);
			indx = (row * 32) + (indx - 1);
			BUG_ON(indx >= srq->rq.max_cnt);
			ocrdma_srq_toggle_bit(srq, indx);
			break;
		}
	}

	BUG_ON(row == srq->bit_fields_len);
	return indx + 1; /* Use from index 1 */
}

static void ocrdma_ring_srq_db(struct ocrdma_srq *srq)
{
	u32 val = srq->rq.dbid | (1 << 16);

	iowrite32(val, srq->db + OCRDMA_DB_GEN2_SRQ_OFFSET);
}

int ocrdma_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			 const struct ib_recv_wr **bad_wr)
{
	int status = 0;
	unsigned long flags;
	struct ocrdma_srq *srq;
	struct ocrdma_hdr_wqe *rqe;
	u16 tag;

	srq = get_ocrdma_srq(ibsrq);

	spin_lock_irqsave(&srq->q_lock, flags);
	while (wr) {
		if (ocrdma_hwq_free_cnt(&srq->rq) == 0 ||
		    wr->num_sge > srq->rq.max_sges) {
			status = -ENOMEM;
			*bad_wr = wr;
			break;
		}
		tag = ocrdma_srq_get_idx(srq);
		rqe = ocrdma_hwq_head(&srq->rq);
		ocrdma_build_rqe(rqe, wr, tag);

		srq->rqe_wr_id_tbl[tag] = wr->wr_id;
		/* make sure rqe is written before adapter can perform DMA */
		wmb();
		/* inform hw to start processing it */
		ocrdma_ring_srq_db(srq);
		/* update pointer, counter for next wr */
		ocrdma_hwq_inc_head(&srq->rq);
		wr = wr->next;
	}
	spin_unlock_irqrestore(&srq->q_lock, flags);
	return status;
}
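/*
 * Illustrative walk-through of the tag scheme above: ffs() on an
 * idx_bit_fields word returns the 1-based position of the lowest set
 * (free) bit, so a fully free word (0xffffffff) in row 0 yields
 * indx == 0 and a returned tag of 1 (tag 0 is never used, which is why
 * the poll path can BUG_ON(wqe_idx < 1)).  Posting toggles the bit to
 * busy; the completion path in ocrdma_update_free_srq_cqe() toggles it
 * back once the wr_id has been handed to the consumer.
 */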
static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
{
	enum ib_wc_status ibwc_status;

	switch (status) {
	case OCRDMA_CQE_GENERAL_ERR:
		ibwc_status = IB_WC_GENERAL_ERR;
		break;
	case OCRDMA_CQE_LOC_LEN_ERR:
		ibwc_status = IB_WC_LOC_LEN_ERR;
		break;
	case OCRDMA_CQE_LOC_QP_OP_ERR:
		ibwc_status = IB_WC_LOC_QP_OP_ERR;
		break;
	case OCRDMA_CQE_LOC_EEC_OP_ERR:
		ibwc_status = IB_WC_LOC_EEC_OP_ERR;
		break;
	case OCRDMA_CQE_LOC_PROT_ERR:
		ibwc_status = IB_WC_LOC_PROT_ERR;
		break;
	case OCRDMA_CQE_WR_FLUSH_ERR:
		ibwc_status = IB_WC_WR_FLUSH_ERR;
		break;
	case OCRDMA_CQE_MW_BIND_ERR:
		ibwc_status = IB_WC_MW_BIND_ERR;
		break;
	case OCRDMA_CQE_BAD_RESP_ERR:
		ibwc_status = IB_WC_BAD_RESP_ERR;
		break;
	case OCRDMA_CQE_LOC_ACCESS_ERR:
		ibwc_status = IB_WC_LOC_ACCESS_ERR;
		break;
	case OCRDMA_CQE_REM_INV_REQ_ERR:
		ibwc_status = IB_WC_REM_INV_REQ_ERR;
		break;
	case OCRDMA_CQE_REM_ACCESS_ERR:
		ibwc_status = IB_WC_REM_ACCESS_ERR;
		break;
	case OCRDMA_CQE_REM_OP_ERR:
		ibwc_status = IB_WC_REM_OP_ERR;
		break;
	case OCRDMA_CQE_RETRY_EXC_ERR:
		ibwc_status = IB_WC_RETRY_EXC_ERR;
		break;
	case OCRDMA_CQE_RNR_RETRY_EXC_ERR:
		ibwc_status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case OCRDMA_CQE_LOC_RDD_VIOL_ERR:
		ibwc_status = IB_WC_LOC_RDD_VIOL_ERR;
		break;
	case OCRDMA_CQE_REM_INV_RD_REQ_ERR:
		ibwc_status = IB_WC_REM_INV_RD_REQ_ERR;
		break;
	case OCRDMA_CQE_REM_ABORT_ERR:
		ibwc_status = IB_WC_REM_ABORT_ERR;
		break;
	case OCRDMA_CQE_INV_EECN_ERR:
		ibwc_status = IB_WC_INV_EECN_ERR;
		break;
	case OCRDMA_CQE_INV_EEC_STATE_ERR:
		ibwc_status = IB_WC_INV_EEC_STATE_ERR;
		break;
	case OCRDMA_CQE_FATAL_ERR:
		ibwc_status = IB_WC_FATAL_ERR;
		break;
	case OCRDMA_CQE_RESP_TIMEOUT_ERR:
		ibwc_status = IB_WC_RESP_TIMEOUT_ERR;
		break;
	default:
		ibwc_status = IB_WC_GENERAL_ERR;
		break;
	}
	return ibwc_status;
}

static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
			     u32 wqe_idx)
{
	struct ocrdma_hdr_wqe *hdr;
	struct ocrdma_sge *rw;
	int opcode;

	hdr = ocrdma_hwq_head_from_idx(&qp->sq, wqe_idx);

	ibwc->wr_id = qp->wqe_wr_id_tbl[wqe_idx].wrid;
	/* Undo the hdr->cw swap */
	opcode = le32_to_cpu(hdr->cw) & OCRDMA_WQE_OPCODE_MASK;
	switch (opcode) {
	case OCRDMA_WRITE:
		ibwc->opcode = IB_WC_RDMA_WRITE;
		break;
	case OCRDMA_READ:
		rw = (struct ocrdma_sge *)(hdr + 1);
		ibwc->opcode = IB_WC_RDMA_READ;
		ibwc->byte_len = rw->len;
		break;
	case OCRDMA_SEND:
		ibwc->opcode = IB_WC_SEND;
		break;
	case OCRDMA_FR_MR:
		ibwc->opcode = IB_WC_REG_MR;
		break;
	case OCRDMA_LKEY_INV:
		ibwc->opcode = IB_WC_LOCAL_INV;
		break;
	default:
		ibwc->status = IB_WC_GENERAL_ERR;
		pr_err("%s() invalid opcode received = 0x%x\n",
		       __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK);
		break;
	}
}

static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp,
					  struct ocrdma_cqe *cqe)
{
	if (is_cqe_for_sq(cqe)) {
		cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
				cqe->flags_status_srcqpn) &
				~OCRDMA_CQE_STATUS_MASK);
		cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
				cqe->flags_status_srcqpn) |
				(OCRDMA_CQE_WR_FLUSH_ERR <<
					OCRDMA_CQE_STATUS_SHIFT));
	} else {
		if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
					cqe->flags_status_srcqpn) &
					~OCRDMA_CQE_UD_STATUS_MASK);
			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
					cqe->flags_status_srcqpn) |
					(OCRDMA_CQE_WR_FLUSH_ERR <<
						OCRDMA_CQE_UD_STATUS_SHIFT));
		} else {
			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
					cqe->flags_status_srcqpn) &
					~OCRDMA_CQE_STATUS_MASK);
			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
					cqe->flags_status_srcqpn) |
					(OCRDMA_CQE_WR_FLUSH_ERR <<
						OCRDMA_CQE_STATUS_SHIFT));
		}
	}
}
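/*
 * Why ocrdma_set_cqe_status_flushed() rewrites the CQE in place: when a
 * QP enters the error state the adapter produces only a single error
 * CQE, so while pending WQEs/RQEs remain, the poll path keeps re-reading
 * the same hardware CQE ("expanding" it) and reports each remaining work
 * request with IB_WC_WR_FLUSH_ERR.  Overwriting the status field (in the
 * UD-specific position for UD/GSI QPs) makes every subsequent expansion
 * of that CQE read back as a flush error.
 */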
static bool ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
				  struct ocrdma_qp *qp, int status)
{
	bool expand = false;

	ibwc->byte_len = 0;
	ibwc->qp = &qp->ibqp;
	ibwc->status = ocrdma_to_ibwc_err(status);

	ocrdma_flush_qp(qp);
	ocrdma_qp_state_change(qp, IB_QPS_ERR, NULL);

	/* if a wqe/rqe is still pending for which a cqe needs to be
	 * returned, trigger expanding this cqe.
	 */
	if (!is_hw_rq_empty(qp) || !is_hw_sq_empty(qp)) {
		expand = true;
		ocrdma_set_cqe_status_flushed(qp, cqe);
	}
	return expand;
}

static int ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
				  struct ocrdma_qp *qp, int status)
{
	ibwc->opcode = IB_WC_RECV;
	ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
	ocrdma_hwq_inc_tail(&qp->rq);

	return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
}

static int ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
				  struct ocrdma_qp *qp, int status)
{
	ocrdma_update_wc(qp, ibwc, qp->sq.tail);
	ocrdma_hwq_inc_tail(&qp->sq);

	return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
}
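/*
 * The value returned by the ocrdma_update_err_*() helpers above (and
 * propagated as "expand" through the poll helpers below) tells
 * ocrdma_poll_hwcq() whether to stay on the current hardware CQE instead
 * of advancing: while either work queue still has pending entries, the
 * same flushed CQE keeps producing one work completion per pending
 * WQE/RQE until both queues drain.
 */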
static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp,
				 struct ocrdma_cqe *cqe, struct ib_wc *ibwc,
				 bool *polled, bool *stop)
{
	bool expand;
	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
	int status = (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;

	if (status < OCRDMA_MAX_CQE_ERR)
		atomic_inc(&dev->cqe_err_stats[status]);

	/* When the hw SQ is empty but the RQ is not, keep the CQE so
	 * that the CQ event is raised again.
	 */
	if (is_hw_sq_empty(qp) && !is_hw_rq_empty(qp)) {
		/* When the RQ and SQ share the same CQ, it is safe to
		 * return flush CQEs for the RQEs here.
		 */
		if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
			*polled = true;
			status = OCRDMA_CQE_WR_FLUSH_ERR;
			expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
		} else {
			/* Stop processing further CQEs, as this CQE is
			 * used for triggering the CQ event on the RQ's
			 * buddy CQ.  When the QP is destroyed, this CQE
			 * will be removed from the CQ's hardware queue.
			 */
			*polled = false;
			*stop = true;
			expand = false;
		}
	} else if (is_hw_sq_empty(qp)) {
		/* Do nothing */
		expand = false;
		*polled = false;
		*stop = false;
	} else {
		*polled = true;
		expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
	}
	return expand;
}

static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp,
				     struct ocrdma_cqe *cqe,
				     struct ib_wc *ibwc, bool *polled)
{
	bool expand = false;
	int tail = qp->sq.tail;
	u32 wqe_idx;

	if (!qp->wqe_wr_id_tbl[tail].signaled) {
		*polled = false; /* WC cannot be consumed yet */
	} else {
		ibwc->status = IB_WC_SUCCESS;
		ibwc->wc_flags = 0;
		ibwc->qp = &qp->ibqp;
		ocrdma_update_wc(qp, ibwc, tail);
		*polled = true;
	}
	wqe_idx = (le32_to_cpu(cqe->wq.wqeidx) &
		OCRDMA_CQE_WQEIDX_MASK) & qp->sq.max_wqe_idx;
	if (tail != wqe_idx)
		expand = true; /* Coalesced CQE can't be consumed yet */

	ocrdma_hwq_inc_tail(&qp->sq);
	return expand;
}

static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
			     struct ib_wc *ibwc, bool *polled, bool *stop)
{
	int status;
	bool expand;

	status = (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;

	if (status == OCRDMA_CQE_SUCCESS)
		expand = ocrdma_poll_success_scqe(qp, cqe, ibwc, polled);
	else
		expand = ocrdma_poll_err_scqe(qp, cqe, ibwc, polled, stop);
	return expand;
}

static int ocrdma_update_ud_rcqe(struct ocrdma_dev *dev, struct ib_wc *ibwc,
				 struct ocrdma_cqe *cqe)
{
	int status;
	u16 hdr_type = 0;

	status = (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT;
	ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) &
						OCRDMA_CQE_SRCQP_MASK;
	ibwc->pkey_index = 0;
	ibwc->wc_flags = IB_WC_GRH;
	ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
			  OCRDMA_CQE_UD_XFER_LEN_SHIFT) &
			  OCRDMA_CQE_UD_XFER_LEN_MASK;

	if (ocrdma_is_udp_encap_supported(dev)) {
		hdr_type = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
			    OCRDMA_CQE_UD_L3TYPE_SHIFT) &
			    OCRDMA_CQE_UD_L3TYPE_MASK;
		ibwc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
		ibwc->network_hdr_type = hdr_type;
	}

	return status;
}

static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc,
				       struct ocrdma_cqe *cqe,
				       struct ocrdma_qp *qp)
{
	unsigned long flags;
	struct ocrdma_srq *srq;
	u32 wqe_idx;

	srq = get_ocrdma_srq(qp->ibqp.srq);
	wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
		OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx;
	BUG_ON(wqe_idx < 1);

	ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx];
	spin_lock_irqsave(&srq->q_lock, flags);
	ocrdma_srq_toggle_bit(srq, wqe_idx - 1);
	spin_unlock_irqrestore(&srq->q_lock, flags);
	ocrdma_hwq_inc_tail(&srq->rq);
}
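/*
 * Note on ocrdma_poll_success_scqe() above: send completions may be
 * coalesced, so one successful CQE can cover several SQ entries.  The
 * shadow table's "signaled" flag decides whether a given entry produces
 * a work completion at all, and "expand" is returned while the SQ tail
 * has not yet caught up with the wqe index reported in the CQE, so the
 * same CQE is revisited until every covered entry has been retired.
 */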
static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
				 struct ib_wc *ibwc, bool *polled, bool *stop,
				 int status)
{
	bool expand;
	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);

	if (status < OCRDMA_MAX_CQE_ERR)
		atomic_inc(&dev->cqe_err_stats[status]);

	/* When the hw RQ is empty but the SQ is not, keep the CQE so
	 * that the CQ event is raised again.
	 */
	if (is_hw_rq_empty(qp) && !is_hw_sq_empty(qp)) {
		if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
			*polled = true;
			status = OCRDMA_CQE_WR_FLUSH_ERR;
			expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
		} else {
			*polled = false;
			*stop = true;
			expand = false;
		}
	} else if (is_hw_rq_empty(qp)) {
		/* Do nothing */
		expand = false;
		*polled = false;
		*stop = false;
	} else {
		*polled = true;
		expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
	}
	return expand;
}

static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp,
				     struct ocrdma_cqe *cqe, struct ib_wc *ibwc)
{
	struct ocrdma_dev *dev;

	dev = get_ocrdma_dev(qp->ibqp.device);
	ibwc->opcode = IB_WC_RECV;
	ibwc->qp = &qp->ibqp;
	ibwc->status = IB_WC_SUCCESS;

	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)
		ocrdma_update_ud_rcqe(dev, ibwc, cqe);
	else
		ibwc->byte_len = le32_to_cpu(cqe->rq.rxlen);

	if (is_cqe_imm(cqe)) {
		ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
		ibwc->wc_flags |= IB_WC_WITH_IMM;
	} else if (is_cqe_wr_imm(cqe)) {
		ibwc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
		ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
		ibwc->wc_flags |= IB_WC_WITH_IMM;
	} else if (is_cqe_invalidated(cqe)) {
		ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt);
		ibwc->wc_flags |= IB_WC_WITH_INVALIDATE;
	}
	if (qp->ibqp.srq) {
		ocrdma_update_free_srq_cqe(ibwc, cqe, qp);
	} else {
		ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
		ocrdma_hwq_inc_tail(&qp->rq);
	}
}

static bool ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
			     struct ib_wc *ibwc, bool *polled, bool *stop)
{
	int status;
	bool expand = false;

	ibwc->wc_flags = 0;
	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
		status = (le32_to_cpu(cqe->flags_status_srcqpn) &
			  OCRDMA_CQE_UD_STATUS_MASK) >>
			  OCRDMA_CQE_UD_STATUS_SHIFT;
	} else {
		status = (le32_to_cpu(cqe->flags_status_srcqpn) &
			  OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
	}

	if (status == OCRDMA_CQE_SUCCESS) {
		*polled = true;
		ocrdma_poll_success_rcqe(qp, cqe, ibwc);
	} else {
		expand = ocrdma_poll_err_rcqe(qp, cqe, ibwc, polled, stop,
					      status);
	}
	return expand;
}

static void ocrdma_change_cq_phase(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe,
				   u16 cur_getp)
{
	if (cq->phase_change) {
		if (cur_getp == 0)
			cq->phase = (~cq->phase & OCRDMA_CQE_VALID);
	} else {
		/* clear valid bit */
		cqe->flags_status_srcqpn = 0;
	}
}
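/*
 * CQE ownership above is tracked in one of two ways, depending on
 * cq->phase_change: either the expected phase (valid) bit flips each
 * time the get pointer wraps back to 0, so entries left over from the
 * previous pass are no longer treated as valid, or the driver simply
 * zeroes flags_status_srcqpn after consuming a CQE so it can never be
 * seen as valid again.
 */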
static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries,
			    struct ib_wc *ibwc)
{
	u16 qpn = 0;
	int i = 0;
	bool expand = false;
	int polled_hw_cqes = 0;
	struct ocrdma_qp *qp = NULL;
	struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
	struct ocrdma_cqe *cqe;
	u16 cur_getp;
	bool polled = false;
	bool stop = false;

	cur_getp = cq->getp;
	while (num_entries) {
		cqe = cq->va + cur_getp;
		/* check whether valid cqe or not */
		if (!is_cqe_valid(cq, cqe))
			break;
		qpn = (le32_to_cpu(cqe->cmn.qpn) & OCRDMA_CQE_QPN_MASK);
		/* ignore discarded cqe */
		if (qpn == 0)
			goto skip_cqe;
		qp = dev->qp_tbl[qpn];
		BUG_ON(qp == NULL);

		if (is_cqe_for_sq(cqe)) {
			expand = ocrdma_poll_scqe(qp, cqe, ibwc, &polled,
						  &stop);
		} else {
			expand = ocrdma_poll_rcqe(qp, cqe, ibwc, &polled,
						  &stop);
		}
		if (expand)
			goto expand_cqe;
		if (stop)
			goto stop_cqe;
		/* clear qpn to avoid duplicate processing by discard_cqe() */
		cqe->cmn.qpn = 0;
skip_cqe:
		polled_hw_cqes += 1;
		cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
		ocrdma_change_cq_phase(cq, cqe, cur_getp);
expand_cqe:
		if (polled) {
			num_entries -= 1;
			i += 1;
			ibwc = ibwc + 1;
			polled = false;
		}
	}
stop_cqe:
	cq->getp = cur_getp;

	if (polled_hw_cqes)
		ocrdma_ring_cq_db(dev, cq->id, false, false, polled_hw_cqes);

	return i;
}

/* insert error cqe if the QP's SQ or RQ's CQ matches the CQ under poll. */
static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries,
			      struct ocrdma_qp *qp, struct ib_wc *ibwc)
{
	int err_cqes = 0;

	while (num_entries) {
		if (is_hw_sq_empty(qp) && is_hw_rq_empty(qp))
			break;
		if (!is_hw_sq_empty(qp) && qp->sq_cq == cq) {
			ocrdma_update_wc(qp, ibwc, qp->sq.tail);
			ocrdma_hwq_inc_tail(&qp->sq);
		} else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) {
			ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
			ocrdma_hwq_inc_tail(&qp->rq);
		} else {
			return err_cqes;
		}
		ibwc->byte_len = 0;
		ibwc->status = IB_WC_WR_FLUSH_ERR;
		ibwc = ibwc + 1;
		err_cqes += 1;
		num_entries -= 1;
	}
	return err_cqes;
}
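/*
 * Summary of the label flow in ocrdma_poll_hwcq() above: skip_cqe
 * advances past discarded entries (qpn == 0), expand_cqe leaves
 * cur_getp alone so the same CQE is re-examined on the next iteration,
 * and stop_cqe abandons the loop entirely while leaving the CQE in
 * place for the buddy CQ.  The doorbell written at the end credits back
 * only the hardware CQEs that were actually consumed (polled_hw_cqes).
 */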
int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int cqes_to_poll = num_entries;
	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
	int num_os_cqe = 0, err_cqes = 0;
	struct ocrdma_qp *qp;
	unsigned long flags;

	/* poll cqes from adapter CQ */
	spin_lock_irqsave(&cq->cq_lock, flags);
	num_os_cqe = ocrdma_poll_hwcq(cq, cqes_to_poll, wc);
	spin_unlock_irqrestore(&cq->cq_lock, flags);
	cqes_to_poll -= num_os_cqe;

	if (cqes_to_poll) {
		wc = wc + num_os_cqe;
		/* The adapter returns a single error cqe when the QP moves
		 * to the error state, so insert error cqes with wc_status
		 * set to FLUSHED for the pending WQEs and RQEs of every QP
		 * whose SQ or RQ uses this CQ.
		 */
		spin_lock_irqsave(&dev->flush_q_lock, flags);
		list_for_each_entry(qp, &cq->sq_head, sq_entry) {
			if (cqes_to_poll == 0)
				break;
			err_cqes = ocrdma_add_err_cqe(cq, cqes_to_poll, qp, wc);
			cqes_to_poll -= err_cqes;
			num_os_cqe += err_cqes;
			wc = wc + err_cqes;
		}
		spin_unlock_irqrestore(&dev->flush_q_lock, flags);
	}
	return num_os_cqe;
}

int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
{
	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
	u16 cq_id;
	unsigned long flags;
	bool arm_needed = false, sol_needed = false;

	cq_id = cq->id;

	spin_lock_irqsave(&cq->cq_lock, flags);
	if (cq_flags & IB_CQ_NEXT_COMP || cq_flags & IB_CQ_SOLICITED)
		arm_needed = true;
	if (cq_flags & IB_CQ_SOLICITED)
		sol_needed = true;

	ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return 0;
}

struct ib_mr *ocrdma_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
			      u32 max_num_sg, struct ib_udata *udata)
{
	int status;
	struct ocrdma_mr *mr;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	if (max_num_sg > dev->attr.max_pages_per_frmr)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
	if (!mr->pages) {
		status = -ENOMEM;
		goto pl_err;
	}

	status = ocrdma_get_pbl_info(dev, mr, max_num_sg);
	if (status)
		goto pbl_err;
	mr->hwmr.fr_mr = 1;
	mr->hwmr.remote_rd = 0;
	mr->hwmr.remote_wr = 0;
	mr->hwmr.local_rd = 0;
	mr->hwmr.local_wr = 0;
	mr->hwmr.mw_bind = 0;
	status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
	if (status)
		goto pbl_err;
	status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, 0);
	if (status)
		goto mbx_err;
	mr->ibmr.rkey = mr->hwmr.lkey;
	mr->ibmr.lkey = mr->hwmr.lkey;
	dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] =
		(unsigned long) mr;
	return &mr->ibmr;
mbx_err:
	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
pbl_err:
	kfree(mr->pages);
pl_err:
	kfree(mr);
	return ERR_PTR(status);
}

static int ocrdma_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct ocrdma_mr *mr = get_ocrdma_mr(ibmr);

	if (unlikely(mr->npages == mr->hwmr.num_pbes))
		return -ENOMEM;

	mr->pages[mr->npages++] = addr;

	return 0;
}

int ocrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		     unsigned int *sg_offset)
{
	struct ocrdma_mr *mr = get_ocrdma_mr(ibmr);

	mr->npages = 0;

	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, ocrdma_set_page);
}
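/*
 * Illustrative consumer-side sequence (not part of this driver) for the
 * fast-registration path implemented above: a kernel ULP would call
 * ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, max_num_sg), which lands in
 * ocrdma_alloc_mr(), then ib_map_mr_sg(), which collects the page
 * addresses through ocrdma_set_page(), and finally post an IB_WR_REG_MR
 * work request, which ocrdma_post_send() turns into the OCRDMA_FR_MR WQE
 * built by ocrdma_build_reg().
 */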