1 /* 2 * Broadcom NetXtreme-E RoCE driver. 3 * 4 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term 5 * Broadcom refers to Broadcom Limited and/or its subsidiaries. 6 * 7 * This software is available to you under a choice of one of two 8 * licenses. You may choose to be licensed under the terms of the GNU 9 * General Public License (GPL) Version 2, available from the file 10 * COPYING in the main directory of this source tree, or the 11 * BSD license below: 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions 15 * are met: 16 * 17 * 1. Redistributions of source code must retain the above copyright 18 * notice, this list of conditions and the following disclaimer. 19 * 2. Redistributions in binary form must reproduce the above copyright 20 * notice, this list of conditions and the following disclaimer in 21 * the documentation and/or other materials provided with the 22 * distribution. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' 25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS 28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 31 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 32 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE 33 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN 34 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 35 * 36 * Description: IB Verbs interpreter 37 */ 38 39 #include <linux/interrupt.h> 40 #include <linux/types.h> 41 #include <linux/pci.h> 42 #include <linux/netdevice.h> 43 #include <linux/if_ether.h> 44 #include <net/addrconf.h> 45 46 #include <rdma/ib_verbs.h> 47 #include <rdma/ib_user_verbs.h> 48 #include <rdma/ib_umem.h> 49 #include <rdma/ib_addr.h> 50 #include <rdma/ib_mad.h> 51 #include <rdma/ib_cache.h> 52 #include <rdma/uverbs_ioctl.h> 53 54 #include "bnxt_ulp.h" 55 56 #include "roce_hsi.h" 57 #include "qplib_res.h" 58 #include "qplib_sp.h" 59 #include "qplib_fp.h" 60 #include "qplib_rcfw.h" 61 62 #include "bnxt_re.h" 63 #include "ib_verbs.h" 64 65 #include <rdma/uverbs_types.h> 66 #include <rdma/uverbs_std_types.h> 67 68 #include <rdma/ib_user_ioctl_cmds.h> 69 70 #define UVERBS_MODULE_NAME bnxt_re 71 #include <rdma/uverbs_named_ioctl.h> 72 73 #include <rdma/bnxt_re-abi.h> 74 75 static int __from_ib_access_flags(int iflags) 76 { 77 int qflags = 0; 78 79 if (iflags & IB_ACCESS_LOCAL_WRITE) 80 qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE; 81 if (iflags & IB_ACCESS_REMOTE_READ) 82 qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ; 83 if (iflags & IB_ACCESS_REMOTE_WRITE) 84 qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE; 85 if (iflags & IB_ACCESS_REMOTE_ATOMIC) 86 qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC; 87 if (iflags & IB_ACCESS_MW_BIND) 88 qflags |= BNXT_QPLIB_ACCESS_MW_BIND; 89 if (iflags & IB_ZERO_BASED) 90 qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED; 91 if (iflags & IB_ACCESS_ON_DEMAND) 92 qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND; 93 return qflags; 94 }; 95 96 static enum ib_access_flags __to_ib_access_flags(int qflags) 97 { 98 enum ib_access_flags iflags = 0; 99 100 if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE) 101 iflags |= IB_ACCESS_LOCAL_WRITE; 102 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE) 103 iflags |= IB_ACCESS_REMOTE_WRITE; 104 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ) 105 iflags |= IB_ACCESS_REMOTE_READ; 106 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC) 107 iflags |= IB_ACCESS_REMOTE_ATOMIC; 108 if (qflags & BNXT_QPLIB_ACCESS_MW_BIND) 109 iflags |= IB_ACCESS_MW_BIND; 110 if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED) 111 iflags |= IB_ZERO_BASED; 112 if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND) 113 iflags |= IB_ACCESS_ON_DEMAND; 114 return iflags; 115 }; 116 117 static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list, 118 struct bnxt_qplib_sge *sg_list, int num) 119 { 120 int i, total = 0; 121 122 for (i = 0; i < num; i++) { 123 sg_list[i].addr = ib_sg_list[i].addr; 124 sg_list[i].lkey = ib_sg_list[i].lkey; 125 sg_list[i].size = ib_sg_list[i].length; 126 total += sg_list[i].size; 127 } 128 return total; 129 } 130 131 /* Device */ 132 int bnxt_re_query_device(struct ib_device *ibdev, 133 struct ib_device_attr *ib_attr, 134 struct ib_udata *udata) 135 { 136 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); 137 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr; 138 139 memset(ib_attr, 0, sizeof(*ib_attr)); 140 memcpy(&ib_attr->fw_ver, dev_attr->fw_ver, 141 min(sizeof(dev_attr->fw_ver), 142 sizeof(ib_attr->fw_ver))); 143 addrconf_addr_eui48((u8 *)&ib_attr->sys_image_guid, 144 rdev->netdev->dev_addr); 145 ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE; 146 ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_SUPPORTED; 147 148 ib_attr->vendor_id = rdev->en_dev->pdev->vendor; 149 ib_attr->vendor_part_id = rdev->en_dev->pdev->device; 150 ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device; 151 ib_attr->max_qp = dev_attr->max_qp; 152 ib_attr->max_qp_wr = dev_attr->max_qp_wqes; 153 ib_attr->device_cap_flags = 154 IB_DEVICE_CURR_QP_STATE_MOD 155 | IB_DEVICE_RC_RNR_NAK_GEN 156 | IB_DEVICE_SHUTDOWN_PORT 157 | IB_DEVICE_SYS_IMAGE_GUID 158 | IB_DEVICE_RESIZE_MAX_WR 159 | IB_DEVICE_PORT_ACTIVE_EVENT 160 | IB_DEVICE_N_NOTIFY_CQ 161 | IB_DEVICE_MEM_WINDOW 162 | IB_DEVICE_MEM_WINDOW_TYPE_2B 163 | IB_DEVICE_MEM_MGT_EXTENSIONS; 164 ib_attr->kernel_cap_flags = IBK_LOCAL_DMA_LKEY; 165 ib_attr->max_send_sge = dev_attr->max_qp_sges; 166 ib_attr->max_recv_sge = dev_attr->max_qp_sges; 167 ib_attr->max_sge_rd = dev_attr->max_qp_sges; 168 ib_attr->max_cq = dev_attr->max_cq; 169 ib_attr->max_cqe = dev_attr->max_cq_wqes; 170 ib_attr->max_mr = dev_attr->max_mr; 171 ib_attr->max_pd = dev_attr->max_pd; 172 ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom; 173 ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom; 174 ib_attr->atomic_cap = IB_ATOMIC_NONE; 175 ib_attr->masked_atomic_cap = IB_ATOMIC_NONE; 176 if (dev_attr->is_atomic) { 177 ib_attr->atomic_cap = IB_ATOMIC_GLOB; 178 ib_attr->masked_atomic_cap = IB_ATOMIC_GLOB; 179 } 180 181 ib_attr->max_ee_rd_atom = 0; 182 ib_attr->max_res_rd_atom = 0; 183 ib_attr->max_ee_init_rd_atom = 0; 184 ib_attr->max_ee = 0; 185 ib_attr->max_rdd = 0; 186 ib_attr->max_mw = dev_attr->max_mw; 187 ib_attr->max_raw_ipv6_qp = 0; 188 ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp; 189 ib_attr->max_mcast_grp = 0; 190 ib_attr->max_mcast_qp_attach = 0; 191 ib_attr->max_total_mcast_qp_attach = 0; 192 ib_attr->max_ah = dev_attr->max_ah; 193 194 ib_attr->max_srq = dev_attr->max_srq; 195 ib_attr->max_srq_wr = dev_attr->max_srq_wqes; 196 ib_attr->max_srq_sge = dev_attr->max_srq_sges; 197 198 ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS; 199 200 ib_attr->max_pkeys = 1; 201 ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY; 202 return 0; 203 } 204 205 /* Port */ 206 int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num, 207 struct ib_port_attr *port_attr) 208 { 209 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); 210 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr; 211 int rc; 212 213 memset(port_attr, 0, sizeof(*port_attr)); 214 215 if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) { 216 port_attr->state = IB_PORT_ACTIVE; 217 port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP; 218 } else { 219 port_attr->state = IB_PORT_DOWN; 220 port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED; 221 } 222 port_attr->max_mtu = IB_MTU_4096; 223 port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu); 224 port_attr->gid_tbl_len = dev_attr->max_sgid; 225 port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP | 226 IB_PORT_DEVICE_MGMT_SUP | 227 IB_PORT_VENDOR_CLASS_SUP; 228 port_attr->ip_gids = true; 229 230 port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW; 231 port_attr->bad_pkey_cntr = 0; 232 port_attr->qkey_viol_cntr = 0; 233 port_attr->pkey_tbl_len = dev_attr->max_pkey; 234 port_attr->lid = 0; 235 port_attr->sm_lid = 0; 236 port_attr->lmc = 0; 237 port_attr->max_vl_num = 4; 238 port_attr->sm_sl = 0; 239 port_attr->subnet_timeout = 0; 240 port_attr->init_type_reply = 0; 241 rc = ib_get_eth_speed(&rdev->ibdev, port_num, &port_attr->active_speed, 242 &port_attr->active_width); 243 244 return rc; 245 } 246 247 int bnxt_re_get_port_immutable(struct ib_device *ibdev, u32 port_num, 248 struct ib_port_immutable *immutable) 249 { 250 struct ib_port_attr port_attr; 251 252 if (bnxt_re_query_port(ibdev, port_num, &port_attr)) 253 return -EINVAL; 254 255 immutable->pkey_tbl_len = port_attr.pkey_tbl_len; 256 immutable->gid_tbl_len = port_attr.gid_tbl_len; 257 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE; 258 immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP; 259 immutable->max_mad_size = IB_MGMT_MAD_SIZE; 260 return 0; 261 } 262 263 void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str) 264 { 265 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); 266 267 snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d", 268 rdev->dev_attr.fw_ver[0], rdev->dev_attr.fw_ver[1], 269 rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]); 270 } 271 272 int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num, 273 u16 index, u16 *pkey) 274 { 275 if (index > 0) 276 return -EINVAL; 277 278 *pkey = IB_DEFAULT_PKEY_FULL; 279 280 return 0; 281 } 282 283 int bnxt_re_query_gid(struct ib_device *ibdev, u32 port_num, 284 int index, union ib_gid *gid) 285 { 286 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); 287 int rc; 288 289 /* Ignore port_num */ 290 memset(gid, 0, sizeof(*gid)); 291 rc = bnxt_qplib_get_sgid(&rdev->qplib_res, 292 &rdev->qplib_res.sgid_tbl, index, 293 (struct bnxt_qplib_gid *)gid); 294 return rc; 295 } 296 297 int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context) 298 { 299 int rc = 0; 300 struct bnxt_re_gid_ctx *ctx, **ctx_tbl; 301 struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev); 302 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl; 303 struct bnxt_qplib_gid *gid_to_del; 304 u16 vlan_id = 0xFFFF; 305 306 /* Delete the entry from the hardware */ 307 ctx = *context; 308 if (!ctx) 309 return -EINVAL; 310 311 if (sgid_tbl && sgid_tbl->active) { 312 if (ctx->idx >= sgid_tbl->max) 313 return -EINVAL; 314 gid_to_del = &sgid_tbl->tbl[ctx->idx].gid; 315 vlan_id = sgid_tbl->tbl[ctx->idx].vlan_id; 316 /* DEL_GID is called in WQ context(netdevice_event_work_handler) 317 * or via the ib_unregister_device path. In the former case QP1 318 * may not be destroyed yet, in which case just return as FW 319 * needs that entry to be present and will fail it's deletion. 320 * We could get invoked again after QP1 is destroyed OR get an 321 * ADD_GID call with a different GID value for the same index 322 * where we issue MODIFY_GID cmd to update the GID entry -- TBD 323 */ 324 if (ctx->idx == 0 && 325 rdma_link_local_addr((struct in6_addr *)gid_to_del) && 326 ctx->refcnt == 1 && rdev->gsi_ctx.gsi_sqp) { 327 ibdev_dbg(&rdev->ibdev, 328 "Trying to delete GID0 while QP1 is alive\n"); 329 return -EFAULT; 330 } 331 ctx->refcnt--; 332 if (!ctx->refcnt) { 333 rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, 334 vlan_id, true); 335 if (rc) { 336 ibdev_err(&rdev->ibdev, 337 "Failed to remove GID: %#x", rc); 338 } else { 339 ctx_tbl = sgid_tbl->ctx; 340 ctx_tbl[ctx->idx] = NULL; 341 kfree(ctx); 342 } 343 } 344 } else { 345 return -EINVAL; 346 } 347 return rc; 348 } 349 350 int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context) 351 { 352 int rc; 353 u32 tbl_idx = 0; 354 u16 vlan_id = 0xFFFF; 355 struct bnxt_re_gid_ctx *ctx, **ctx_tbl; 356 struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev); 357 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl; 358 359 rc = rdma_read_gid_l2_fields(attr, &vlan_id, NULL); 360 if (rc) 361 return rc; 362 363 rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)&attr->gid, 364 rdev->qplib_res.netdev->dev_addr, 365 vlan_id, true, &tbl_idx); 366 if (rc == -EALREADY) { 367 ctx_tbl = sgid_tbl->ctx; 368 ctx_tbl[tbl_idx]->refcnt++; 369 *context = ctx_tbl[tbl_idx]; 370 return 0; 371 } 372 373 if (rc < 0) { 374 ibdev_err(&rdev->ibdev, "Failed to add GID: %#x", rc); 375 return rc; 376 } 377 378 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); 379 if (!ctx) 380 return -ENOMEM; 381 ctx_tbl = sgid_tbl->ctx; 382 ctx->idx = tbl_idx; 383 ctx->refcnt = 1; 384 ctx_tbl[tbl_idx] = ctx; 385 *context = ctx; 386 387 return rc; 388 } 389 390 enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev, 391 u32 port_num) 392 { 393 return IB_LINK_LAYER_ETHERNET; 394 } 395 396 #define BNXT_RE_FENCE_PBL_SIZE DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE) 397 398 static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd) 399 { 400 struct bnxt_re_fence_data *fence = &pd->fence; 401 struct ib_mr *ib_mr = &fence->mr->ib_mr; 402 struct bnxt_qplib_swqe *wqe = &fence->bind_wqe; 403 404 memset(wqe, 0, sizeof(*wqe)); 405 wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW; 406 wqe->wr_id = BNXT_QPLIB_FENCE_WRID; 407 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; 408 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; 409 wqe->bind.zero_based = false; 410 wqe->bind.parent_l_key = ib_mr->lkey; 411 wqe->bind.va = (u64)(unsigned long)fence->va; 412 wqe->bind.length = fence->size; 413 wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ); 414 wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1; 415 416 /* Save the initial rkey in fence structure for now; 417 * wqe->bind.r_key will be set at (re)bind time. 418 */ 419 fence->bind_rkey = ib_inc_rkey(fence->mw->rkey); 420 } 421 422 static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp) 423 { 424 struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp, 425 qplib_qp); 426 struct ib_pd *ib_pd = qp->ib_qp.pd; 427 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); 428 struct bnxt_re_fence_data *fence = &pd->fence; 429 struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe; 430 struct bnxt_qplib_swqe wqe; 431 int rc; 432 433 memcpy(&wqe, fence_wqe, sizeof(wqe)); 434 wqe.bind.r_key = fence->bind_rkey; 435 fence->bind_rkey = ib_inc_rkey(fence->bind_rkey); 436 437 ibdev_dbg(&qp->rdev->ibdev, 438 "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n", 439 wqe.bind.r_key, qp->qplib_qp.id, pd); 440 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe); 441 if (rc) { 442 ibdev_err(&qp->rdev->ibdev, "Failed to bind fence-WQE\n"); 443 return rc; 444 } 445 bnxt_qplib_post_send_db(&qp->qplib_qp); 446 447 return rc; 448 } 449 450 static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd) 451 { 452 struct bnxt_re_fence_data *fence = &pd->fence; 453 struct bnxt_re_dev *rdev = pd->rdev; 454 struct device *dev = &rdev->en_dev->pdev->dev; 455 struct bnxt_re_mr *mr = fence->mr; 456 457 if (fence->mw) { 458 bnxt_re_dealloc_mw(fence->mw); 459 fence->mw = NULL; 460 } 461 if (mr) { 462 if (mr->ib_mr.rkey) 463 bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr, 464 true); 465 if (mr->ib_mr.lkey) 466 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); 467 kfree(mr); 468 fence->mr = NULL; 469 } 470 if (fence->dma_addr) { 471 dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES, 472 DMA_BIDIRECTIONAL); 473 fence->dma_addr = 0; 474 } 475 } 476 477 static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd) 478 { 479 int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND; 480 struct bnxt_re_fence_data *fence = &pd->fence; 481 struct bnxt_re_dev *rdev = pd->rdev; 482 struct device *dev = &rdev->en_dev->pdev->dev; 483 struct bnxt_re_mr *mr = NULL; 484 dma_addr_t dma_addr = 0; 485 struct ib_mw *mw; 486 int rc; 487 488 dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES, 489 DMA_BIDIRECTIONAL); 490 rc = dma_mapping_error(dev, dma_addr); 491 if (rc) { 492 ibdev_err(&rdev->ibdev, "Failed to dma-map fence-MR-mem\n"); 493 rc = -EIO; 494 fence->dma_addr = 0; 495 goto fail; 496 } 497 fence->dma_addr = dma_addr; 498 499 /* Allocate a MR */ 500 mr = kzalloc(sizeof(*mr), GFP_KERNEL); 501 if (!mr) { 502 rc = -ENOMEM; 503 goto fail; 504 } 505 fence->mr = mr; 506 mr->rdev = rdev; 507 mr->qplib_mr.pd = &pd->qplib_pd; 508 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR; 509 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags); 510 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr); 511 if (rc) { 512 ibdev_err(&rdev->ibdev, "Failed to alloc fence-HW-MR\n"); 513 goto fail; 514 } 515 516 /* Register MR */ 517 mr->ib_mr.lkey = mr->qplib_mr.lkey; 518 mr->qplib_mr.va = (u64)(unsigned long)fence->va; 519 mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES; 520 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL, 521 BNXT_RE_FENCE_PBL_SIZE, PAGE_SIZE); 522 if (rc) { 523 ibdev_err(&rdev->ibdev, "Failed to register fence-MR\n"); 524 goto fail; 525 } 526 mr->ib_mr.rkey = mr->qplib_mr.rkey; 527 528 /* Create a fence MW only for kernel consumers */ 529 mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL); 530 if (IS_ERR(mw)) { 531 ibdev_err(&rdev->ibdev, 532 "Failed to create fence-MW for PD: %p\n", pd); 533 rc = PTR_ERR(mw); 534 goto fail; 535 } 536 fence->mw = mw; 537 538 bnxt_re_create_fence_wqe(pd); 539 return 0; 540 541 fail: 542 bnxt_re_destroy_fence_mr(pd); 543 return rc; 544 } 545 546 static struct bnxt_re_user_mmap_entry* 547 bnxt_re_mmap_entry_insert(struct bnxt_re_ucontext *uctx, u64 mem_offset, 548 enum bnxt_re_mmap_flag mmap_flag, u64 *offset) 549 { 550 struct bnxt_re_user_mmap_entry *entry; 551 int ret; 552 553 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 554 if (!entry) 555 return NULL; 556 557 entry->mem_offset = mem_offset; 558 entry->mmap_flag = mmap_flag; 559 entry->uctx = uctx; 560 561 switch (mmap_flag) { 562 case BNXT_RE_MMAP_SH_PAGE: 563 ret = rdma_user_mmap_entry_insert_exact(&uctx->ib_uctx, 564 &entry->rdma_entry, PAGE_SIZE, 0); 565 break; 566 case BNXT_RE_MMAP_UC_DB: 567 case BNXT_RE_MMAP_WC_DB: 568 case BNXT_RE_MMAP_DBR_BAR: 569 case BNXT_RE_MMAP_DBR_PAGE: 570 ret = rdma_user_mmap_entry_insert(&uctx->ib_uctx, 571 &entry->rdma_entry, PAGE_SIZE); 572 break; 573 default: 574 ret = -EINVAL; 575 break; 576 } 577 578 if (ret) { 579 kfree(entry); 580 return NULL; 581 } 582 if (offset) 583 *offset = rdma_user_mmap_get_offset(&entry->rdma_entry); 584 585 return entry; 586 } 587 588 /* Protection Domains */ 589 int bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata) 590 { 591 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); 592 struct bnxt_re_dev *rdev = pd->rdev; 593 594 if (udata) { 595 rdma_user_mmap_entry_remove(pd->pd_db_mmap); 596 pd->pd_db_mmap = NULL; 597 } 598 599 bnxt_re_destroy_fence_mr(pd); 600 601 if (pd->qplib_pd.id) { 602 if (!bnxt_qplib_dealloc_pd(&rdev->qplib_res, 603 &rdev->qplib_res.pd_tbl, 604 &pd->qplib_pd)) 605 atomic_dec(&rdev->stats.res.pd_count); 606 } 607 return 0; 608 } 609 610 int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) 611 { 612 struct ib_device *ibdev = ibpd->device; 613 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); 614 struct bnxt_re_ucontext *ucntx = rdma_udata_to_drv_context( 615 udata, struct bnxt_re_ucontext, ib_uctx); 616 struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd); 617 struct bnxt_re_user_mmap_entry *entry = NULL; 618 u32 active_pds; 619 int rc = 0; 620 621 pd->rdev = rdev; 622 if (bnxt_qplib_alloc_pd(&rdev->qplib_res, &pd->qplib_pd)) { 623 ibdev_err(&rdev->ibdev, "Failed to allocate HW PD"); 624 rc = -ENOMEM; 625 goto fail; 626 } 627 628 if (udata) { 629 struct bnxt_re_pd_resp resp = {}; 630 631 if (!ucntx->dpi.dbr) { 632 /* Allocate DPI in alloc_pd to avoid failing of 633 * ibv_devinfo and family of application when DPIs 634 * are depleted. 635 */ 636 if (bnxt_qplib_alloc_dpi(&rdev->qplib_res, 637 &ucntx->dpi, ucntx, BNXT_QPLIB_DPI_TYPE_UC)) { 638 rc = -ENOMEM; 639 goto dbfail; 640 } 641 } 642 643 resp.pdid = pd->qplib_pd.id; 644 /* Still allow mapping this DBR to the new user PD. */ 645 resp.dpi = ucntx->dpi.dpi; 646 647 entry = bnxt_re_mmap_entry_insert(ucntx, (u64)ucntx->dpi.umdbr, 648 BNXT_RE_MMAP_UC_DB, &resp.dbr); 649 650 if (!entry) { 651 rc = -ENOMEM; 652 goto dbfail; 653 } 654 655 pd->pd_db_mmap = &entry->rdma_entry; 656 657 rc = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen)); 658 if (rc) { 659 rdma_user_mmap_entry_remove(pd->pd_db_mmap); 660 rc = -EFAULT; 661 goto dbfail; 662 } 663 } 664 665 if (!udata) 666 if (bnxt_re_create_fence_mr(pd)) 667 ibdev_warn(&rdev->ibdev, 668 "Failed to create Fence-MR\n"); 669 active_pds = atomic_inc_return(&rdev->stats.res.pd_count); 670 if (active_pds > rdev->stats.res.pd_watermark) 671 rdev->stats.res.pd_watermark = active_pds; 672 673 return 0; 674 dbfail: 675 bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl, 676 &pd->qplib_pd); 677 fail: 678 return rc; 679 } 680 681 /* Address Handles */ 682 int bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags) 683 { 684 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah); 685 struct bnxt_re_dev *rdev = ah->rdev; 686 bool block = true; 687 int rc; 688 689 block = !(flags & RDMA_DESTROY_AH_SLEEPABLE); 690 rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah, block); 691 if (BNXT_RE_CHECK_RC(rc)) { 692 if (rc == -ETIMEDOUT) 693 rc = 0; 694 else 695 goto fail; 696 } 697 atomic_dec(&rdev->stats.res.ah_count); 698 fail: 699 return rc; 700 } 701 702 static u8 bnxt_re_stack_to_dev_nw_type(enum rdma_network_type ntype) 703 { 704 u8 nw_type; 705 706 switch (ntype) { 707 case RDMA_NETWORK_IPV4: 708 nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4; 709 break; 710 case RDMA_NETWORK_IPV6: 711 nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6; 712 break; 713 default: 714 nw_type = CMDQ_CREATE_AH_TYPE_V1; 715 break; 716 } 717 return nw_type; 718 } 719 720 int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_init_attr *init_attr, 721 struct ib_udata *udata) 722 { 723 struct ib_pd *ib_pd = ib_ah->pd; 724 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); 725 struct rdma_ah_attr *ah_attr = init_attr->ah_attr; 726 const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr); 727 struct bnxt_re_dev *rdev = pd->rdev; 728 const struct ib_gid_attr *sgid_attr; 729 struct bnxt_re_gid_ctx *ctx; 730 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah); 731 u32 active_ahs; 732 u8 nw_type; 733 int rc; 734 735 if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) { 736 ibdev_err(&rdev->ibdev, "Failed to alloc AH: GRH not set"); 737 return -EINVAL; 738 } 739 740 ah->rdev = rdev; 741 ah->qplib_ah.pd = &pd->qplib_pd; 742 743 /* Supply the configuration for the HW */ 744 memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw, 745 sizeof(union ib_gid)); 746 sgid_attr = grh->sgid_attr; 747 /* Get the HW context of the GID. The reference 748 * of GID table entry is already taken by the caller. 749 */ 750 ctx = rdma_read_gid_hw_context(sgid_attr); 751 ah->qplib_ah.sgid_index = ctx->idx; 752 ah->qplib_ah.host_sgid_index = grh->sgid_index; 753 ah->qplib_ah.traffic_class = grh->traffic_class; 754 ah->qplib_ah.flow_label = grh->flow_label; 755 ah->qplib_ah.hop_limit = grh->hop_limit; 756 ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr); 757 758 /* Get network header type for this GID */ 759 nw_type = rdma_gid_attr_network_type(sgid_attr); 760 ah->qplib_ah.nw_type = bnxt_re_stack_to_dev_nw_type(nw_type); 761 762 memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN); 763 rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah, 764 !(init_attr->flags & 765 RDMA_CREATE_AH_SLEEPABLE)); 766 if (rc) { 767 ibdev_err(&rdev->ibdev, "Failed to allocate HW AH"); 768 return rc; 769 } 770 771 /* Write AVID to shared page. */ 772 if (udata) { 773 struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context( 774 udata, struct bnxt_re_ucontext, ib_uctx); 775 unsigned long flag; 776 u32 *wrptr; 777 778 spin_lock_irqsave(&uctx->sh_lock, flag); 779 wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT); 780 *wrptr = ah->qplib_ah.id; 781 wmb(); /* make sure cache is updated. */ 782 spin_unlock_irqrestore(&uctx->sh_lock, flag); 783 } 784 active_ahs = atomic_inc_return(&rdev->stats.res.ah_count); 785 if (active_ahs > rdev->stats.res.ah_watermark) 786 rdev->stats.res.ah_watermark = active_ahs; 787 788 return 0; 789 } 790 791 int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr) 792 { 793 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah); 794 795 ah_attr->type = ib_ah->type; 796 rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl); 797 memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN); 798 rdma_ah_set_grh(ah_attr, NULL, 0, 799 ah->qplib_ah.host_sgid_index, 800 0, ah->qplib_ah.traffic_class); 801 rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data); 802 rdma_ah_set_port_num(ah_attr, 1); 803 rdma_ah_set_static_rate(ah_attr, 0); 804 return 0; 805 } 806 807 unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp) 808 __acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock) 809 { 810 unsigned long flags; 811 812 spin_lock_irqsave(&qp->scq->cq_lock, flags); 813 if (qp->rcq != qp->scq) 814 spin_lock(&qp->rcq->cq_lock); 815 else 816 __acquire(&qp->rcq->cq_lock); 817 818 return flags; 819 } 820 821 void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, 822 unsigned long flags) 823 __releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock) 824 { 825 if (qp->rcq != qp->scq) 826 spin_unlock(&qp->rcq->cq_lock); 827 else 828 __release(&qp->rcq->cq_lock); 829 spin_unlock_irqrestore(&qp->scq->cq_lock, flags); 830 } 831 832 static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp) 833 { 834 struct bnxt_re_qp *gsi_sqp; 835 struct bnxt_re_ah *gsi_sah; 836 struct bnxt_re_dev *rdev; 837 int rc; 838 839 rdev = qp->rdev; 840 gsi_sqp = rdev->gsi_ctx.gsi_sqp; 841 gsi_sah = rdev->gsi_ctx.gsi_sah; 842 843 ibdev_dbg(&rdev->ibdev, "Destroy the shadow AH\n"); 844 bnxt_qplib_destroy_ah(&rdev->qplib_res, 845 &gsi_sah->qplib_ah, 846 true); 847 atomic_dec(&rdev->stats.res.ah_count); 848 bnxt_qplib_clean_qp(&qp->qplib_qp); 849 850 ibdev_dbg(&rdev->ibdev, "Destroy the shadow QP\n"); 851 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &gsi_sqp->qplib_qp); 852 if (rc) { 853 ibdev_err(&rdev->ibdev, "Destroy Shadow QP failed"); 854 goto fail; 855 } 856 bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp); 857 858 /* remove from active qp list */ 859 mutex_lock(&rdev->qp_lock); 860 list_del(&gsi_sqp->list); 861 mutex_unlock(&rdev->qp_lock); 862 atomic_dec(&rdev->stats.res.qp_count); 863 864 kfree(rdev->gsi_ctx.sqp_tbl); 865 kfree(gsi_sah); 866 kfree(gsi_sqp); 867 rdev->gsi_ctx.gsi_sqp = NULL; 868 rdev->gsi_ctx.gsi_sah = NULL; 869 rdev->gsi_ctx.sqp_tbl = NULL; 870 871 return 0; 872 fail: 873 return rc; 874 } 875 876 /* Queue Pairs */ 877 int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata) 878 { 879 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); 880 struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp; 881 struct bnxt_re_dev *rdev = qp->rdev; 882 struct bnxt_qplib_nq *scq_nq = NULL; 883 struct bnxt_qplib_nq *rcq_nq = NULL; 884 unsigned int flags; 885 int rc; 886 887 bnxt_qplib_flush_cqn_wq(&qp->qplib_qp); 888 889 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp); 890 if (rc) { 891 ibdev_err(&rdev->ibdev, "Failed to destroy HW QP"); 892 return rc; 893 } 894 895 if (rdma_is_kernel_res(&qp->ib_qp.res)) { 896 flags = bnxt_re_lock_cqs(qp); 897 bnxt_qplib_clean_qp(&qp->qplib_qp); 898 bnxt_re_unlock_cqs(qp, flags); 899 } 900 901 bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp); 902 903 if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) { 904 rc = bnxt_re_destroy_gsi_sqp(qp); 905 if (rc) 906 return rc; 907 } 908 909 mutex_lock(&rdev->qp_lock); 910 list_del(&qp->list); 911 mutex_unlock(&rdev->qp_lock); 912 atomic_dec(&rdev->stats.res.qp_count); 913 if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_RC) 914 atomic_dec(&rdev->stats.res.rc_qp_count); 915 else if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD) 916 atomic_dec(&rdev->stats.res.ud_qp_count); 917 918 ib_umem_release(qp->rumem); 919 ib_umem_release(qp->sumem); 920 921 /* Flush all the entries of notification queue associated with 922 * given qp. 923 */ 924 scq_nq = qplib_qp->scq->nq; 925 rcq_nq = qplib_qp->rcq->nq; 926 bnxt_re_synchronize_nq(scq_nq); 927 if (scq_nq != rcq_nq) 928 bnxt_re_synchronize_nq(rcq_nq); 929 930 return 0; 931 } 932 933 static u8 __from_ib_qp_type(enum ib_qp_type type) 934 { 935 switch (type) { 936 case IB_QPT_GSI: 937 return CMDQ_CREATE_QP1_TYPE_GSI; 938 case IB_QPT_RC: 939 return CMDQ_CREATE_QP_TYPE_RC; 940 case IB_QPT_UD: 941 return CMDQ_CREATE_QP_TYPE_UD; 942 default: 943 return IB_QPT_MAX; 944 } 945 } 946 947 static u16 bnxt_re_setup_rwqe_size(struct bnxt_qplib_qp *qplqp, 948 int rsge, int max) 949 { 950 if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) 951 rsge = max; 952 return bnxt_re_get_rwqe_size(rsge); 953 } 954 955 static u16 bnxt_re_get_wqe_size(int ilsize, int nsge) 956 { 957 u16 wqe_size, calc_ils; 958 959 wqe_size = bnxt_re_get_swqe_size(nsge); 960 if (ilsize) { 961 calc_ils = sizeof(struct sq_send_hdr) + ilsize; 962 wqe_size = max_t(u16, calc_ils, wqe_size); 963 wqe_size = ALIGN(wqe_size, sizeof(struct sq_send_hdr)); 964 } 965 return wqe_size; 966 } 967 968 static int bnxt_re_setup_swqe_size(struct bnxt_re_qp *qp, 969 struct ib_qp_init_attr *init_attr) 970 { 971 struct bnxt_qplib_dev_attr *dev_attr; 972 struct bnxt_qplib_qp *qplqp; 973 struct bnxt_re_dev *rdev; 974 struct bnxt_qplib_q *sq; 975 int align, ilsize; 976 977 rdev = qp->rdev; 978 qplqp = &qp->qplib_qp; 979 sq = &qplqp->sq; 980 dev_attr = &rdev->dev_attr; 981 982 align = sizeof(struct sq_send_hdr); 983 ilsize = ALIGN(init_attr->cap.max_inline_data, align); 984 985 sq->wqe_size = bnxt_re_get_wqe_size(ilsize, sq->max_sge); 986 if (sq->wqe_size > bnxt_re_get_swqe_size(dev_attr->max_qp_sges)) 987 return -EINVAL; 988 /* For gen p4 and gen p5 backward compatibility mode 989 * wqe size is fixed to 128 bytes 990 */ 991 if (sq->wqe_size < bnxt_re_get_swqe_size(dev_attr->max_qp_sges) && 992 qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) 993 sq->wqe_size = bnxt_re_get_swqe_size(dev_attr->max_qp_sges); 994 995 if (init_attr->cap.max_inline_data) { 996 qplqp->max_inline_data = sq->wqe_size - 997 sizeof(struct sq_send_hdr); 998 init_attr->cap.max_inline_data = qplqp->max_inline_data; 999 if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) 1000 sq->max_sge = qplqp->max_inline_data / 1001 sizeof(struct sq_sge); 1002 } 1003 1004 return 0; 1005 } 1006 1007 static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd, 1008 struct bnxt_re_qp *qp, struct ib_udata *udata) 1009 { 1010 struct bnxt_qplib_qp *qplib_qp; 1011 struct bnxt_re_ucontext *cntx; 1012 struct bnxt_re_qp_req ureq; 1013 int bytes = 0, psn_sz; 1014 struct ib_umem *umem; 1015 int psn_nume; 1016 1017 qplib_qp = &qp->qplib_qp; 1018 cntx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, 1019 ib_uctx); 1020 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) 1021 return -EFAULT; 1022 1023 bytes = (qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size); 1024 /* Consider mapping PSN search memory only for RC QPs. */ 1025 if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC) { 1026 psn_sz = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ? 1027 sizeof(struct sq_psn_search_ext) : 1028 sizeof(struct sq_psn_search); 1029 psn_nume = (qplib_qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ? 1030 qplib_qp->sq.max_wqe : 1031 ((qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size) / 1032 sizeof(struct bnxt_qplib_sge)); 1033 bytes += (psn_nume * psn_sz); 1034 } 1035 1036 bytes = PAGE_ALIGN(bytes); 1037 umem = ib_umem_get(&rdev->ibdev, ureq.qpsva, bytes, 1038 IB_ACCESS_LOCAL_WRITE); 1039 if (IS_ERR(umem)) 1040 return PTR_ERR(umem); 1041 1042 qp->sumem = umem; 1043 qplib_qp->sq.sg_info.umem = umem; 1044 qplib_qp->sq.sg_info.pgsize = PAGE_SIZE; 1045 qplib_qp->sq.sg_info.pgshft = PAGE_SHIFT; 1046 qplib_qp->qp_handle = ureq.qp_handle; 1047 1048 if (!qp->qplib_qp.srq) { 1049 bytes = (qplib_qp->rq.max_wqe * qplib_qp->rq.wqe_size); 1050 bytes = PAGE_ALIGN(bytes); 1051 umem = ib_umem_get(&rdev->ibdev, ureq.qprva, bytes, 1052 IB_ACCESS_LOCAL_WRITE); 1053 if (IS_ERR(umem)) 1054 goto rqfail; 1055 qp->rumem = umem; 1056 qplib_qp->rq.sg_info.umem = umem; 1057 qplib_qp->rq.sg_info.pgsize = PAGE_SIZE; 1058 qplib_qp->rq.sg_info.pgshft = PAGE_SHIFT; 1059 } 1060 1061 qplib_qp->dpi = &cntx->dpi; 1062 return 0; 1063 rqfail: 1064 ib_umem_release(qp->sumem); 1065 qp->sumem = NULL; 1066 memset(&qplib_qp->sq.sg_info, 0, sizeof(qplib_qp->sq.sg_info)); 1067 1068 return PTR_ERR(umem); 1069 } 1070 1071 static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah 1072 (struct bnxt_re_pd *pd, 1073 struct bnxt_qplib_res *qp1_res, 1074 struct bnxt_qplib_qp *qp1_qp) 1075 { 1076 struct bnxt_re_dev *rdev = pd->rdev; 1077 struct bnxt_re_ah *ah; 1078 union ib_gid sgid; 1079 int rc; 1080 1081 ah = kzalloc(sizeof(*ah), GFP_KERNEL); 1082 if (!ah) 1083 return NULL; 1084 1085 ah->rdev = rdev; 1086 ah->qplib_ah.pd = &pd->qplib_pd; 1087 1088 rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid); 1089 if (rc) 1090 goto fail; 1091 1092 /* supply the dgid data same as sgid */ 1093 memcpy(ah->qplib_ah.dgid.data, &sgid.raw, 1094 sizeof(union ib_gid)); 1095 ah->qplib_ah.sgid_index = 0; 1096 1097 ah->qplib_ah.traffic_class = 0; 1098 ah->qplib_ah.flow_label = 0; 1099 ah->qplib_ah.hop_limit = 1; 1100 ah->qplib_ah.sl = 0; 1101 /* Have DMAC same as SMAC */ 1102 ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr); 1103 1104 rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah, false); 1105 if (rc) { 1106 ibdev_err(&rdev->ibdev, 1107 "Failed to allocate HW AH for Shadow QP"); 1108 goto fail; 1109 } 1110 atomic_inc(&rdev->stats.res.ah_count); 1111 1112 return ah; 1113 1114 fail: 1115 kfree(ah); 1116 return NULL; 1117 } 1118 1119 static struct bnxt_re_qp *bnxt_re_create_shadow_qp 1120 (struct bnxt_re_pd *pd, 1121 struct bnxt_qplib_res *qp1_res, 1122 struct bnxt_qplib_qp *qp1_qp) 1123 { 1124 struct bnxt_re_dev *rdev = pd->rdev; 1125 struct bnxt_re_qp *qp; 1126 int rc; 1127 1128 qp = kzalloc(sizeof(*qp), GFP_KERNEL); 1129 if (!qp) 1130 return NULL; 1131 1132 qp->rdev = rdev; 1133 1134 /* Initialize the shadow QP structure from the QP1 values */ 1135 ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr); 1136 1137 qp->qplib_qp.pd = &pd->qplib_pd; 1138 qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp); 1139 qp->qplib_qp.type = IB_QPT_UD; 1140 1141 qp->qplib_qp.max_inline_data = 0; 1142 qp->qplib_qp.sig_type = true; 1143 1144 /* Shadow QP SQ depth should be same as QP1 RQ depth */ 1145 qp->qplib_qp.sq.wqe_size = bnxt_re_get_wqe_size(0, 6); 1146 qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe; 1147 qp->qplib_qp.sq.max_sge = 2; 1148 /* Q full delta can be 1 since it is internal QP */ 1149 qp->qplib_qp.sq.q_full_delta = 1; 1150 qp->qplib_qp.sq.sg_info.pgsize = PAGE_SIZE; 1151 qp->qplib_qp.sq.sg_info.pgshft = PAGE_SHIFT; 1152 1153 qp->qplib_qp.scq = qp1_qp->scq; 1154 qp->qplib_qp.rcq = qp1_qp->rcq; 1155 1156 qp->qplib_qp.rq.wqe_size = bnxt_re_get_rwqe_size(6); 1157 qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe; 1158 qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge; 1159 /* Q full delta can be 1 since it is internal QP */ 1160 qp->qplib_qp.rq.q_full_delta = 1; 1161 qp->qplib_qp.rq.sg_info.pgsize = PAGE_SIZE; 1162 qp->qplib_qp.rq.sg_info.pgshft = PAGE_SHIFT; 1163 1164 qp->qplib_qp.mtu = qp1_qp->mtu; 1165 1166 qp->qplib_qp.sq_hdr_buf_size = 0; 1167 qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6; 1168 qp->qplib_qp.dpi = &rdev->dpi_privileged; 1169 1170 rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp); 1171 if (rc) 1172 goto fail; 1173 1174 spin_lock_init(&qp->sq_lock); 1175 INIT_LIST_HEAD(&qp->list); 1176 mutex_lock(&rdev->qp_lock); 1177 list_add_tail(&qp->list, &rdev->qp_list); 1178 atomic_inc(&rdev->stats.res.qp_count); 1179 mutex_unlock(&rdev->qp_lock); 1180 return qp; 1181 fail: 1182 kfree(qp); 1183 return NULL; 1184 } 1185 1186 static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp, 1187 struct ib_qp_init_attr *init_attr) 1188 { 1189 struct bnxt_qplib_dev_attr *dev_attr; 1190 struct bnxt_qplib_qp *qplqp; 1191 struct bnxt_re_dev *rdev; 1192 struct bnxt_qplib_q *rq; 1193 int entries; 1194 1195 rdev = qp->rdev; 1196 qplqp = &qp->qplib_qp; 1197 rq = &qplqp->rq; 1198 dev_attr = &rdev->dev_attr; 1199 1200 if (init_attr->srq) { 1201 struct bnxt_re_srq *srq; 1202 1203 srq = container_of(init_attr->srq, struct bnxt_re_srq, ib_srq); 1204 qplqp->srq = &srq->qplib_srq; 1205 rq->max_wqe = 0; 1206 } else { 1207 rq->max_sge = init_attr->cap.max_recv_sge; 1208 if (rq->max_sge > dev_attr->max_qp_sges) 1209 rq->max_sge = dev_attr->max_qp_sges; 1210 init_attr->cap.max_recv_sge = rq->max_sge; 1211 rq->wqe_size = bnxt_re_setup_rwqe_size(qplqp, rq->max_sge, 1212 dev_attr->max_qp_sges); 1213 /* Allocate 1 more than what's provided so posting max doesn't 1214 * mean empty. 1215 */ 1216 entries = roundup_pow_of_two(init_attr->cap.max_recv_wr + 1); 1217 rq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1); 1218 rq->q_full_delta = 0; 1219 rq->sg_info.pgsize = PAGE_SIZE; 1220 rq->sg_info.pgshft = PAGE_SHIFT; 1221 } 1222 1223 return 0; 1224 } 1225 1226 static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp) 1227 { 1228 struct bnxt_qplib_dev_attr *dev_attr; 1229 struct bnxt_qplib_qp *qplqp; 1230 struct bnxt_re_dev *rdev; 1231 1232 rdev = qp->rdev; 1233 qplqp = &qp->qplib_qp; 1234 dev_attr = &rdev->dev_attr; 1235 1236 if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) { 1237 qplqp->rq.max_sge = dev_attr->max_qp_sges; 1238 if (qplqp->rq.max_sge > dev_attr->max_qp_sges) 1239 qplqp->rq.max_sge = dev_attr->max_qp_sges; 1240 qplqp->rq.max_sge = 6; 1241 } 1242 } 1243 1244 static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp, 1245 struct ib_qp_init_attr *init_attr, 1246 struct ib_udata *udata) 1247 { 1248 struct bnxt_qplib_dev_attr *dev_attr; 1249 struct bnxt_qplib_qp *qplqp; 1250 struct bnxt_re_dev *rdev; 1251 struct bnxt_qplib_q *sq; 1252 int entries; 1253 int diff; 1254 int rc; 1255 1256 rdev = qp->rdev; 1257 qplqp = &qp->qplib_qp; 1258 sq = &qplqp->sq; 1259 dev_attr = &rdev->dev_attr; 1260 1261 sq->max_sge = init_attr->cap.max_send_sge; 1262 if (sq->max_sge > dev_attr->max_qp_sges) { 1263 sq->max_sge = dev_attr->max_qp_sges; 1264 init_attr->cap.max_send_sge = sq->max_sge; 1265 } 1266 1267 rc = bnxt_re_setup_swqe_size(qp, init_attr); 1268 if (rc) 1269 return rc; 1270 1271 entries = init_attr->cap.max_send_wr; 1272 /* Allocate 128 + 1 more than what's provided */ 1273 diff = (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) ? 1274 0 : BNXT_QPLIB_RESERVED_QP_WRS; 1275 entries = roundup_pow_of_two(entries + diff + 1); 1276 sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + diff + 1); 1277 sq->q_full_delta = diff + 1; 1278 /* 1279 * Reserving one slot for Phantom WQE. Application can 1280 * post one extra entry in this case. But allowing this to avoid 1281 * unexpected Queue full condition 1282 */ 1283 qplqp->sq.q_full_delta -= 1; 1284 qplqp->sq.sg_info.pgsize = PAGE_SIZE; 1285 qplqp->sq.sg_info.pgshft = PAGE_SHIFT; 1286 1287 return 0; 1288 } 1289 1290 static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp, 1291 struct ib_qp_init_attr *init_attr) 1292 { 1293 struct bnxt_qplib_dev_attr *dev_attr; 1294 struct bnxt_qplib_qp *qplqp; 1295 struct bnxt_re_dev *rdev; 1296 int entries; 1297 1298 rdev = qp->rdev; 1299 qplqp = &qp->qplib_qp; 1300 dev_attr = &rdev->dev_attr; 1301 1302 if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) { 1303 entries = roundup_pow_of_two(init_attr->cap.max_send_wr + 1); 1304 qplqp->sq.max_wqe = min_t(u32, entries, 1305 dev_attr->max_qp_wqes + 1); 1306 qplqp->sq.q_full_delta = qplqp->sq.max_wqe - 1307 init_attr->cap.max_send_wr; 1308 qplqp->sq.max_sge++; /* Need one extra sge to put UD header */ 1309 if (qplqp->sq.max_sge > dev_attr->max_qp_sges) 1310 qplqp->sq.max_sge = dev_attr->max_qp_sges; 1311 } 1312 } 1313 1314 static int bnxt_re_init_qp_type(struct bnxt_re_dev *rdev, 1315 struct ib_qp_init_attr *init_attr) 1316 { 1317 struct bnxt_qplib_chip_ctx *chip_ctx; 1318 int qptype; 1319 1320 chip_ctx = rdev->chip_ctx; 1321 1322 qptype = __from_ib_qp_type(init_attr->qp_type); 1323 if (qptype == IB_QPT_MAX) { 1324 ibdev_err(&rdev->ibdev, "QP type 0x%x not supported", qptype); 1325 qptype = -EOPNOTSUPP; 1326 goto out; 1327 } 1328 1329 if (bnxt_qplib_is_chip_gen_p5(chip_ctx) && 1330 init_attr->qp_type == IB_QPT_GSI) 1331 qptype = CMDQ_CREATE_QP_TYPE_GSI; 1332 out: 1333 return qptype; 1334 } 1335 1336 static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd, 1337 struct ib_qp_init_attr *init_attr, 1338 struct ib_udata *udata) 1339 { 1340 struct bnxt_qplib_dev_attr *dev_attr; 1341 struct bnxt_qplib_qp *qplqp; 1342 struct bnxt_re_dev *rdev; 1343 struct bnxt_re_cq *cq; 1344 int rc = 0, qptype; 1345 1346 rdev = qp->rdev; 1347 qplqp = &qp->qplib_qp; 1348 dev_attr = &rdev->dev_attr; 1349 1350 /* Setup misc params */ 1351 ether_addr_copy(qplqp->smac, rdev->netdev->dev_addr); 1352 qplqp->pd = &pd->qplib_pd; 1353 qplqp->qp_handle = (u64)qplqp; 1354 qplqp->max_inline_data = init_attr->cap.max_inline_data; 1355 qplqp->sig_type = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR; 1356 qptype = bnxt_re_init_qp_type(rdev, init_attr); 1357 if (qptype < 0) { 1358 rc = qptype; 1359 goto out; 1360 } 1361 qplqp->type = (u8)qptype; 1362 qplqp->wqe_mode = rdev->chip_ctx->modes.wqe_mode; 1363 1364 if (init_attr->qp_type == IB_QPT_RC) { 1365 qplqp->max_rd_atomic = dev_attr->max_qp_rd_atom; 1366 qplqp->max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom; 1367 } 1368 qplqp->mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu)); 1369 qplqp->dpi = &rdev->dpi_privileged; /* Doorbell page */ 1370 if (init_attr->create_flags) { 1371 ibdev_dbg(&rdev->ibdev, 1372 "QP create flags 0x%x not supported", 1373 init_attr->create_flags); 1374 return -EOPNOTSUPP; 1375 } 1376 1377 /* Setup CQs */ 1378 if (init_attr->send_cq) { 1379 cq = container_of(init_attr->send_cq, struct bnxt_re_cq, ib_cq); 1380 qplqp->scq = &cq->qplib_cq; 1381 qp->scq = cq; 1382 } 1383 1384 if (init_attr->recv_cq) { 1385 cq = container_of(init_attr->recv_cq, struct bnxt_re_cq, ib_cq); 1386 qplqp->rcq = &cq->qplib_cq; 1387 qp->rcq = cq; 1388 } 1389 1390 /* Setup RQ/SRQ */ 1391 rc = bnxt_re_init_rq_attr(qp, init_attr); 1392 if (rc) 1393 goto out; 1394 if (init_attr->qp_type == IB_QPT_GSI) 1395 bnxt_re_adjust_gsi_rq_attr(qp); 1396 1397 /* Setup SQ */ 1398 rc = bnxt_re_init_sq_attr(qp, init_attr, udata); 1399 if (rc) 1400 goto out; 1401 if (init_attr->qp_type == IB_QPT_GSI) 1402 bnxt_re_adjust_gsi_sq_attr(qp, init_attr); 1403 1404 if (udata) /* This will update DPI and qp_handle */ 1405 rc = bnxt_re_init_user_qp(rdev, pd, qp, udata); 1406 out: 1407 return rc; 1408 } 1409 1410 static int bnxt_re_create_shadow_gsi(struct bnxt_re_qp *qp, 1411 struct bnxt_re_pd *pd) 1412 { 1413 struct bnxt_re_sqp_entries *sqp_tbl; 1414 struct bnxt_re_dev *rdev; 1415 struct bnxt_re_qp *sqp; 1416 struct bnxt_re_ah *sah; 1417 int rc = 0; 1418 1419 rdev = qp->rdev; 1420 /* Create a shadow QP to handle the QP1 traffic */ 1421 sqp_tbl = kcalloc(BNXT_RE_MAX_GSI_SQP_ENTRIES, sizeof(*sqp_tbl), 1422 GFP_KERNEL); 1423 if (!sqp_tbl) 1424 return -ENOMEM; 1425 rdev->gsi_ctx.sqp_tbl = sqp_tbl; 1426 1427 sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res, &qp->qplib_qp); 1428 if (!sqp) { 1429 rc = -ENODEV; 1430 ibdev_err(&rdev->ibdev, "Failed to create Shadow QP for QP1"); 1431 goto out; 1432 } 1433 rdev->gsi_ctx.gsi_sqp = sqp; 1434 1435 sqp->rcq = qp->rcq; 1436 sqp->scq = qp->scq; 1437 sah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res, 1438 &qp->qplib_qp); 1439 if (!sah) { 1440 bnxt_qplib_destroy_qp(&rdev->qplib_res, 1441 &sqp->qplib_qp); 1442 rc = -ENODEV; 1443 ibdev_err(&rdev->ibdev, 1444 "Failed to create AH entry for ShadowQP"); 1445 goto out; 1446 } 1447 rdev->gsi_ctx.gsi_sah = sah; 1448 1449 return 0; 1450 out: 1451 kfree(sqp_tbl); 1452 return rc; 1453 } 1454 1455 static int bnxt_re_create_gsi_qp(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd, 1456 struct ib_qp_init_attr *init_attr) 1457 { 1458 struct bnxt_re_dev *rdev; 1459 struct bnxt_qplib_qp *qplqp; 1460 int rc; 1461 1462 rdev = qp->rdev; 1463 qplqp = &qp->qplib_qp; 1464 1465 qplqp->rq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2; 1466 qplqp->sq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2; 1467 1468 rc = bnxt_qplib_create_qp1(&rdev->qplib_res, qplqp); 1469 if (rc) { 1470 ibdev_err(&rdev->ibdev, "create HW QP1 failed!"); 1471 goto out; 1472 } 1473 1474 rc = bnxt_re_create_shadow_gsi(qp, pd); 1475 out: 1476 return rc; 1477 } 1478 1479 static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev, 1480 struct ib_qp_init_attr *init_attr, 1481 struct bnxt_qplib_dev_attr *dev_attr) 1482 { 1483 bool rc = true; 1484 1485 if (init_attr->cap.max_send_wr > dev_attr->max_qp_wqes || 1486 init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes || 1487 init_attr->cap.max_send_sge > dev_attr->max_qp_sges || 1488 init_attr->cap.max_recv_sge > dev_attr->max_qp_sges || 1489 init_attr->cap.max_inline_data > dev_attr->max_inline_data) { 1490 ibdev_err(&rdev->ibdev, 1491 "Create QP failed - max exceeded! 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x", 1492 init_attr->cap.max_send_wr, dev_attr->max_qp_wqes, 1493 init_attr->cap.max_recv_wr, dev_attr->max_qp_wqes, 1494 init_attr->cap.max_send_sge, dev_attr->max_qp_sges, 1495 init_attr->cap.max_recv_sge, dev_attr->max_qp_sges, 1496 init_attr->cap.max_inline_data, 1497 dev_attr->max_inline_data); 1498 rc = false; 1499 } 1500 return rc; 1501 } 1502 1503 int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr, 1504 struct ib_udata *udata) 1505 { 1506 struct ib_pd *ib_pd = ib_qp->pd; 1507 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); 1508 struct bnxt_re_dev *rdev = pd->rdev; 1509 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr; 1510 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); 1511 u32 active_qps; 1512 int rc; 1513 1514 rc = bnxt_re_test_qp_limits(rdev, qp_init_attr, dev_attr); 1515 if (!rc) { 1516 rc = -EINVAL; 1517 goto fail; 1518 } 1519 1520 qp->rdev = rdev; 1521 rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, udata); 1522 if (rc) 1523 goto fail; 1524 1525 if (qp_init_attr->qp_type == IB_QPT_GSI && 1526 !(bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))) { 1527 rc = bnxt_re_create_gsi_qp(qp, pd, qp_init_attr); 1528 if (rc == -ENODEV) 1529 goto qp_destroy; 1530 if (rc) 1531 goto fail; 1532 } else { 1533 rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp); 1534 if (rc) { 1535 ibdev_err(&rdev->ibdev, "Failed to create HW QP"); 1536 goto free_umem; 1537 } 1538 if (udata) { 1539 struct bnxt_re_qp_resp resp; 1540 1541 resp.qpid = qp->qplib_qp.id; 1542 resp.rsvd = 0; 1543 rc = ib_copy_to_udata(udata, &resp, sizeof(resp)); 1544 if (rc) { 1545 ibdev_err(&rdev->ibdev, "Failed to copy QP udata"); 1546 goto qp_destroy; 1547 } 1548 } 1549 } 1550 1551 qp->ib_qp.qp_num = qp->qplib_qp.id; 1552 if (qp_init_attr->qp_type == IB_QPT_GSI) 1553 rdev->gsi_ctx.gsi_qp = qp; 1554 spin_lock_init(&qp->sq_lock); 1555 spin_lock_init(&qp->rq_lock); 1556 INIT_LIST_HEAD(&qp->list); 1557 mutex_lock(&rdev->qp_lock); 1558 list_add_tail(&qp->list, &rdev->qp_list); 1559 mutex_unlock(&rdev->qp_lock); 1560 active_qps = atomic_inc_return(&rdev->stats.res.qp_count); 1561 if (active_qps > rdev->stats.res.qp_watermark) 1562 rdev->stats.res.qp_watermark = active_qps; 1563 if (qp_init_attr->qp_type == IB_QPT_RC) { 1564 active_qps = atomic_inc_return(&rdev->stats.res.rc_qp_count); 1565 if (active_qps > rdev->stats.res.rc_qp_watermark) 1566 rdev->stats.res.rc_qp_watermark = active_qps; 1567 } else if (qp_init_attr->qp_type == IB_QPT_UD) { 1568 active_qps = atomic_inc_return(&rdev->stats.res.ud_qp_count); 1569 if (active_qps > rdev->stats.res.ud_qp_watermark) 1570 rdev->stats.res.ud_qp_watermark = active_qps; 1571 } 1572 1573 return 0; 1574 qp_destroy: 1575 bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp); 1576 free_umem: 1577 ib_umem_release(qp->rumem); 1578 ib_umem_release(qp->sumem); 1579 fail: 1580 return rc; 1581 } 1582 1583 static u8 __from_ib_qp_state(enum ib_qp_state state) 1584 { 1585 switch (state) { 1586 case IB_QPS_RESET: 1587 return CMDQ_MODIFY_QP_NEW_STATE_RESET; 1588 case IB_QPS_INIT: 1589 return CMDQ_MODIFY_QP_NEW_STATE_INIT; 1590 case IB_QPS_RTR: 1591 return CMDQ_MODIFY_QP_NEW_STATE_RTR; 1592 case IB_QPS_RTS: 1593 return CMDQ_MODIFY_QP_NEW_STATE_RTS; 1594 case IB_QPS_SQD: 1595 return CMDQ_MODIFY_QP_NEW_STATE_SQD; 1596 case IB_QPS_SQE: 1597 return CMDQ_MODIFY_QP_NEW_STATE_SQE; 1598 case IB_QPS_ERR: 1599 default: 1600 return CMDQ_MODIFY_QP_NEW_STATE_ERR; 1601 } 1602 } 1603 1604 static enum ib_qp_state __to_ib_qp_state(u8 state) 1605 { 1606 switch (state) { 1607 case CMDQ_MODIFY_QP_NEW_STATE_RESET: 1608 return IB_QPS_RESET; 1609 case CMDQ_MODIFY_QP_NEW_STATE_INIT: 1610 return IB_QPS_INIT; 1611 case CMDQ_MODIFY_QP_NEW_STATE_RTR: 1612 return IB_QPS_RTR; 1613 case CMDQ_MODIFY_QP_NEW_STATE_RTS: 1614 return IB_QPS_RTS; 1615 case CMDQ_MODIFY_QP_NEW_STATE_SQD: 1616 return IB_QPS_SQD; 1617 case CMDQ_MODIFY_QP_NEW_STATE_SQE: 1618 return IB_QPS_SQE; 1619 case CMDQ_MODIFY_QP_NEW_STATE_ERR: 1620 default: 1621 return IB_QPS_ERR; 1622 } 1623 } 1624 1625 static u32 __from_ib_mtu(enum ib_mtu mtu) 1626 { 1627 switch (mtu) { 1628 case IB_MTU_256: 1629 return CMDQ_MODIFY_QP_PATH_MTU_MTU_256; 1630 case IB_MTU_512: 1631 return CMDQ_MODIFY_QP_PATH_MTU_MTU_512; 1632 case IB_MTU_1024: 1633 return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024; 1634 case IB_MTU_2048: 1635 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048; 1636 case IB_MTU_4096: 1637 return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096; 1638 default: 1639 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048; 1640 } 1641 } 1642 1643 static enum ib_mtu __to_ib_mtu(u32 mtu) 1644 { 1645 switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) { 1646 case CMDQ_MODIFY_QP_PATH_MTU_MTU_256: 1647 return IB_MTU_256; 1648 case CMDQ_MODIFY_QP_PATH_MTU_MTU_512: 1649 return IB_MTU_512; 1650 case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024: 1651 return IB_MTU_1024; 1652 case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048: 1653 return IB_MTU_2048; 1654 case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096: 1655 return IB_MTU_4096; 1656 default: 1657 return IB_MTU_2048; 1658 } 1659 } 1660 1661 /* Shared Receive Queues */ 1662 int bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata) 1663 { 1664 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq, 1665 ib_srq); 1666 struct bnxt_re_dev *rdev = srq->rdev; 1667 struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq; 1668 struct bnxt_qplib_nq *nq = NULL; 1669 1670 if (qplib_srq->cq) 1671 nq = qplib_srq->cq->nq; 1672 bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq); 1673 ib_umem_release(srq->umem); 1674 atomic_dec(&rdev->stats.res.srq_count); 1675 if (nq) 1676 nq->budget--; 1677 return 0; 1678 } 1679 1680 static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev, 1681 struct bnxt_re_pd *pd, 1682 struct bnxt_re_srq *srq, 1683 struct ib_udata *udata) 1684 { 1685 struct bnxt_re_srq_req ureq; 1686 struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq; 1687 struct ib_umem *umem; 1688 int bytes = 0; 1689 struct bnxt_re_ucontext *cntx = rdma_udata_to_drv_context( 1690 udata, struct bnxt_re_ucontext, ib_uctx); 1691 1692 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) 1693 return -EFAULT; 1694 1695 bytes = (qplib_srq->max_wqe * qplib_srq->wqe_size); 1696 bytes = PAGE_ALIGN(bytes); 1697 umem = ib_umem_get(&rdev->ibdev, ureq.srqva, bytes, 1698 IB_ACCESS_LOCAL_WRITE); 1699 if (IS_ERR(umem)) 1700 return PTR_ERR(umem); 1701 1702 srq->umem = umem; 1703 qplib_srq->sg_info.umem = umem; 1704 qplib_srq->sg_info.pgsize = PAGE_SIZE; 1705 qplib_srq->sg_info.pgshft = PAGE_SHIFT; 1706 qplib_srq->srq_handle = ureq.srq_handle; 1707 qplib_srq->dpi = &cntx->dpi; 1708 1709 return 0; 1710 } 1711 1712 int bnxt_re_create_srq(struct ib_srq *ib_srq, 1713 struct ib_srq_init_attr *srq_init_attr, 1714 struct ib_udata *udata) 1715 { 1716 struct bnxt_qplib_dev_attr *dev_attr; 1717 struct bnxt_qplib_nq *nq = NULL; 1718 struct bnxt_re_dev *rdev; 1719 struct bnxt_re_srq *srq; 1720 struct bnxt_re_pd *pd; 1721 struct ib_pd *ib_pd; 1722 u32 active_srqs; 1723 int rc, entries; 1724 1725 ib_pd = ib_srq->pd; 1726 pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); 1727 rdev = pd->rdev; 1728 dev_attr = &rdev->dev_attr; 1729 srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq); 1730 1731 if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) { 1732 ibdev_err(&rdev->ibdev, "Create CQ failed - max exceeded"); 1733 rc = -EINVAL; 1734 goto exit; 1735 } 1736 1737 if (srq_init_attr->srq_type != IB_SRQT_BASIC) { 1738 rc = -EOPNOTSUPP; 1739 goto exit; 1740 } 1741 1742 srq->rdev = rdev; 1743 srq->qplib_srq.pd = &pd->qplib_pd; 1744 srq->qplib_srq.dpi = &rdev->dpi_privileged; 1745 /* Allocate 1 more than what's provided so posting max doesn't 1746 * mean empty 1747 */ 1748 entries = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1); 1749 if (entries > dev_attr->max_srq_wqes + 1) 1750 entries = dev_attr->max_srq_wqes + 1; 1751 srq->qplib_srq.max_wqe = entries; 1752 1753 srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge; 1754 /* 128 byte wqe size for SRQ . So use max sges */ 1755 srq->qplib_srq.wqe_size = bnxt_re_get_rwqe_size(dev_attr->max_srq_sges); 1756 srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit; 1757 srq->srq_limit = srq_init_attr->attr.srq_limit; 1758 srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id; 1759 nq = &rdev->nq[0]; 1760 1761 if (udata) { 1762 rc = bnxt_re_init_user_srq(rdev, pd, srq, udata); 1763 if (rc) 1764 goto fail; 1765 } 1766 1767 rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq); 1768 if (rc) { 1769 ibdev_err(&rdev->ibdev, "Create HW SRQ failed!"); 1770 goto fail; 1771 } 1772 1773 if (udata) { 1774 struct bnxt_re_srq_resp resp; 1775 1776 resp.srqid = srq->qplib_srq.id; 1777 rc = ib_copy_to_udata(udata, &resp, sizeof(resp)); 1778 if (rc) { 1779 ibdev_err(&rdev->ibdev, "SRQ copy to udata failed!"); 1780 bnxt_qplib_destroy_srq(&rdev->qplib_res, 1781 &srq->qplib_srq); 1782 goto fail; 1783 } 1784 } 1785 if (nq) 1786 nq->budget++; 1787 active_srqs = atomic_inc_return(&rdev->stats.res.srq_count); 1788 if (active_srqs > rdev->stats.res.srq_watermark) 1789 rdev->stats.res.srq_watermark = active_srqs; 1790 spin_lock_init(&srq->lock); 1791 1792 return 0; 1793 1794 fail: 1795 ib_umem_release(srq->umem); 1796 exit: 1797 return rc; 1798 } 1799 1800 int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr, 1801 enum ib_srq_attr_mask srq_attr_mask, 1802 struct ib_udata *udata) 1803 { 1804 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq, 1805 ib_srq); 1806 struct bnxt_re_dev *rdev = srq->rdev; 1807 int rc; 1808 1809 switch (srq_attr_mask) { 1810 case IB_SRQ_MAX_WR: 1811 /* SRQ resize is not supported */ 1812 return -EINVAL; 1813 case IB_SRQ_LIMIT: 1814 /* Change the SRQ threshold */ 1815 if (srq_attr->srq_limit > srq->qplib_srq.max_wqe) 1816 return -EINVAL; 1817 1818 srq->qplib_srq.threshold = srq_attr->srq_limit; 1819 rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq); 1820 if (rc) { 1821 ibdev_err(&rdev->ibdev, "Modify HW SRQ failed!"); 1822 return rc; 1823 } 1824 /* On success, update the shadow */ 1825 srq->srq_limit = srq_attr->srq_limit; 1826 /* No need to Build and send response back to udata */ 1827 return 0; 1828 default: 1829 ibdev_err(&rdev->ibdev, 1830 "Unsupported srq_attr_mask 0x%x", srq_attr_mask); 1831 return -EINVAL; 1832 } 1833 } 1834 1835 int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr) 1836 { 1837 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq, 1838 ib_srq); 1839 struct bnxt_re_srq tsrq; 1840 struct bnxt_re_dev *rdev = srq->rdev; 1841 int rc; 1842 1843 /* Get live SRQ attr */ 1844 tsrq.qplib_srq.id = srq->qplib_srq.id; 1845 rc = bnxt_qplib_query_srq(&rdev->qplib_res, &tsrq.qplib_srq); 1846 if (rc) { 1847 ibdev_err(&rdev->ibdev, "Query HW SRQ failed!"); 1848 return rc; 1849 } 1850 srq_attr->max_wr = srq->qplib_srq.max_wqe; 1851 srq_attr->max_sge = srq->qplib_srq.max_sge; 1852 srq_attr->srq_limit = tsrq.qplib_srq.threshold; 1853 1854 return 0; 1855 } 1856 1857 int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, const struct ib_recv_wr *wr, 1858 const struct ib_recv_wr **bad_wr) 1859 { 1860 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq, 1861 ib_srq); 1862 struct bnxt_qplib_swqe wqe; 1863 unsigned long flags; 1864 int rc = 0; 1865 1866 spin_lock_irqsave(&srq->lock, flags); 1867 while (wr) { 1868 /* Transcribe each ib_recv_wr to qplib_swqe */ 1869 wqe.num_sge = wr->num_sge; 1870 bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge); 1871 wqe.wr_id = wr->wr_id; 1872 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV; 1873 1874 rc = bnxt_qplib_post_srq_recv(&srq->qplib_srq, &wqe); 1875 if (rc) { 1876 *bad_wr = wr; 1877 break; 1878 } 1879 wr = wr->next; 1880 } 1881 spin_unlock_irqrestore(&srq->lock, flags); 1882 1883 return rc; 1884 } 1885 static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev, 1886 struct bnxt_re_qp *qp1_qp, 1887 int qp_attr_mask) 1888 { 1889 struct bnxt_re_qp *qp = rdev->gsi_ctx.gsi_sqp; 1890 int rc; 1891 1892 if (qp_attr_mask & IB_QP_STATE) { 1893 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE; 1894 qp->qplib_qp.state = qp1_qp->qplib_qp.state; 1895 } 1896 if (qp_attr_mask & IB_QP_PKEY_INDEX) { 1897 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY; 1898 qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index; 1899 } 1900 1901 if (qp_attr_mask & IB_QP_QKEY) { 1902 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY; 1903 /* Using a Random QKEY */ 1904 qp->qplib_qp.qkey = 0x81818181; 1905 } 1906 if (qp_attr_mask & IB_QP_SQ_PSN) { 1907 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN; 1908 qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn; 1909 } 1910 1911 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp); 1912 if (rc) 1913 ibdev_err(&rdev->ibdev, "Failed to modify Shadow QP for QP1"); 1914 return rc; 1915 } 1916 1917 int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, 1918 int qp_attr_mask, struct ib_udata *udata) 1919 { 1920 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); 1921 struct bnxt_re_dev *rdev = qp->rdev; 1922 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr; 1923 enum ib_qp_state curr_qp_state, new_qp_state; 1924 int rc, entries; 1925 unsigned int flags; 1926 u8 nw_type; 1927 1928 if (qp_attr_mask & ~IB_QP_ATTR_STANDARD_BITS) 1929 return -EOPNOTSUPP; 1930 1931 qp->qplib_qp.modify_flags = 0; 1932 if (qp_attr_mask & IB_QP_STATE) { 1933 curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state); 1934 new_qp_state = qp_attr->qp_state; 1935 if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state, 1936 ib_qp->qp_type, qp_attr_mask)) { 1937 ibdev_err(&rdev->ibdev, 1938 "Invalid attribute mask: %#x specified ", 1939 qp_attr_mask); 1940 ibdev_err(&rdev->ibdev, 1941 "for qpn: %#x type: %#x", 1942 ib_qp->qp_num, ib_qp->qp_type); 1943 ibdev_err(&rdev->ibdev, 1944 "curr_qp_state=0x%x, new_qp_state=0x%x\n", 1945 curr_qp_state, new_qp_state); 1946 return -EINVAL; 1947 } 1948 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE; 1949 qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state); 1950 1951 if (!qp->sumem && 1952 qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) { 1953 ibdev_dbg(&rdev->ibdev, 1954 "Move QP = %p to flush list\n", qp); 1955 flags = bnxt_re_lock_cqs(qp); 1956 bnxt_qplib_add_flush_qp(&qp->qplib_qp); 1957 bnxt_re_unlock_cqs(qp, flags); 1958 } 1959 if (!qp->sumem && 1960 qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) { 1961 ibdev_dbg(&rdev->ibdev, 1962 "Move QP = %p out of flush list\n", qp); 1963 flags = bnxt_re_lock_cqs(qp); 1964 bnxt_qplib_clean_qp(&qp->qplib_qp); 1965 bnxt_re_unlock_cqs(qp, flags); 1966 } 1967 } 1968 if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) { 1969 qp->qplib_qp.modify_flags |= 1970 CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY; 1971 qp->qplib_qp.en_sqd_async_notify = true; 1972 } 1973 if (qp_attr_mask & IB_QP_ACCESS_FLAGS) { 1974 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS; 1975 qp->qplib_qp.access = 1976 __from_ib_access_flags(qp_attr->qp_access_flags); 1977 /* LOCAL_WRITE access must be set to allow RC receive */ 1978 qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE; 1979 /* Temp: Set all params on QP as of now */ 1980 qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE; 1981 qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_READ; 1982 } 1983 if (qp_attr_mask & IB_QP_PKEY_INDEX) { 1984 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY; 1985 qp->qplib_qp.pkey_index = qp_attr->pkey_index; 1986 } 1987 if (qp_attr_mask & IB_QP_QKEY) { 1988 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY; 1989 qp->qplib_qp.qkey = qp_attr->qkey; 1990 } 1991 if (qp_attr_mask & IB_QP_AV) { 1992 const struct ib_global_route *grh = 1993 rdma_ah_read_grh(&qp_attr->ah_attr); 1994 const struct ib_gid_attr *sgid_attr; 1995 struct bnxt_re_gid_ctx *ctx; 1996 1997 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID | 1998 CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL | 1999 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX | 2000 CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT | 2001 CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS | 2002 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC | 2003 CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID; 2004 memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw, 2005 sizeof(qp->qplib_qp.ah.dgid.data)); 2006 qp->qplib_qp.ah.flow_label = grh->flow_label; 2007 sgid_attr = grh->sgid_attr; 2008 /* Get the HW context of the GID. The reference 2009 * of GID table entry is already taken by the caller. 2010 */ 2011 ctx = rdma_read_gid_hw_context(sgid_attr); 2012 qp->qplib_qp.ah.sgid_index = ctx->idx; 2013 qp->qplib_qp.ah.host_sgid_index = grh->sgid_index; 2014 qp->qplib_qp.ah.hop_limit = grh->hop_limit; 2015 qp->qplib_qp.ah.traffic_class = grh->traffic_class; 2016 qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr); 2017 ether_addr_copy(qp->qplib_qp.ah.dmac, 2018 qp_attr->ah_attr.roce.dmac); 2019 2020 rc = rdma_read_gid_l2_fields(sgid_attr, NULL, 2021 &qp->qplib_qp.smac[0]); 2022 if (rc) 2023 return rc; 2024 2025 nw_type = rdma_gid_attr_network_type(sgid_attr); 2026 switch (nw_type) { 2027 case RDMA_NETWORK_IPV4: 2028 qp->qplib_qp.nw_type = 2029 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4; 2030 break; 2031 case RDMA_NETWORK_IPV6: 2032 qp->qplib_qp.nw_type = 2033 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6; 2034 break; 2035 default: 2036 qp->qplib_qp.nw_type = 2037 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1; 2038 break; 2039 } 2040 } 2041 2042 if (qp_attr_mask & IB_QP_PATH_MTU) { 2043 qp->qplib_qp.modify_flags |= 2044 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU; 2045 qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu); 2046 qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu); 2047 } else if (qp_attr->qp_state == IB_QPS_RTR) { 2048 qp->qplib_qp.modify_flags |= 2049 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU; 2050 qp->qplib_qp.path_mtu = 2051 __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu)); 2052 qp->qplib_qp.mtu = 2053 ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu)); 2054 } 2055 2056 if (qp_attr_mask & IB_QP_TIMEOUT) { 2057 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT; 2058 qp->qplib_qp.timeout = qp_attr->timeout; 2059 } 2060 if (qp_attr_mask & IB_QP_RETRY_CNT) { 2061 qp->qplib_qp.modify_flags |= 2062 CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT; 2063 qp->qplib_qp.retry_cnt = qp_attr->retry_cnt; 2064 } 2065 if (qp_attr_mask & IB_QP_RNR_RETRY) { 2066 qp->qplib_qp.modify_flags |= 2067 CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY; 2068 qp->qplib_qp.rnr_retry = qp_attr->rnr_retry; 2069 } 2070 if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) { 2071 qp->qplib_qp.modify_flags |= 2072 CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER; 2073 qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer; 2074 } 2075 if (qp_attr_mask & IB_QP_RQ_PSN) { 2076 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN; 2077 qp->qplib_qp.rq.psn = qp_attr->rq_psn; 2078 } 2079 if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) { 2080 qp->qplib_qp.modify_flags |= 2081 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC; 2082 /* Cap the max_rd_atomic to device max */ 2083 qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic, 2084 dev_attr->max_qp_rd_atom); 2085 } 2086 if (qp_attr_mask & IB_QP_SQ_PSN) { 2087 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN; 2088 qp->qplib_qp.sq.psn = qp_attr->sq_psn; 2089 } 2090 if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { 2091 if (qp_attr->max_dest_rd_atomic > 2092 dev_attr->max_qp_init_rd_atom) { 2093 ibdev_err(&rdev->ibdev, 2094 "max_dest_rd_atomic requested%d is > dev_max%d", 2095 qp_attr->max_dest_rd_atomic, 2096 dev_attr->max_qp_init_rd_atom); 2097 return -EINVAL; 2098 } 2099 2100 qp->qplib_qp.modify_flags |= 2101 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC; 2102 qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic; 2103 } 2104 if (qp_attr_mask & IB_QP_CAP) { 2105 qp->qplib_qp.modify_flags |= 2106 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE | 2107 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE | 2108 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE | 2109 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE | 2110 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA; 2111 if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) || 2112 (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) || 2113 (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) || 2114 (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) || 2115 (qp_attr->cap.max_inline_data >= 2116 dev_attr->max_inline_data)) { 2117 ibdev_err(&rdev->ibdev, 2118 "Create QP failed - max exceeded"); 2119 return -EINVAL; 2120 } 2121 entries = roundup_pow_of_two(qp_attr->cap.max_send_wr); 2122 qp->qplib_qp.sq.max_wqe = min_t(u32, entries, 2123 dev_attr->max_qp_wqes + 1); 2124 qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe - 2125 qp_attr->cap.max_send_wr; 2126 /* 2127 * Reserving one slot for Phantom WQE. Some application can 2128 * post one extra entry in this case. Allowing this to avoid 2129 * unexpected Queue full condition 2130 */ 2131 qp->qplib_qp.sq.q_full_delta -= 1; 2132 qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge; 2133 if (qp->qplib_qp.rq.max_wqe) { 2134 entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr); 2135 qp->qplib_qp.rq.max_wqe = 2136 min_t(u32, entries, dev_attr->max_qp_wqes + 1); 2137 qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe - 2138 qp_attr->cap.max_recv_wr; 2139 qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge; 2140 } else { 2141 /* SRQ was used prior, just ignore the RQ caps */ 2142 } 2143 } 2144 if (qp_attr_mask & IB_QP_DEST_QPN) { 2145 qp->qplib_qp.modify_flags |= 2146 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID; 2147 qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num; 2148 } 2149 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp); 2150 if (rc) { 2151 ibdev_err(&rdev->ibdev, "Failed to modify HW QP"); 2152 return rc; 2153 } 2154 if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) 2155 rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask); 2156 return rc; 2157 } 2158 2159 int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, 2160 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr) 2161 { 2162 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); 2163 struct bnxt_re_dev *rdev = qp->rdev; 2164 struct bnxt_qplib_qp *qplib_qp; 2165 int rc; 2166 2167 qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL); 2168 if (!qplib_qp) 2169 return -ENOMEM; 2170 2171 qplib_qp->id = qp->qplib_qp.id; 2172 qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index; 2173 2174 rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp); 2175 if (rc) { 2176 ibdev_err(&rdev->ibdev, "Failed to query HW QP"); 2177 goto out; 2178 } 2179 qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state); 2180 qp_attr->cur_qp_state = __to_ib_qp_state(qplib_qp->cur_qp_state); 2181 qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0; 2182 qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access); 2183 qp_attr->pkey_index = qplib_qp->pkey_index; 2184 qp_attr->qkey = qplib_qp->qkey; 2185 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; 2186 rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label, 2187 qplib_qp->ah.host_sgid_index, 2188 qplib_qp->ah.hop_limit, 2189 qplib_qp->ah.traffic_class); 2190 rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data); 2191 rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl); 2192 ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac); 2193 qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu); 2194 qp_attr->timeout = qplib_qp->timeout; 2195 qp_attr->retry_cnt = qplib_qp->retry_cnt; 2196 qp_attr->rnr_retry = qplib_qp->rnr_retry; 2197 qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer; 2198 qp_attr->rq_psn = qplib_qp->rq.psn; 2199 qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic; 2200 qp_attr->sq_psn = qplib_qp->sq.psn; 2201 qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic; 2202 qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR : 2203 IB_SIGNAL_REQ_WR; 2204 qp_attr->dest_qp_num = qplib_qp->dest_qpn; 2205 2206 qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe; 2207 qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge; 2208 qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe; 2209 qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge; 2210 qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data; 2211 qp_init_attr->cap = qp_attr->cap; 2212 2213 out: 2214 kfree(qplib_qp); 2215 return rc; 2216 } 2217 2218 /* Routine for sending QP1 packets for RoCE V1 an V2 2219 */ 2220 static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp, 2221 const struct ib_send_wr *wr, 2222 struct bnxt_qplib_swqe *wqe, 2223 int payload_size) 2224 { 2225 struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, 2226 ib_ah); 2227 struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah; 2228 const struct ib_gid_attr *sgid_attr = ah->ib_ah.sgid_attr; 2229 struct bnxt_qplib_sge sge; 2230 u8 nw_type; 2231 u16 ether_type; 2232 union ib_gid dgid; 2233 bool is_eth = false; 2234 bool is_vlan = false; 2235 bool is_grh = false; 2236 bool is_udp = false; 2237 u8 ip_version = 0; 2238 u16 vlan_id = 0xFFFF; 2239 void *buf; 2240 int i, rc; 2241 2242 memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr)); 2243 2244 rc = rdma_read_gid_l2_fields(sgid_attr, &vlan_id, NULL); 2245 if (rc) 2246 return rc; 2247 2248 /* Get network header type for this GID */ 2249 nw_type = rdma_gid_attr_network_type(sgid_attr); 2250 switch (nw_type) { 2251 case RDMA_NETWORK_IPV4: 2252 nw_type = BNXT_RE_ROCEV2_IPV4_PACKET; 2253 break; 2254 case RDMA_NETWORK_IPV6: 2255 nw_type = BNXT_RE_ROCEV2_IPV6_PACKET; 2256 break; 2257 default: 2258 nw_type = BNXT_RE_ROCE_V1_PACKET; 2259 break; 2260 } 2261 memcpy(&dgid.raw, &qplib_ah->dgid, 16); 2262 is_udp = sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP; 2263 if (is_udp) { 2264 if (ipv6_addr_v4mapped((struct in6_addr *)&sgid_attr->gid)) { 2265 ip_version = 4; 2266 ether_type = ETH_P_IP; 2267 } else { 2268 ip_version = 6; 2269 ether_type = ETH_P_IPV6; 2270 } 2271 is_grh = false; 2272 } else { 2273 ether_type = ETH_P_IBOE; 2274 is_grh = true; 2275 } 2276 2277 is_eth = true; 2278 is_vlan = vlan_id && (vlan_id < 0x1000); 2279 2280 ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh, 2281 ip_version, is_udp, 0, &qp->qp1_hdr); 2282 2283 /* ETH */ 2284 ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac); 2285 ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac); 2286 2287 /* For vlan, check the sgid for vlan existence */ 2288 2289 if (!is_vlan) { 2290 qp->qp1_hdr.eth.type = cpu_to_be16(ether_type); 2291 } else { 2292 qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type); 2293 qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id); 2294 } 2295 2296 if (is_grh || (ip_version == 6)) { 2297 memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid_attr->gid.raw, 2298 sizeof(sgid_attr->gid)); 2299 memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data, 2300 sizeof(sgid_attr->gid)); 2301 qp->qp1_hdr.grh.hop_limit = qplib_ah->hop_limit; 2302 } 2303 2304 if (ip_version == 4) { 2305 qp->qp1_hdr.ip4.tos = 0; 2306 qp->qp1_hdr.ip4.id = 0; 2307 qp->qp1_hdr.ip4.frag_off = htons(IP_DF); 2308 qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit; 2309 2310 memcpy(&qp->qp1_hdr.ip4.saddr, sgid_attr->gid.raw + 12, 4); 2311 memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4); 2312 qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr); 2313 } 2314 2315 if (is_udp) { 2316 qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT); 2317 qp->qp1_hdr.udp.sport = htons(0x8CD1); 2318 qp->qp1_hdr.udp.csum = 0; 2319 } 2320 2321 /* BTH */ 2322 if (wr->opcode == IB_WR_SEND_WITH_IMM) { 2323 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE; 2324 qp->qp1_hdr.immediate_present = 1; 2325 } else { 2326 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY; 2327 } 2328 if (wr->send_flags & IB_SEND_SOLICITED) 2329 qp->qp1_hdr.bth.solicited_event = 1; 2330 /* pad_count */ 2331 qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3; 2332 2333 /* P_key for QP1 is for all members */ 2334 qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF); 2335 qp->qp1_hdr.bth.destination_qpn = IB_QP1; 2336 qp->qp1_hdr.bth.ack_req = 0; 2337 qp->send_psn++; 2338 qp->send_psn &= BTH_PSN_MASK; 2339 qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn); 2340 /* DETH */ 2341 /* Use the priviledged Q_Key for QP1 */ 2342 qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY); 2343 qp->qp1_hdr.deth.source_qpn = IB_QP1; 2344 2345 /* Pack the QP1 to the transmit buffer */ 2346 buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge); 2347 if (buf) { 2348 ib_ud_header_pack(&qp->qp1_hdr, buf); 2349 for (i = wqe->num_sge; i; i--) { 2350 wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr; 2351 wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey; 2352 wqe->sg_list[i].size = wqe->sg_list[i - 1].size; 2353 } 2354 2355 /* 2356 * Max Header buf size for IPV6 RoCE V2 is 86, 2357 * which is same as the QP1 SQ header buffer. 2358 * Header buf size for IPV4 RoCE V2 can be 66. 2359 * ETH(14) + VLAN(4)+ IP(20) + UDP (8) + BTH(20). 2360 * Subtract 20 bytes from QP1 SQ header buf size 2361 */ 2362 if (is_udp && ip_version == 4) 2363 sge.size -= 20; 2364 /* 2365 * Max Header buf size for RoCE V1 is 78. 2366 * ETH(14) + VLAN(4) + GRH(40) + BTH(20). 2367 * Subtract 8 bytes from QP1 SQ header buf size 2368 */ 2369 if (!is_udp) 2370 sge.size -= 8; 2371 2372 /* Subtract 4 bytes for non vlan packets */ 2373 if (!is_vlan) 2374 sge.size -= 4; 2375 2376 wqe->sg_list[0].addr = sge.addr; 2377 wqe->sg_list[0].lkey = sge.lkey; 2378 wqe->sg_list[0].size = sge.size; 2379 wqe->num_sge++; 2380 2381 } else { 2382 ibdev_err(&qp->rdev->ibdev, "QP1 buffer is empty!"); 2383 rc = -ENOMEM; 2384 } 2385 return rc; 2386 } 2387 2388 /* For the MAD layer, it only provides the recv SGE the size of 2389 * ib_grh + MAD datagram. No Ethernet headers, Ethertype, BTH, DETH, 2390 * nor RoCE iCRC. The Cu+ solution must provide buffer for the entire 2391 * receive packet (334 bytes) with no VLAN and then copy the GRH 2392 * and the MAD datagram out to the provided SGE. 2393 */ 2394 static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp, 2395 const struct ib_recv_wr *wr, 2396 struct bnxt_qplib_swqe *wqe, 2397 int payload_size) 2398 { 2399 struct bnxt_re_sqp_entries *sqp_entry; 2400 struct bnxt_qplib_sge ref, sge; 2401 struct bnxt_re_dev *rdev; 2402 u32 rq_prod_index; 2403 2404 rdev = qp->rdev; 2405 2406 rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp); 2407 2408 if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge)) 2409 return -ENOMEM; 2410 2411 /* Create 1 SGE to receive the entire 2412 * ethernet packet 2413 */ 2414 /* Save the reference from ULP */ 2415 ref.addr = wqe->sg_list[0].addr; 2416 ref.lkey = wqe->sg_list[0].lkey; 2417 ref.size = wqe->sg_list[0].size; 2418 2419 sqp_entry = &rdev->gsi_ctx.sqp_tbl[rq_prod_index]; 2420 2421 /* SGE 1 */ 2422 wqe->sg_list[0].addr = sge.addr; 2423 wqe->sg_list[0].lkey = sge.lkey; 2424 wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2; 2425 sge.size -= wqe->sg_list[0].size; 2426 2427 sqp_entry->sge.addr = ref.addr; 2428 sqp_entry->sge.lkey = ref.lkey; 2429 sqp_entry->sge.size = ref.size; 2430 /* Store the wrid for reporting completion */ 2431 sqp_entry->wrid = wqe->wr_id; 2432 /* change the wqe->wrid to table index */ 2433 wqe->wr_id = rq_prod_index; 2434 return 0; 2435 } 2436 2437 static int is_ud_qp(struct bnxt_re_qp *qp) 2438 { 2439 return (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD || 2440 qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI); 2441 } 2442 2443 static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp, 2444 const struct ib_send_wr *wr, 2445 struct bnxt_qplib_swqe *wqe) 2446 { 2447 struct bnxt_re_ah *ah = NULL; 2448 2449 if (is_ud_qp(qp)) { 2450 ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah); 2451 wqe->send.q_key = ud_wr(wr)->remote_qkey; 2452 wqe->send.dst_qp = ud_wr(wr)->remote_qpn; 2453 wqe->send.avid = ah->qplib_ah.id; 2454 } 2455 switch (wr->opcode) { 2456 case IB_WR_SEND: 2457 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND; 2458 break; 2459 case IB_WR_SEND_WITH_IMM: 2460 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM; 2461 wqe->send.imm_data = wr->ex.imm_data; 2462 break; 2463 case IB_WR_SEND_WITH_INV: 2464 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV; 2465 wqe->send.inv_key = wr->ex.invalidate_rkey; 2466 break; 2467 default: 2468 return -EINVAL; 2469 } 2470 if (wr->send_flags & IB_SEND_SIGNALED) 2471 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; 2472 if (wr->send_flags & IB_SEND_FENCE) 2473 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; 2474 if (wr->send_flags & IB_SEND_SOLICITED) 2475 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT; 2476 if (wr->send_flags & IB_SEND_INLINE) 2477 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE; 2478 2479 return 0; 2480 } 2481 2482 static int bnxt_re_build_rdma_wqe(const struct ib_send_wr *wr, 2483 struct bnxt_qplib_swqe *wqe) 2484 { 2485 switch (wr->opcode) { 2486 case IB_WR_RDMA_WRITE: 2487 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE; 2488 break; 2489 case IB_WR_RDMA_WRITE_WITH_IMM: 2490 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM; 2491 wqe->rdma.imm_data = wr->ex.imm_data; 2492 break; 2493 case IB_WR_RDMA_READ: 2494 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ; 2495 wqe->rdma.inv_key = wr->ex.invalidate_rkey; 2496 break; 2497 default: 2498 return -EINVAL; 2499 } 2500 wqe->rdma.remote_va = rdma_wr(wr)->remote_addr; 2501 wqe->rdma.r_key = rdma_wr(wr)->rkey; 2502 if (wr->send_flags & IB_SEND_SIGNALED) 2503 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; 2504 if (wr->send_flags & IB_SEND_FENCE) 2505 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; 2506 if (wr->send_flags & IB_SEND_SOLICITED) 2507 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT; 2508 if (wr->send_flags & IB_SEND_INLINE) 2509 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE; 2510 2511 return 0; 2512 } 2513 2514 static int bnxt_re_build_atomic_wqe(const struct ib_send_wr *wr, 2515 struct bnxt_qplib_swqe *wqe) 2516 { 2517 switch (wr->opcode) { 2518 case IB_WR_ATOMIC_CMP_AND_SWP: 2519 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP; 2520 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add; 2521 wqe->atomic.swap_data = atomic_wr(wr)->swap; 2522 break; 2523 case IB_WR_ATOMIC_FETCH_AND_ADD: 2524 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD; 2525 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add; 2526 break; 2527 default: 2528 return -EINVAL; 2529 } 2530 wqe->atomic.remote_va = atomic_wr(wr)->remote_addr; 2531 wqe->atomic.r_key = atomic_wr(wr)->rkey; 2532 if (wr->send_flags & IB_SEND_SIGNALED) 2533 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; 2534 if (wr->send_flags & IB_SEND_FENCE) 2535 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; 2536 if (wr->send_flags & IB_SEND_SOLICITED) 2537 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT; 2538 return 0; 2539 } 2540 2541 static int bnxt_re_build_inv_wqe(const struct ib_send_wr *wr, 2542 struct bnxt_qplib_swqe *wqe) 2543 { 2544 wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV; 2545 wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey; 2546 2547 /* Need unconditional fence for local invalidate 2548 * opcode to work as expected. 2549 */ 2550 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; 2551 2552 if (wr->send_flags & IB_SEND_SIGNALED) 2553 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; 2554 if (wr->send_flags & IB_SEND_SOLICITED) 2555 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT; 2556 2557 return 0; 2558 } 2559 2560 static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr, 2561 struct bnxt_qplib_swqe *wqe) 2562 { 2563 struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr); 2564 struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl; 2565 int access = wr->access; 2566 2567 wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0]; 2568 wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0]; 2569 wqe->frmr.page_list = mr->pages; 2570 wqe->frmr.page_list_len = mr->npages; 2571 wqe->frmr.levels = qplib_frpl->hwq.level; 2572 wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR; 2573 2574 /* Need unconditional fence for reg_mr 2575 * opcode to function as expected. 2576 */ 2577 2578 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; 2579 2580 if (wr->wr.send_flags & IB_SEND_SIGNALED) 2581 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; 2582 2583 if (access & IB_ACCESS_LOCAL_WRITE) 2584 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE; 2585 if (access & IB_ACCESS_REMOTE_READ) 2586 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ; 2587 if (access & IB_ACCESS_REMOTE_WRITE) 2588 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE; 2589 if (access & IB_ACCESS_REMOTE_ATOMIC) 2590 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC; 2591 if (access & IB_ACCESS_MW_BIND) 2592 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND; 2593 2594 wqe->frmr.l_key = wr->key; 2595 wqe->frmr.length = wr->mr->length; 2596 wqe->frmr.pbl_pg_sz_log = ilog2(PAGE_SIZE >> PAGE_SHIFT_4K); 2597 wqe->frmr.pg_sz_log = ilog2(wr->mr->page_size >> PAGE_SHIFT_4K); 2598 wqe->frmr.va = wr->mr->iova; 2599 return 0; 2600 } 2601 2602 static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev, 2603 const struct ib_send_wr *wr, 2604 struct bnxt_qplib_swqe *wqe) 2605 { 2606 /* Copy the inline data to the data field */ 2607 u8 *in_data; 2608 u32 i, sge_len; 2609 void *sge_addr; 2610 2611 in_data = wqe->inline_data; 2612 for (i = 0; i < wr->num_sge; i++) { 2613 sge_addr = (void *)(unsigned long) 2614 wr->sg_list[i].addr; 2615 sge_len = wr->sg_list[i].length; 2616 2617 if ((sge_len + wqe->inline_len) > 2618 BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) { 2619 ibdev_err(&rdev->ibdev, 2620 "Inline data size requested > supported value"); 2621 return -EINVAL; 2622 } 2623 sge_len = wr->sg_list[i].length; 2624 2625 memcpy(in_data, sge_addr, sge_len); 2626 in_data += wr->sg_list[i].length; 2627 wqe->inline_len += wr->sg_list[i].length; 2628 } 2629 return wqe->inline_len; 2630 } 2631 2632 static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev, 2633 const struct ib_send_wr *wr, 2634 struct bnxt_qplib_swqe *wqe) 2635 { 2636 int payload_sz = 0; 2637 2638 if (wr->send_flags & IB_SEND_INLINE) 2639 payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe); 2640 else 2641 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list, 2642 wqe->num_sge); 2643 2644 return payload_sz; 2645 } 2646 2647 static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp) 2648 { 2649 if ((qp->ib_qp.qp_type == IB_QPT_UD || 2650 qp->ib_qp.qp_type == IB_QPT_GSI || 2651 qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) && 2652 qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) { 2653 int qp_attr_mask; 2654 struct ib_qp_attr qp_attr; 2655 2656 qp_attr_mask = IB_QP_STATE; 2657 qp_attr.qp_state = IB_QPS_RTS; 2658 bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL); 2659 qp->qplib_qp.wqe_cnt = 0; 2660 } 2661 } 2662 2663 static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev, 2664 struct bnxt_re_qp *qp, 2665 const struct ib_send_wr *wr) 2666 { 2667 int rc = 0, payload_sz = 0; 2668 unsigned long flags; 2669 2670 spin_lock_irqsave(&qp->sq_lock, flags); 2671 while (wr) { 2672 struct bnxt_qplib_swqe wqe = {}; 2673 2674 /* Common */ 2675 wqe.num_sge = wr->num_sge; 2676 if (wr->num_sge > qp->qplib_qp.sq.max_sge) { 2677 ibdev_err(&rdev->ibdev, 2678 "Limit exceeded for Send SGEs"); 2679 rc = -EINVAL; 2680 goto bad; 2681 } 2682 2683 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe); 2684 if (payload_sz < 0) { 2685 rc = -EINVAL; 2686 goto bad; 2687 } 2688 wqe.wr_id = wr->wr_id; 2689 2690 wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND; 2691 2692 rc = bnxt_re_build_send_wqe(qp, wr, &wqe); 2693 if (!rc) 2694 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe); 2695 bad: 2696 if (rc) { 2697 ibdev_err(&rdev->ibdev, 2698 "Post send failed opcode = %#x rc = %d", 2699 wr->opcode, rc); 2700 break; 2701 } 2702 wr = wr->next; 2703 } 2704 bnxt_qplib_post_send_db(&qp->qplib_qp); 2705 bnxt_ud_qp_hw_stall_workaround(qp); 2706 spin_unlock_irqrestore(&qp->sq_lock, flags); 2707 return rc; 2708 } 2709 2710 int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr, 2711 const struct ib_send_wr **bad_wr) 2712 { 2713 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); 2714 struct bnxt_qplib_swqe wqe; 2715 int rc = 0, payload_sz = 0; 2716 unsigned long flags; 2717 2718 spin_lock_irqsave(&qp->sq_lock, flags); 2719 while (wr) { 2720 /* House keeping */ 2721 memset(&wqe, 0, sizeof(wqe)); 2722 2723 /* Common */ 2724 wqe.num_sge = wr->num_sge; 2725 if (wr->num_sge > qp->qplib_qp.sq.max_sge) { 2726 ibdev_err(&qp->rdev->ibdev, 2727 "Limit exceeded for Send SGEs"); 2728 rc = -EINVAL; 2729 goto bad; 2730 } 2731 2732 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe); 2733 if (payload_sz < 0) { 2734 rc = -EINVAL; 2735 goto bad; 2736 } 2737 wqe.wr_id = wr->wr_id; 2738 2739 switch (wr->opcode) { 2740 case IB_WR_SEND: 2741 case IB_WR_SEND_WITH_IMM: 2742 if (qp->qplib_qp.type == CMDQ_CREATE_QP1_TYPE_GSI) { 2743 rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe, 2744 payload_sz); 2745 if (rc) 2746 goto bad; 2747 wqe.rawqp1.lflags |= 2748 SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC; 2749 } 2750 switch (wr->send_flags) { 2751 case IB_SEND_IP_CSUM: 2752 wqe.rawqp1.lflags |= 2753 SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM; 2754 break; 2755 default: 2756 break; 2757 } 2758 fallthrough; 2759 case IB_WR_SEND_WITH_INV: 2760 rc = bnxt_re_build_send_wqe(qp, wr, &wqe); 2761 break; 2762 case IB_WR_RDMA_WRITE: 2763 case IB_WR_RDMA_WRITE_WITH_IMM: 2764 case IB_WR_RDMA_READ: 2765 rc = bnxt_re_build_rdma_wqe(wr, &wqe); 2766 break; 2767 case IB_WR_ATOMIC_CMP_AND_SWP: 2768 case IB_WR_ATOMIC_FETCH_AND_ADD: 2769 rc = bnxt_re_build_atomic_wqe(wr, &wqe); 2770 break; 2771 case IB_WR_RDMA_READ_WITH_INV: 2772 ibdev_err(&qp->rdev->ibdev, 2773 "RDMA Read with Invalidate is not supported"); 2774 rc = -EINVAL; 2775 goto bad; 2776 case IB_WR_LOCAL_INV: 2777 rc = bnxt_re_build_inv_wqe(wr, &wqe); 2778 break; 2779 case IB_WR_REG_MR: 2780 rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe); 2781 break; 2782 default: 2783 /* Unsupported WRs */ 2784 ibdev_err(&qp->rdev->ibdev, 2785 "WR (%#x) is not supported", wr->opcode); 2786 rc = -EINVAL; 2787 goto bad; 2788 } 2789 if (!rc) 2790 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe); 2791 bad: 2792 if (rc) { 2793 ibdev_err(&qp->rdev->ibdev, 2794 "post_send failed op:%#x qps = %#x rc = %d\n", 2795 wr->opcode, qp->qplib_qp.state, rc); 2796 *bad_wr = wr; 2797 break; 2798 } 2799 wr = wr->next; 2800 } 2801 bnxt_qplib_post_send_db(&qp->qplib_qp); 2802 bnxt_ud_qp_hw_stall_workaround(qp); 2803 spin_unlock_irqrestore(&qp->sq_lock, flags); 2804 2805 return rc; 2806 } 2807 2808 static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev, 2809 struct bnxt_re_qp *qp, 2810 const struct ib_recv_wr *wr) 2811 { 2812 struct bnxt_qplib_swqe wqe; 2813 int rc = 0; 2814 2815 while (wr) { 2816 /* House keeping */ 2817 memset(&wqe, 0, sizeof(wqe)); 2818 2819 /* Common */ 2820 wqe.num_sge = wr->num_sge; 2821 if (wr->num_sge > qp->qplib_qp.rq.max_sge) { 2822 ibdev_err(&rdev->ibdev, 2823 "Limit exceeded for Receive SGEs"); 2824 rc = -EINVAL; 2825 break; 2826 } 2827 bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge); 2828 wqe.wr_id = wr->wr_id; 2829 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV; 2830 2831 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe); 2832 if (rc) 2833 break; 2834 2835 wr = wr->next; 2836 } 2837 if (!rc) 2838 bnxt_qplib_post_recv_db(&qp->qplib_qp); 2839 return rc; 2840 } 2841 2842 int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr, 2843 const struct ib_recv_wr **bad_wr) 2844 { 2845 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); 2846 struct bnxt_qplib_swqe wqe; 2847 int rc = 0, payload_sz = 0; 2848 unsigned long flags; 2849 u32 count = 0; 2850 2851 spin_lock_irqsave(&qp->rq_lock, flags); 2852 while (wr) { 2853 /* House keeping */ 2854 memset(&wqe, 0, sizeof(wqe)); 2855 2856 /* Common */ 2857 wqe.num_sge = wr->num_sge; 2858 if (wr->num_sge > qp->qplib_qp.rq.max_sge) { 2859 ibdev_err(&qp->rdev->ibdev, 2860 "Limit exceeded for Receive SGEs"); 2861 rc = -EINVAL; 2862 *bad_wr = wr; 2863 break; 2864 } 2865 2866 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, 2867 wr->num_sge); 2868 wqe.wr_id = wr->wr_id; 2869 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV; 2870 2871 if (ib_qp->qp_type == IB_QPT_GSI && 2872 qp->qplib_qp.type != CMDQ_CREATE_QP_TYPE_GSI) 2873 rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe, 2874 payload_sz); 2875 if (!rc) 2876 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe); 2877 if (rc) { 2878 *bad_wr = wr; 2879 break; 2880 } 2881 2882 /* Ring DB if the RQEs posted reaches a threshold value */ 2883 if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) { 2884 bnxt_qplib_post_recv_db(&qp->qplib_qp); 2885 count = 0; 2886 } 2887 2888 wr = wr->next; 2889 } 2890 2891 if (count) 2892 bnxt_qplib_post_recv_db(&qp->qplib_qp); 2893 2894 spin_unlock_irqrestore(&qp->rq_lock, flags); 2895 2896 return rc; 2897 } 2898 2899 /* Completion Queues */ 2900 int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) 2901 { 2902 struct bnxt_re_cq *cq; 2903 struct bnxt_qplib_nq *nq; 2904 struct bnxt_re_dev *rdev; 2905 2906 cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq); 2907 rdev = cq->rdev; 2908 nq = cq->qplib_cq.nq; 2909 2910 bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq); 2911 ib_umem_release(cq->umem); 2912 2913 atomic_dec(&rdev->stats.res.cq_count); 2914 nq->budget--; 2915 kfree(cq->cql); 2916 return 0; 2917 } 2918 2919 int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, 2920 struct ib_udata *udata) 2921 { 2922 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibcq->device, ibdev); 2923 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr; 2924 struct bnxt_re_cq *cq = container_of(ibcq, struct bnxt_re_cq, ib_cq); 2925 int rc, entries; 2926 int cqe = attr->cqe; 2927 struct bnxt_qplib_nq *nq = NULL; 2928 unsigned int nq_alloc_cnt; 2929 u32 active_cqs; 2930 2931 if (attr->flags) 2932 return -EOPNOTSUPP; 2933 2934 /* Validate CQ fields */ 2935 if (cqe < 1 || cqe > dev_attr->max_cq_wqes) { 2936 ibdev_err(&rdev->ibdev, "Failed to create CQ -max exceeded"); 2937 return -EINVAL; 2938 } 2939 2940 cq->rdev = rdev; 2941 cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq); 2942 2943 entries = roundup_pow_of_two(cqe + 1); 2944 if (entries > dev_attr->max_cq_wqes + 1) 2945 entries = dev_attr->max_cq_wqes + 1; 2946 2947 cq->qplib_cq.sg_info.pgsize = PAGE_SIZE; 2948 cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT; 2949 if (udata) { 2950 struct bnxt_re_cq_req req; 2951 struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context( 2952 udata, struct bnxt_re_ucontext, ib_uctx); 2953 if (ib_copy_from_udata(&req, udata, sizeof(req))) { 2954 rc = -EFAULT; 2955 goto fail; 2956 } 2957 2958 cq->umem = ib_umem_get(&rdev->ibdev, req.cq_va, 2959 entries * sizeof(struct cq_base), 2960 IB_ACCESS_LOCAL_WRITE); 2961 if (IS_ERR(cq->umem)) { 2962 rc = PTR_ERR(cq->umem); 2963 goto fail; 2964 } 2965 cq->qplib_cq.sg_info.umem = cq->umem; 2966 cq->qplib_cq.dpi = &uctx->dpi; 2967 } else { 2968 cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL); 2969 cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe), 2970 GFP_KERNEL); 2971 if (!cq->cql) { 2972 rc = -ENOMEM; 2973 goto fail; 2974 } 2975 2976 cq->qplib_cq.dpi = &rdev->dpi_privileged; 2977 } 2978 /* 2979 * Allocating the NQ in a round robin fashion. nq_alloc_cnt is a 2980 * used for getting the NQ index. 2981 */ 2982 nq_alloc_cnt = atomic_inc_return(&rdev->nq_alloc_cnt); 2983 nq = &rdev->nq[nq_alloc_cnt % (rdev->num_msix - 1)]; 2984 cq->qplib_cq.max_wqe = entries; 2985 cq->qplib_cq.cnq_hw_ring_id = nq->ring_id; 2986 cq->qplib_cq.nq = nq; 2987 2988 rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq); 2989 if (rc) { 2990 ibdev_err(&rdev->ibdev, "Failed to create HW CQ"); 2991 goto fail; 2992 } 2993 2994 cq->ib_cq.cqe = entries; 2995 cq->cq_period = cq->qplib_cq.period; 2996 nq->budget++; 2997 2998 active_cqs = atomic_inc_return(&rdev->stats.res.cq_count); 2999 if (active_cqs > rdev->stats.res.cq_watermark) 3000 rdev->stats.res.cq_watermark = active_cqs; 3001 spin_lock_init(&cq->cq_lock); 3002 3003 if (udata) { 3004 struct bnxt_re_cq_resp resp; 3005 3006 resp.cqid = cq->qplib_cq.id; 3007 resp.tail = cq->qplib_cq.hwq.cons; 3008 resp.phase = cq->qplib_cq.period; 3009 resp.rsvd = 0; 3010 rc = ib_copy_to_udata(udata, &resp, sizeof(resp)); 3011 if (rc) { 3012 ibdev_err(&rdev->ibdev, "Failed to copy CQ udata"); 3013 bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq); 3014 goto c2fail; 3015 } 3016 } 3017 3018 return 0; 3019 3020 c2fail: 3021 ib_umem_release(cq->umem); 3022 fail: 3023 kfree(cq->cql); 3024 return rc; 3025 } 3026 3027 static void bnxt_re_resize_cq_complete(struct bnxt_re_cq *cq) 3028 { 3029 struct bnxt_re_dev *rdev = cq->rdev; 3030 3031 bnxt_qplib_resize_cq_complete(&rdev->qplib_res, &cq->qplib_cq); 3032 3033 cq->qplib_cq.max_wqe = cq->resize_cqe; 3034 if (cq->resize_umem) { 3035 ib_umem_release(cq->umem); 3036 cq->umem = cq->resize_umem; 3037 cq->resize_umem = NULL; 3038 cq->resize_cqe = 0; 3039 } 3040 } 3041 3042 int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) 3043 { 3044 struct bnxt_qplib_sg_info sg_info = {}; 3045 struct bnxt_qplib_dpi *orig_dpi = NULL; 3046 struct bnxt_qplib_dev_attr *dev_attr; 3047 struct bnxt_re_ucontext *uctx = NULL; 3048 struct bnxt_re_resize_cq_req req; 3049 struct bnxt_re_dev *rdev; 3050 struct bnxt_re_cq *cq; 3051 int rc, entries; 3052 3053 cq = container_of(ibcq, struct bnxt_re_cq, ib_cq); 3054 rdev = cq->rdev; 3055 dev_attr = &rdev->dev_attr; 3056 if (!ibcq->uobject) { 3057 ibdev_err(&rdev->ibdev, "Kernel CQ Resize not supported"); 3058 return -EOPNOTSUPP; 3059 } 3060 3061 if (cq->resize_umem) { 3062 ibdev_err(&rdev->ibdev, "Resize CQ %#x failed - Busy", 3063 cq->qplib_cq.id); 3064 return -EBUSY; 3065 } 3066 3067 /* Check the requested cq depth out of supported depth */ 3068 if (cqe < 1 || cqe > dev_attr->max_cq_wqes) { 3069 ibdev_err(&rdev->ibdev, "Resize CQ %#x failed - out of range cqe %d", 3070 cq->qplib_cq.id, cqe); 3071 return -EINVAL; 3072 } 3073 3074 entries = roundup_pow_of_two(cqe + 1); 3075 if (entries > dev_attr->max_cq_wqes + 1) 3076 entries = dev_attr->max_cq_wqes + 1; 3077 3078 uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, 3079 ib_uctx); 3080 /* uverbs consumer */ 3081 if (ib_copy_from_udata(&req, udata, sizeof(req))) { 3082 rc = -EFAULT; 3083 goto fail; 3084 } 3085 3086 cq->resize_umem = ib_umem_get(&rdev->ibdev, req.cq_va, 3087 entries * sizeof(struct cq_base), 3088 IB_ACCESS_LOCAL_WRITE); 3089 if (IS_ERR(cq->resize_umem)) { 3090 rc = PTR_ERR(cq->resize_umem); 3091 cq->resize_umem = NULL; 3092 ibdev_err(&rdev->ibdev, "%s: ib_umem_get failed! rc = %d\n", 3093 __func__, rc); 3094 goto fail; 3095 } 3096 cq->resize_cqe = entries; 3097 memcpy(&sg_info, &cq->qplib_cq.sg_info, sizeof(sg_info)); 3098 orig_dpi = cq->qplib_cq.dpi; 3099 3100 cq->qplib_cq.sg_info.umem = cq->resize_umem; 3101 cq->qplib_cq.sg_info.pgsize = PAGE_SIZE; 3102 cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT; 3103 cq->qplib_cq.dpi = &uctx->dpi; 3104 3105 rc = bnxt_qplib_resize_cq(&rdev->qplib_res, &cq->qplib_cq, entries); 3106 if (rc) { 3107 ibdev_err(&rdev->ibdev, "Resize HW CQ %#x failed!", 3108 cq->qplib_cq.id); 3109 goto fail; 3110 } 3111 3112 cq->ib_cq.cqe = cq->resize_cqe; 3113 atomic_inc(&rdev->stats.res.resize_count); 3114 3115 return 0; 3116 3117 fail: 3118 if (cq->resize_umem) { 3119 ib_umem_release(cq->resize_umem); 3120 cq->resize_umem = NULL; 3121 cq->resize_cqe = 0; 3122 memcpy(&cq->qplib_cq.sg_info, &sg_info, sizeof(sg_info)); 3123 cq->qplib_cq.dpi = orig_dpi; 3124 } 3125 return rc; 3126 } 3127 3128 static u8 __req_to_ib_wc_status(u8 qstatus) 3129 { 3130 switch (qstatus) { 3131 case CQ_REQ_STATUS_OK: 3132 return IB_WC_SUCCESS; 3133 case CQ_REQ_STATUS_BAD_RESPONSE_ERR: 3134 return IB_WC_BAD_RESP_ERR; 3135 case CQ_REQ_STATUS_LOCAL_LENGTH_ERR: 3136 return IB_WC_LOC_LEN_ERR; 3137 case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR: 3138 return IB_WC_LOC_QP_OP_ERR; 3139 case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR: 3140 return IB_WC_LOC_PROT_ERR; 3141 case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR: 3142 return IB_WC_GENERAL_ERR; 3143 case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR: 3144 return IB_WC_REM_INV_REQ_ERR; 3145 case CQ_REQ_STATUS_REMOTE_ACCESS_ERR: 3146 return IB_WC_REM_ACCESS_ERR; 3147 case CQ_REQ_STATUS_REMOTE_OPERATION_ERR: 3148 return IB_WC_REM_OP_ERR; 3149 case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR: 3150 return IB_WC_RNR_RETRY_EXC_ERR; 3151 case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR: 3152 return IB_WC_RETRY_EXC_ERR; 3153 case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR: 3154 return IB_WC_WR_FLUSH_ERR; 3155 default: 3156 return IB_WC_GENERAL_ERR; 3157 } 3158 return 0; 3159 } 3160 3161 static u8 __rawqp1_to_ib_wc_status(u8 qstatus) 3162 { 3163 switch (qstatus) { 3164 case CQ_RES_RAWETH_QP1_STATUS_OK: 3165 return IB_WC_SUCCESS; 3166 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR: 3167 return IB_WC_LOC_ACCESS_ERR; 3168 case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR: 3169 return IB_WC_LOC_LEN_ERR; 3170 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR: 3171 return IB_WC_LOC_PROT_ERR; 3172 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR: 3173 return IB_WC_LOC_QP_OP_ERR; 3174 case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR: 3175 return IB_WC_GENERAL_ERR; 3176 case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR: 3177 return IB_WC_WR_FLUSH_ERR; 3178 case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR: 3179 return IB_WC_WR_FLUSH_ERR; 3180 default: 3181 return IB_WC_GENERAL_ERR; 3182 } 3183 } 3184 3185 static u8 __rc_to_ib_wc_status(u8 qstatus) 3186 { 3187 switch (qstatus) { 3188 case CQ_RES_RC_STATUS_OK: 3189 return IB_WC_SUCCESS; 3190 case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR: 3191 return IB_WC_LOC_ACCESS_ERR; 3192 case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR: 3193 return IB_WC_LOC_LEN_ERR; 3194 case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR: 3195 return IB_WC_LOC_PROT_ERR; 3196 case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR: 3197 return IB_WC_LOC_QP_OP_ERR; 3198 case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR: 3199 return IB_WC_GENERAL_ERR; 3200 case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR: 3201 return IB_WC_REM_INV_REQ_ERR; 3202 case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR: 3203 return IB_WC_WR_FLUSH_ERR; 3204 case CQ_RES_RC_STATUS_HW_FLUSH_ERR: 3205 return IB_WC_WR_FLUSH_ERR; 3206 default: 3207 return IB_WC_GENERAL_ERR; 3208 } 3209 } 3210 3211 static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe) 3212 { 3213 switch (cqe->type) { 3214 case BNXT_QPLIB_SWQE_TYPE_SEND: 3215 wc->opcode = IB_WC_SEND; 3216 break; 3217 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM: 3218 wc->opcode = IB_WC_SEND; 3219 wc->wc_flags |= IB_WC_WITH_IMM; 3220 break; 3221 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV: 3222 wc->opcode = IB_WC_SEND; 3223 wc->wc_flags |= IB_WC_WITH_INVALIDATE; 3224 break; 3225 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE: 3226 wc->opcode = IB_WC_RDMA_WRITE; 3227 break; 3228 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM: 3229 wc->opcode = IB_WC_RDMA_WRITE; 3230 wc->wc_flags |= IB_WC_WITH_IMM; 3231 break; 3232 case BNXT_QPLIB_SWQE_TYPE_RDMA_READ: 3233 wc->opcode = IB_WC_RDMA_READ; 3234 break; 3235 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP: 3236 wc->opcode = IB_WC_COMP_SWAP; 3237 break; 3238 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD: 3239 wc->opcode = IB_WC_FETCH_ADD; 3240 break; 3241 case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV: 3242 wc->opcode = IB_WC_LOCAL_INV; 3243 break; 3244 case BNXT_QPLIB_SWQE_TYPE_REG_MR: 3245 wc->opcode = IB_WC_REG_MR; 3246 break; 3247 default: 3248 wc->opcode = IB_WC_SEND; 3249 break; 3250 } 3251 3252 wc->status = __req_to_ib_wc_status(cqe->status); 3253 } 3254 3255 static int bnxt_re_check_packet_type(u16 raweth_qp1_flags, 3256 u16 raweth_qp1_flags2) 3257 { 3258 bool is_ipv6 = false, is_ipv4 = false; 3259 3260 /* raweth_qp1_flags Bit 9-6 indicates itype */ 3261 if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE) 3262 != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE) 3263 return -1; 3264 3265 if (raweth_qp1_flags2 & 3266 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC && 3267 raweth_qp1_flags2 & 3268 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) { 3269 /* raweth_qp1_flags2 Bit 8 indicates ip_type. 0-v4 1 - v6 */ 3270 (raweth_qp1_flags2 & 3271 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE) ? 3272 (is_ipv6 = true) : (is_ipv4 = true); 3273 return ((is_ipv6) ? 3274 BNXT_RE_ROCEV2_IPV6_PACKET : 3275 BNXT_RE_ROCEV2_IPV4_PACKET); 3276 } else { 3277 return BNXT_RE_ROCE_V1_PACKET; 3278 } 3279 } 3280 3281 static int bnxt_re_to_ib_nw_type(int nw_type) 3282 { 3283 u8 nw_hdr_type = 0xFF; 3284 3285 switch (nw_type) { 3286 case BNXT_RE_ROCE_V1_PACKET: 3287 nw_hdr_type = RDMA_NETWORK_ROCE_V1; 3288 break; 3289 case BNXT_RE_ROCEV2_IPV4_PACKET: 3290 nw_hdr_type = RDMA_NETWORK_IPV4; 3291 break; 3292 case BNXT_RE_ROCEV2_IPV6_PACKET: 3293 nw_hdr_type = RDMA_NETWORK_IPV6; 3294 break; 3295 } 3296 return nw_hdr_type; 3297 } 3298 3299 static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev, 3300 void *rq_hdr_buf) 3301 { 3302 u8 *tmp_buf = NULL; 3303 struct ethhdr *eth_hdr; 3304 u16 eth_type; 3305 bool rc = false; 3306 3307 tmp_buf = (u8 *)rq_hdr_buf; 3308 /* 3309 * If dest mac is not same as I/F mac, this could be a 3310 * loopback address or multicast address, check whether 3311 * it is a loopback packet 3312 */ 3313 if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) { 3314 tmp_buf += 4; 3315 /* Check the ether type */ 3316 eth_hdr = (struct ethhdr *)tmp_buf; 3317 eth_type = ntohs(eth_hdr->h_proto); 3318 switch (eth_type) { 3319 case ETH_P_IBOE: 3320 rc = true; 3321 break; 3322 case ETH_P_IP: 3323 case ETH_P_IPV6: { 3324 u32 len; 3325 struct udphdr *udp_hdr; 3326 3327 len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) : 3328 sizeof(struct ipv6hdr)); 3329 tmp_buf += sizeof(struct ethhdr) + len; 3330 udp_hdr = (struct udphdr *)tmp_buf; 3331 if (ntohs(udp_hdr->dest) == 3332 ROCE_V2_UDP_DPORT) 3333 rc = true; 3334 break; 3335 } 3336 default: 3337 break; 3338 } 3339 } 3340 3341 return rc; 3342 } 3343 3344 static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *gsi_qp, 3345 struct bnxt_qplib_cqe *cqe) 3346 { 3347 struct bnxt_re_dev *rdev = gsi_qp->rdev; 3348 struct bnxt_re_sqp_entries *sqp_entry = NULL; 3349 struct bnxt_re_qp *gsi_sqp = rdev->gsi_ctx.gsi_sqp; 3350 dma_addr_t shrq_hdr_buf_map; 3351 struct ib_sge s_sge[2] = {}; 3352 struct ib_sge r_sge[2] = {}; 3353 struct bnxt_re_ah *gsi_sah; 3354 struct ib_recv_wr rwr = {}; 3355 dma_addr_t rq_hdr_buf_map; 3356 struct ib_ud_wr udwr = {}; 3357 struct ib_send_wr *swr; 3358 u32 skip_bytes = 0; 3359 int pkt_type = 0; 3360 void *rq_hdr_buf; 3361 u32 offset = 0; 3362 u32 tbl_idx; 3363 int rc; 3364 3365 swr = &udwr.wr; 3366 tbl_idx = cqe->wr_id; 3367 3368 rq_hdr_buf = gsi_qp->qplib_qp.rq_hdr_buf + 3369 (tbl_idx * gsi_qp->qplib_qp.rq_hdr_buf_size); 3370 rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp, 3371 tbl_idx); 3372 3373 /* Shadow QP header buffer */ 3374 shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp, 3375 tbl_idx); 3376 sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx]; 3377 3378 /* Store this cqe */ 3379 memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe)); 3380 sqp_entry->qp1_qp = gsi_qp; 3381 3382 /* Find packet type from the cqe */ 3383 3384 pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags, 3385 cqe->raweth_qp1_flags2); 3386 if (pkt_type < 0) { 3387 ibdev_err(&rdev->ibdev, "Invalid packet\n"); 3388 return -EINVAL; 3389 } 3390 3391 /* Adjust the offset for the user buffer and post in the rq */ 3392 3393 if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET) 3394 offset = 20; 3395 3396 /* 3397 * QP1 loopback packet has 4 bytes of internal header before 3398 * ether header. Skip these four bytes. 3399 */ 3400 if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf)) 3401 skip_bytes = 4; 3402 3403 /* First send SGE . Skip the ether header*/ 3404 s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE 3405 + skip_bytes; 3406 s_sge[0].lkey = 0xFFFFFFFF; 3407 s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 : 3408 BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6; 3409 3410 /* Second Send SGE */ 3411 s_sge[1].addr = s_sge[0].addr + s_sge[0].length + 3412 BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE; 3413 if (pkt_type != BNXT_RE_ROCE_V1_PACKET) 3414 s_sge[1].addr += 8; 3415 s_sge[1].lkey = 0xFFFFFFFF; 3416 s_sge[1].length = 256; 3417 3418 /* First recv SGE */ 3419 3420 r_sge[0].addr = shrq_hdr_buf_map; 3421 r_sge[0].lkey = 0xFFFFFFFF; 3422 r_sge[0].length = 40; 3423 3424 r_sge[1].addr = sqp_entry->sge.addr + offset; 3425 r_sge[1].lkey = sqp_entry->sge.lkey; 3426 r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset; 3427 3428 /* Create receive work request */ 3429 rwr.num_sge = 2; 3430 rwr.sg_list = r_sge; 3431 rwr.wr_id = tbl_idx; 3432 rwr.next = NULL; 3433 3434 rc = bnxt_re_post_recv_shadow_qp(rdev, gsi_sqp, &rwr); 3435 if (rc) { 3436 ibdev_err(&rdev->ibdev, 3437 "Failed to post Rx buffers to shadow QP"); 3438 return -ENOMEM; 3439 } 3440 3441 swr->num_sge = 2; 3442 swr->sg_list = s_sge; 3443 swr->wr_id = tbl_idx; 3444 swr->opcode = IB_WR_SEND; 3445 swr->next = NULL; 3446 gsi_sah = rdev->gsi_ctx.gsi_sah; 3447 udwr.ah = &gsi_sah->ib_ah; 3448 udwr.remote_qpn = gsi_sqp->qplib_qp.id; 3449 udwr.remote_qkey = gsi_sqp->qplib_qp.qkey; 3450 3451 /* post data received in the send queue */ 3452 return bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr); 3453 } 3454 3455 static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc, 3456 struct bnxt_qplib_cqe *cqe) 3457 { 3458 wc->opcode = IB_WC_RECV; 3459 wc->status = __rawqp1_to_ib_wc_status(cqe->status); 3460 wc->wc_flags |= IB_WC_GRH; 3461 } 3462 3463 static bool bnxt_re_check_if_vlan_valid(struct bnxt_re_dev *rdev, 3464 u16 vlan_id) 3465 { 3466 /* 3467 * Check if the vlan is configured in the host. If not configured, it 3468 * can be a transparent VLAN. So dont report the vlan id. 3469 */ 3470 if (!__vlan_find_dev_deep_rcu(rdev->netdev, 3471 htons(ETH_P_8021Q), vlan_id)) 3472 return false; 3473 return true; 3474 } 3475 3476 static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe, 3477 u16 *vid, u8 *sl) 3478 { 3479 bool ret = false; 3480 u32 metadata; 3481 u16 tpid; 3482 3483 metadata = orig_cqe->raweth_qp1_metadata; 3484 if (orig_cqe->raweth_qp1_flags2 & 3485 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN) { 3486 tpid = ((metadata & 3487 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK) >> 3488 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT); 3489 if (tpid == ETH_P_8021Q) { 3490 *vid = metadata & 3491 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK; 3492 *sl = (metadata & 3493 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK) >> 3494 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT; 3495 ret = true; 3496 } 3497 } 3498 3499 return ret; 3500 } 3501 3502 static void bnxt_re_process_res_rc_wc(struct ib_wc *wc, 3503 struct bnxt_qplib_cqe *cqe) 3504 { 3505 wc->opcode = IB_WC_RECV; 3506 wc->status = __rc_to_ib_wc_status(cqe->status); 3507 3508 if (cqe->flags & CQ_RES_RC_FLAGS_IMM) 3509 wc->wc_flags |= IB_WC_WITH_IMM; 3510 if (cqe->flags & CQ_RES_RC_FLAGS_INV) 3511 wc->wc_flags |= IB_WC_WITH_INVALIDATE; 3512 if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) == 3513 (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) 3514 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM; 3515 } 3516 3517 static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *gsi_sqp, 3518 struct ib_wc *wc, 3519 struct bnxt_qplib_cqe *cqe) 3520 { 3521 struct bnxt_re_dev *rdev = gsi_sqp->rdev; 3522 struct bnxt_re_qp *gsi_qp = NULL; 3523 struct bnxt_qplib_cqe *orig_cqe = NULL; 3524 struct bnxt_re_sqp_entries *sqp_entry = NULL; 3525 int nw_type; 3526 u32 tbl_idx; 3527 u16 vlan_id; 3528 u8 sl; 3529 3530 tbl_idx = cqe->wr_id; 3531 3532 sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx]; 3533 gsi_qp = sqp_entry->qp1_qp; 3534 orig_cqe = &sqp_entry->cqe; 3535 3536 wc->wr_id = sqp_entry->wrid; 3537 wc->byte_len = orig_cqe->length; 3538 wc->qp = &gsi_qp->ib_qp; 3539 3540 wc->ex.imm_data = orig_cqe->immdata; 3541 wc->src_qp = orig_cqe->src_qp; 3542 memcpy(wc->smac, orig_cqe->smac, ETH_ALEN); 3543 if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) { 3544 if (bnxt_re_check_if_vlan_valid(rdev, vlan_id)) { 3545 wc->vlan_id = vlan_id; 3546 wc->sl = sl; 3547 wc->wc_flags |= IB_WC_WITH_VLAN; 3548 } 3549 } 3550 wc->port_num = 1; 3551 wc->vendor_err = orig_cqe->status; 3552 3553 wc->opcode = IB_WC_RECV; 3554 wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status); 3555 wc->wc_flags |= IB_WC_GRH; 3556 3557 nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags, 3558 orig_cqe->raweth_qp1_flags2); 3559 if (nw_type >= 0) { 3560 wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type); 3561 wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE; 3562 } 3563 } 3564 3565 static void bnxt_re_process_res_ud_wc(struct bnxt_re_qp *qp, 3566 struct ib_wc *wc, 3567 struct bnxt_qplib_cqe *cqe) 3568 { 3569 struct bnxt_re_dev *rdev; 3570 u16 vlan_id = 0; 3571 u8 nw_type; 3572 3573 rdev = qp->rdev; 3574 wc->opcode = IB_WC_RECV; 3575 wc->status = __rc_to_ib_wc_status(cqe->status); 3576 3577 if (cqe->flags & CQ_RES_UD_FLAGS_IMM) 3578 wc->wc_flags |= IB_WC_WITH_IMM; 3579 /* report only on GSI QP for Thor */ 3580 if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI) { 3581 wc->wc_flags |= IB_WC_GRH; 3582 memcpy(wc->smac, cqe->smac, ETH_ALEN); 3583 wc->wc_flags |= IB_WC_WITH_SMAC; 3584 if (cqe->flags & CQ_RES_UD_FLAGS_META_FORMAT_VLAN) { 3585 vlan_id = (cqe->cfa_meta & 0xFFF); 3586 } 3587 /* Mark only if vlan_id is non zero */ 3588 if (vlan_id && bnxt_re_check_if_vlan_valid(rdev, vlan_id)) { 3589 wc->vlan_id = vlan_id; 3590 wc->wc_flags |= IB_WC_WITH_VLAN; 3591 } 3592 nw_type = (cqe->flags & CQ_RES_UD_FLAGS_ROCE_IP_VER_MASK) >> 3593 CQ_RES_UD_FLAGS_ROCE_IP_VER_SFT; 3594 wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type); 3595 wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE; 3596 } 3597 3598 } 3599 3600 static int send_phantom_wqe(struct bnxt_re_qp *qp) 3601 { 3602 struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp; 3603 unsigned long flags; 3604 int rc; 3605 3606 spin_lock_irqsave(&qp->sq_lock, flags); 3607 3608 rc = bnxt_re_bind_fence_mw(lib_qp); 3609 if (!rc) { 3610 lib_qp->sq.phantom_wqe_cnt++; 3611 ibdev_dbg(&qp->rdev->ibdev, 3612 "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n", 3613 lib_qp->id, lib_qp->sq.hwq.prod, 3614 HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq), 3615 lib_qp->sq.phantom_wqe_cnt); 3616 } 3617 3618 spin_unlock_irqrestore(&qp->sq_lock, flags); 3619 return rc; 3620 } 3621 3622 int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc) 3623 { 3624 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq); 3625 struct bnxt_re_qp *qp, *sh_qp; 3626 struct bnxt_qplib_cqe *cqe; 3627 int i, ncqe, budget; 3628 struct bnxt_qplib_q *sq; 3629 struct bnxt_qplib_qp *lib_qp; 3630 u32 tbl_idx; 3631 struct bnxt_re_sqp_entries *sqp_entry = NULL; 3632 unsigned long flags; 3633 3634 /* User CQ; the only processing we do is to 3635 * complete any pending CQ resize operation. 3636 */ 3637 if (cq->umem) { 3638 if (cq->resize_umem) 3639 bnxt_re_resize_cq_complete(cq); 3640 return 0; 3641 } 3642 3643 spin_lock_irqsave(&cq->cq_lock, flags); 3644 budget = min_t(u32, num_entries, cq->max_cql); 3645 num_entries = budget; 3646 if (!cq->cql) { 3647 ibdev_err(&cq->rdev->ibdev, "POLL CQ : no CQL to use"); 3648 goto exit; 3649 } 3650 cqe = &cq->cql[0]; 3651 while (budget) { 3652 lib_qp = NULL; 3653 ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp); 3654 if (lib_qp) { 3655 sq = &lib_qp->sq; 3656 if (sq->send_phantom) { 3657 qp = container_of(lib_qp, 3658 struct bnxt_re_qp, qplib_qp); 3659 if (send_phantom_wqe(qp) == -ENOMEM) 3660 ibdev_err(&cq->rdev->ibdev, 3661 "Phantom failed! Scheduled to send again\n"); 3662 else 3663 sq->send_phantom = false; 3664 } 3665 } 3666 if (ncqe < budget) 3667 ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq, 3668 cqe + ncqe, 3669 budget - ncqe); 3670 3671 if (!ncqe) 3672 break; 3673 3674 for (i = 0; i < ncqe; i++, cqe++) { 3675 /* Transcribe each qplib_wqe back to ib_wc */ 3676 memset(wc, 0, sizeof(*wc)); 3677 3678 wc->wr_id = cqe->wr_id; 3679 wc->byte_len = cqe->length; 3680 qp = container_of 3681 ((struct bnxt_qplib_qp *) 3682 (unsigned long)(cqe->qp_handle), 3683 struct bnxt_re_qp, qplib_qp); 3684 wc->qp = &qp->ib_qp; 3685 wc->ex.imm_data = cqe->immdata; 3686 wc->src_qp = cqe->src_qp; 3687 memcpy(wc->smac, cqe->smac, ETH_ALEN); 3688 wc->port_num = 1; 3689 wc->vendor_err = cqe->status; 3690 3691 switch (cqe->opcode) { 3692 case CQ_BASE_CQE_TYPE_REQ: 3693 sh_qp = qp->rdev->gsi_ctx.gsi_sqp; 3694 if (sh_qp && 3695 qp->qplib_qp.id == sh_qp->qplib_qp.id) { 3696 /* Handle this completion with 3697 * the stored completion 3698 */ 3699 memset(wc, 0, sizeof(*wc)); 3700 continue; 3701 } 3702 bnxt_re_process_req_wc(wc, cqe); 3703 break; 3704 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1: 3705 if (!cqe->status) { 3706 int rc = 0; 3707 3708 rc = bnxt_re_process_raw_qp_pkt_rx 3709 (qp, cqe); 3710 if (!rc) { 3711 memset(wc, 0, sizeof(*wc)); 3712 continue; 3713 } 3714 cqe->status = -1; 3715 } 3716 /* Errors need not be looped back. 3717 * But change the wr_id to the one 3718 * stored in the table 3719 */ 3720 tbl_idx = cqe->wr_id; 3721 sqp_entry = &cq->rdev->gsi_ctx.sqp_tbl[tbl_idx]; 3722 wc->wr_id = sqp_entry->wrid; 3723 bnxt_re_process_res_rawqp1_wc(wc, cqe); 3724 break; 3725 case CQ_BASE_CQE_TYPE_RES_RC: 3726 bnxt_re_process_res_rc_wc(wc, cqe); 3727 break; 3728 case CQ_BASE_CQE_TYPE_RES_UD: 3729 sh_qp = qp->rdev->gsi_ctx.gsi_sqp; 3730 if (sh_qp && 3731 qp->qplib_qp.id == sh_qp->qplib_qp.id) { 3732 /* Handle this completion with 3733 * the stored completion 3734 */ 3735 if (cqe->status) { 3736 continue; 3737 } else { 3738 bnxt_re_process_res_shadow_qp_wc 3739 (qp, wc, cqe); 3740 break; 3741 } 3742 } 3743 bnxt_re_process_res_ud_wc(qp, wc, cqe); 3744 break; 3745 default: 3746 ibdev_err(&cq->rdev->ibdev, 3747 "POLL CQ : type 0x%x not handled", 3748 cqe->opcode); 3749 continue; 3750 } 3751 wc++; 3752 budget--; 3753 } 3754 } 3755 exit: 3756 spin_unlock_irqrestore(&cq->cq_lock, flags); 3757 return num_entries - budget; 3758 } 3759 3760 int bnxt_re_req_notify_cq(struct ib_cq *ib_cq, 3761 enum ib_cq_notify_flags ib_cqn_flags) 3762 { 3763 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq); 3764 int type = 0, rc = 0; 3765 unsigned long flags; 3766 3767 spin_lock_irqsave(&cq->cq_lock, flags); 3768 /* Trigger on the very next completion */ 3769 if (ib_cqn_flags & IB_CQ_NEXT_COMP) 3770 type = DBC_DBC_TYPE_CQ_ARMALL; 3771 /* Trigger on the next solicited completion */ 3772 else if (ib_cqn_flags & IB_CQ_SOLICITED) 3773 type = DBC_DBC_TYPE_CQ_ARMSE; 3774 3775 /* Poll to see if there are missed events */ 3776 if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) && 3777 !(bnxt_qplib_is_cq_empty(&cq->qplib_cq))) { 3778 rc = 1; 3779 goto exit; 3780 } 3781 bnxt_qplib_req_notify_cq(&cq->qplib_cq, type); 3782 3783 exit: 3784 spin_unlock_irqrestore(&cq->cq_lock, flags); 3785 return rc; 3786 } 3787 3788 /* Memory Regions */ 3789 struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags) 3790 { 3791 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); 3792 struct bnxt_re_dev *rdev = pd->rdev; 3793 struct bnxt_re_mr *mr; 3794 u32 active_mrs; 3795 int rc; 3796 3797 mr = kzalloc(sizeof(*mr), GFP_KERNEL); 3798 if (!mr) 3799 return ERR_PTR(-ENOMEM); 3800 3801 mr->rdev = rdev; 3802 mr->qplib_mr.pd = &pd->qplib_pd; 3803 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags); 3804 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR; 3805 3806 /* Allocate and register 0 as the address */ 3807 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr); 3808 if (rc) 3809 goto fail; 3810 3811 mr->qplib_mr.hwq.level = PBL_LVL_MAX; 3812 mr->qplib_mr.total_size = -1; /* Infinte length */ 3813 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL, 0, 3814 PAGE_SIZE); 3815 if (rc) 3816 goto fail_mr; 3817 3818 mr->ib_mr.lkey = mr->qplib_mr.lkey; 3819 if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ | 3820 IB_ACCESS_REMOTE_ATOMIC)) 3821 mr->ib_mr.rkey = mr->ib_mr.lkey; 3822 active_mrs = atomic_inc_return(&rdev->stats.res.mr_count); 3823 if (active_mrs > rdev->stats.res.mr_watermark) 3824 rdev->stats.res.mr_watermark = active_mrs; 3825 3826 return &mr->ib_mr; 3827 3828 fail_mr: 3829 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); 3830 fail: 3831 kfree(mr); 3832 return ERR_PTR(rc); 3833 } 3834 3835 int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata) 3836 { 3837 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr); 3838 struct bnxt_re_dev *rdev = mr->rdev; 3839 int rc; 3840 3841 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); 3842 if (rc) { 3843 ibdev_err(&rdev->ibdev, "Dereg MR failed: %#x\n", rc); 3844 return rc; 3845 } 3846 3847 if (mr->pages) { 3848 rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res, 3849 &mr->qplib_frpl); 3850 kfree(mr->pages); 3851 mr->npages = 0; 3852 mr->pages = NULL; 3853 } 3854 ib_umem_release(mr->ib_umem); 3855 3856 kfree(mr); 3857 atomic_dec(&rdev->stats.res.mr_count); 3858 return rc; 3859 } 3860 3861 static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr) 3862 { 3863 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr); 3864 3865 if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs)) 3866 return -ENOMEM; 3867 3868 mr->pages[mr->npages++] = addr; 3869 return 0; 3870 } 3871 3872 int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents, 3873 unsigned int *sg_offset) 3874 { 3875 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr); 3876 3877 mr->npages = 0; 3878 return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page); 3879 } 3880 3881 struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type, 3882 u32 max_num_sg) 3883 { 3884 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); 3885 struct bnxt_re_dev *rdev = pd->rdev; 3886 struct bnxt_re_mr *mr = NULL; 3887 u32 active_mrs; 3888 int rc; 3889 3890 if (type != IB_MR_TYPE_MEM_REG) { 3891 ibdev_dbg(&rdev->ibdev, "MR type 0x%x not supported", type); 3892 return ERR_PTR(-EINVAL); 3893 } 3894 if (max_num_sg > MAX_PBL_LVL_1_PGS) 3895 return ERR_PTR(-EINVAL); 3896 3897 mr = kzalloc(sizeof(*mr), GFP_KERNEL); 3898 if (!mr) 3899 return ERR_PTR(-ENOMEM); 3900 3901 mr->rdev = rdev; 3902 mr->qplib_mr.pd = &pd->qplib_pd; 3903 mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR; 3904 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR; 3905 3906 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr); 3907 if (rc) 3908 goto bail; 3909 3910 mr->ib_mr.lkey = mr->qplib_mr.lkey; 3911 mr->ib_mr.rkey = mr->ib_mr.lkey; 3912 3913 mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL); 3914 if (!mr->pages) { 3915 rc = -ENOMEM; 3916 goto fail; 3917 } 3918 rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res, 3919 &mr->qplib_frpl, max_num_sg); 3920 if (rc) { 3921 ibdev_err(&rdev->ibdev, 3922 "Failed to allocate HW FR page list"); 3923 goto fail_mr; 3924 } 3925 3926 active_mrs = atomic_inc_return(&rdev->stats.res.mr_count); 3927 if (active_mrs > rdev->stats.res.mr_watermark) 3928 rdev->stats.res.mr_watermark = active_mrs; 3929 return &mr->ib_mr; 3930 3931 fail_mr: 3932 kfree(mr->pages); 3933 fail: 3934 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); 3935 bail: 3936 kfree(mr); 3937 return ERR_PTR(rc); 3938 } 3939 3940 struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type, 3941 struct ib_udata *udata) 3942 { 3943 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); 3944 struct bnxt_re_dev *rdev = pd->rdev; 3945 struct bnxt_re_mw *mw; 3946 u32 active_mws; 3947 int rc; 3948 3949 mw = kzalloc(sizeof(*mw), GFP_KERNEL); 3950 if (!mw) 3951 return ERR_PTR(-ENOMEM); 3952 mw->rdev = rdev; 3953 mw->qplib_mw.pd = &pd->qplib_pd; 3954 3955 mw->qplib_mw.type = (type == IB_MW_TYPE_1 ? 3956 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 : 3957 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B); 3958 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw); 3959 if (rc) { 3960 ibdev_err(&rdev->ibdev, "Allocate MW failed!"); 3961 goto fail; 3962 } 3963 mw->ib_mw.rkey = mw->qplib_mw.rkey; 3964 3965 active_mws = atomic_inc_return(&rdev->stats.res.mw_count); 3966 if (active_mws > rdev->stats.res.mw_watermark) 3967 rdev->stats.res.mw_watermark = active_mws; 3968 return &mw->ib_mw; 3969 3970 fail: 3971 kfree(mw); 3972 return ERR_PTR(rc); 3973 } 3974 3975 int bnxt_re_dealloc_mw(struct ib_mw *ib_mw) 3976 { 3977 struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw); 3978 struct bnxt_re_dev *rdev = mw->rdev; 3979 int rc; 3980 3981 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw); 3982 if (rc) { 3983 ibdev_err(&rdev->ibdev, "Free MW failed: %#x\n", rc); 3984 return rc; 3985 } 3986 3987 kfree(mw); 3988 atomic_dec(&rdev->stats.res.mw_count); 3989 return rc; 3990 } 3991 3992 static struct ib_mr *__bnxt_re_user_reg_mr(struct ib_pd *ib_pd, u64 length, u64 virt_addr, 3993 int mr_access_flags, struct ib_umem *umem) 3994 { 3995 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); 3996 struct bnxt_re_dev *rdev = pd->rdev; 3997 unsigned long page_size; 3998 struct bnxt_re_mr *mr; 3999 int umem_pgs, rc; 4000 u32 active_mrs; 4001 4002 if (length > BNXT_RE_MAX_MR_SIZE) { 4003 ibdev_err(&rdev->ibdev, "MR Size: %lld > Max supported:%lld\n", 4004 length, BNXT_RE_MAX_MR_SIZE); 4005 return ERR_PTR(-ENOMEM); 4006 } 4007 4008 page_size = ib_umem_find_best_pgsz(umem, BNXT_RE_PAGE_SIZE_SUPPORTED, virt_addr); 4009 if (!page_size) { 4010 ibdev_err(&rdev->ibdev, "umem page size unsupported!"); 4011 return ERR_PTR(-EINVAL); 4012 } 4013 4014 mr = kzalloc(sizeof(*mr), GFP_KERNEL); 4015 if (!mr) 4016 return ERR_PTR(-ENOMEM); 4017 4018 mr->rdev = rdev; 4019 mr->qplib_mr.pd = &pd->qplib_pd; 4020 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags); 4021 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR; 4022 4023 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr); 4024 if (rc) { 4025 ibdev_err(&rdev->ibdev, "Failed to allocate MR rc = %d", rc); 4026 rc = -EIO; 4027 goto free_mr; 4028 } 4029 /* The fixed portion of the rkey is the same as the lkey */ 4030 mr->ib_mr.rkey = mr->qplib_mr.rkey; 4031 mr->ib_umem = umem; 4032 mr->qplib_mr.va = virt_addr; 4033 mr->qplib_mr.total_size = length; 4034 4035 umem_pgs = ib_umem_num_dma_blocks(umem, page_size); 4036 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, umem, 4037 umem_pgs, page_size); 4038 if (rc) { 4039 ibdev_err(&rdev->ibdev, "Failed to register user MR - rc = %d\n", rc); 4040 rc = -EIO; 4041 goto free_mrw; 4042 } 4043 4044 mr->ib_mr.lkey = mr->qplib_mr.lkey; 4045 mr->ib_mr.rkey = mr->qplib_mr.lkey; 4046 active_mrs = atomic_inc_return(&rdev->stats.res.mr_count); 4047 if (active_mrs > rdev->stats.res.mr_watermark) 4048 rdev->stats.res.mr_watermark = active_mrs; 4049 4050 return &mr->ib_mr; 4051 4052 free_mrw: 4053 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); 4054 free_mr: 4055 kfree(mr); 4056 return ERR_PTR(rc); 4057 } 4058 4059 struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length, 4060 u64 virt_addr, int mr_access_flags, 4061 struct ib_udata *udata) 4062 { 4063 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); 4064 struct bnxt_re_dev *rdev = pd->rdev; 4065 struct ib_umem *umem; 4066 struct ib_mr *ib_mr; 4067 4068 umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags); 4069 if (IS_ERR(umem)) 4070 return ERR_CAST(umem); 4071 4072 ib_mr = __bnxt_re_user_reg_mr(ib_pd, length, virt_addr, mr_access_flags, umem); 4073 if (IS_ERR(ib_mr)) 4074 ib_umem_release(umem); 4075 return ib_mr; 4076 } 4077 4078 struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start, 4079 u64 length, u64 virt_addr, int fd, 4080 int mr_access_flags, struct ib_udata *udata) 4081 { 4082 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); 4083 struct bnxt_re_dev *rdev = pd->rdev; 4084 struct ib_umem_dmabuf *umem_dmabuf; 4085 struct ib_umem *umem; 4086 struct ib_mr *ib_mr; 4087 4088 umem_dmabuf = ib_umem_dmabuf_get_pinned(&rdev->ibdev, start, length, 4089 fd, mr_access_flags); 4090 if (IS_ERR(umem_dmabuf)) 4091 return ERR_CAST(umem_dmabuf); 4092 4093 umem = &umem_dmabuf->umem; 4094 4095 ib_mr = __bnxt_re_user_reg_mr(ib_pd, length, virt_addr, mr_access_flags, umem); 4096 if (IS_ERR(ib_mr)) 4097 ib_umem_release(umem); 4098 return ib_mr; 4099 } 4100 4101 int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata) 4102 { 4103 struct ib_device *ibdev = ctx->device; 4104 struct bnxt_re_ucontext *uctx = 4105 container_of(ctx, struct bnxt_re_ucontext, ib_uctx); 4106 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); 4107 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr; 4108 struct bnxt_re_user_mmap_entry *entry; 4109 struct bnxt_re_uctx_resp resp = {}; 4110 u32 chip_met_rev_num = 0; 4111 int rc; 4112 4113 ibdev_dbg(ibdev, "ABI version requested %u", ibdev->ops.uverbs_abi_ver); 4114 4115 if (ibdev->ops.uverbs_abi_ver != BNXT_RE_ABI_VERSION) { 4116 ibdev_dbg(ibdev, " is different from the device %d ", 4117 BNXT_RE_ABI_VERSION); 4118 return -EPERM; 4119 } 4120 4121 uctx->rdev = rdev; 4122 4123 uctx->shpg = (void *)__get_free_page(GFP_KERNEL); 4124 if (!uctx->shpg) { 4125 rc = -ENOMEM; 4126 goto fail; 4127 } 4128 spin_lock_init(&uctx->sh_lock); 4129 4130 resp.comp_mask = BNXT_RE_UCNTX_CMASK_HAVE_CCTX; 4131 chip_met_rev_num = rdev->chip_ctx->chip_num; 4132 chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_rev & 0xFF) << 4133 BNXT_RE_CHIP_ID0_CHIP_REV_SFT; 4134 chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_metal & 0xFF) << 4135 BNXT_RE_CHIP_ID0_CHIP_MET_SFT; 4136 resp.chip_id0 = chip_met_rev_num; 4137 /*Temp, Use xa_alloc instead */ 4138 resp.dev_id = rdev->en_dev->pdev->devfn; 4139 resp.max_qp = rdev->qplib_ctx.qpc_count; 4140 resp.pg_size = PAGE_SIZE; 4141 resp.cqe_sz = sizeof(struct cq_base); 4142 resp.max_cqd = dev_attr->max_cq_wqes; 4143 4144 resp.comp_mask |= BNXT_RE_UCNTX_CMASK_HAVE_MODE; 4145 resp.mode = rdev->chip_ctx->modes.wqe_mode; 4146 4147 if (rdev->chip_ctx->modes.db_push) 4148 resp.comp_mask |= BNXT_RE_UCNTX_CMASK_WC_DPI_ENABLED; 4149 4150 entry = bnxt_re_mmap_entry_insert(uctx, 0, BNXT_RE_MMAP_SH_PAGE, NULL); 4151 if (!entry) { 4152 rc = -ENOMEM; 4153 goto cfail; 4154 } 4155 uctx->shpage_mmap = &entry->rdma_entry; 4156 if (rdev->pacing.dbr_pacing) 4157 resp.comp_mask |= BNXT_RE_UCNTX_CMASK_DBR_PACING_ENABLED; 4158 4159 rc = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp))); 4160 if (rc) { 4161 ibdev_err(ibdev, "Failed to copy user context"); 4162 rc = -EFAULT; 4163 goto cfail; 4164 } 4165 4166 return 0; 4167 cfail: 4168 free_page((unsigned long)uctx->shpg); 4169 uctx->shpg = NULL; 4170 fail: 4171 return rc; 4172 } 4173 4174 void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx) 4175 { 4176 struct bnxt_re_ucontext *uctx = container_of(ib_uctx, 4177 struct bnxt_re_ucontext, 4178 ib_uctx); 4179 4180 struct bnxt_re_dev *rdev = uctx->rdev; 4181 4182 rdma_user_mmap_entry_remove(uctx->shpage_mmap); 4183 uctx->shpage_mmap = NULL; 4184 if (uctx->shpg) 4185 free_page((unsigned long)uctx->shpg); 4186 4187 if (uctx->dpi.dbr) { 4188 /* Free DPI only if this is the first PD allocated by the 4189 * application and mark the context dpi as NULL 4190 */ 4191 bnxt_qplib_dealloc_dpi(&rdev->qplib_res, &uctx->dpi); 4192 uctx->dpi.dbr = NULL; 4193 } 4194 } 4195 4196 /* Helper function to mmap the virtual memory from user app */ 4197 int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma) 4198 { 4199 struct bnxt_re_ucontext *uctx = container_of(ib_uctx, 4200 struct bnxt_re_ucontext, 4201 ib_uctx); 4202 struct bnxt_re_user_mmap_entry *bnxt_entry; 4203 struct rdma_user_mmap_entry *rdma_entry; 4204 int ret = 0; 4205 u64 pfn; 4206 4207 rdma_entry = rdma_user_mmap_entry_get(&uctx->ib_uctx, vma); 4208 if (!rdma_entry) 4209 return -EINVAL; 4210 4211 bnxt_entry = container_of(rdma_entry, struct bnxt_re_user_mmap_entry, 4212 rdma_entry); 4213 4214 switch (bnxt_entry->mmap_flag) { 4215 case BNXT_RE_MMAP_WC_DB: 4216 pfn = bnxt_entry->mem_offset >> PAGE_SHIFT; 4217 ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE, 4218 pgprot_writecombine(vma->vm_page_prot), 4219 rdma_entry); 4220 break; 4221 case BNXT_RE_MMAP_UC_DB: 4222 pfn = bnxt_entry->mem_offset >> PAGE_SHIFT; 4223 ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE, 4224 pgprot_noncached(vma->vm_page_prot), 4225 rdma_entry); 4226 break; 4227 case BNXT_RE_MMAP_SH_PAGE: 4228 ret = vm_insert_page(vma, vma->vm_start, virt_to_page(uctx->shpg)); 4229 break; 4230 case BNXT_RE_MMAP_DBR_BAR: 4231 pfn = bnxt_entry->mem_offset >> PAGE_SHIFT; 4232 ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE, 4233 pgprot_noncached(vma->vm_page_prot), 4234 rdma_entry); 4235 break; 4236 case BNXT_RE_MMAP_DBR_PAGE: 4237 /* Driver doesn't expect write access for user space */ 4238 if (vma->vm_flags & VM_WRITE) 4239 return -EFAULT; 4240 ret = vm_insert_page(vma, vma->vm_start, 4241 virt_to_page((void *)bnxt_entry->mem_offset)); 4242 break; 4243 default: 4244 ret = -EINVAL; 4245 break; 4246 } 4247 4248 rdma_user_mmap_entry_put(rdma_entry); 4249 return ret; 4250 } 4251 4252 void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry) 4253 { 4254 struct bnxt_re_user_mmap_entry *bnxt_entry; 4255 4256 bnxt_entry = container_of(rdma_entry, struct bnxt_re_user_mmap_entry, 4257 rdma_entry); 4258 4259 kfree(bnxt_entry); 4260 } 4261 4262 static int UVERBS_HANDLER(BNXT_RE_METHOD_NOTIFY_DRV)(struct uverbs_attr_bundle *attrs) 4263 { 4264 struct bnxt_re_ucontext *uctx; 4265 4266 uctx = container_of(ib_uverbs_get_ucontext(attrs), struct bnxt_re_ucontext, ib_uctx); 4267 bnxt_re_pacing_alert(uctx->rdev); 4268 return 0; 4269 } 4270 4271 static int UVERBS_HANDLER(BNXT_RE_METHOD_ALLOC_PAGE)(struct uverbs_attr_bundle *attrs) 4272 { 4273 struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs, BNXT_RE_ALLOC_PAGE_HANDLE); 4274 enum bnxt_re_alloc_page_type alloc_type; 4275 struct bnxt_re_user_mmap_entry *entry; 4276 enum bnxt_re_mmap_flag mmap_flag; 4277 struct bnxt_qplib_chip_ctx *cctx; 4278 struct bnxt_re_ucontext *uctx; 4279 struct bnxt_re_dev *rdev; 4280 u64 mmap_offset; 4281 u32 length; 4282 u32 dpi; 4283 u64 addr; 4284 int err; 4285 4286 uctx = container_of(ib_uverbs_get_ucontext(attrs), struct bnxt_re_ucontext, ib_uctx); 4287 if (IS_ERR(uctx)) 4288 return PTR_ERR(uctx); 4289 4290 err = uverbs_get_const(&alloc_type, attrs, BNXT_RE_ALLOC_PAGE_TYPE); 4291 if (err) 4292 return err; 4293 4294 rdev = uctx->rdev; 4295 cctx = rdev->chip_ctx; 4296 4297 switch (alloc_type) { 4298 case BNXT_RE_ALLOC_WC_PAGE: 4299 if (cctx->modes.db_push) { 4300 if (bnxt_qplib_alloc_dpi(&rdev->qplib_res, &uctx->wcdpi, 4301 uctx, BNXT_QPLIB_DPI_TYPE_WC)) 4302 return -ENOMEM; 4303 length = PAGE_SIZE; 4304 dpi = uctx->wcdpi.dpi; 4305 addr = (u64)uctx->wcdpi.umdbr; 4306 mmap_flag = BNXT_RE_MMAP_WC_DB; 4307 } else { 4308 return -EINVAL; 4309 } 4310 4311 break; 4312 case BNXT_RE_ALLOC_DBR_BAR_PAGE: 4313 length = PAGE_SIZE; 4314 addr = (u64)rdev->pacing.dbr_bar_addr; 4315 mmap_flag = BNXT_RE_MMAP_DBR_BAR; 4316 break; 4317 4318 case BNXT_RE_ALLOC_DBR_PAGE: 4319 length = PAGE_SIZE; 4320 addr = (u64)rdev->pacing.dbr_page; 4321 mmap_flag = BNXT_RE_MMAP_DBR_PAGE; 4322 break; 4323 4324 default: 4325 return -EOPNOTSUPP; 4326 } 4327 4328 entry = bnxt_re_mmap_entry_insert(uctx, addr, mmap_flag, &mmap_offset); 4329 if (!entry) 4330 return -ENOMEM; 4331 4332 uobj->object = entry; 4333 uverbs_finalize_uobj_create(attrs, BNXT_RE_ALLOC_PAGE_HANDLE); 4334 err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_MMAP_OFFSET, 4335 &mmap_offset, sizeof(mmap_offset)); 4336 if (err) 4337 return err; 4338 4339 err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_MMAP_LENGTH, 4340 &length, sizeof(length)); 4341 if (err) 4342 return err; 4343 4344 err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_DPI, 4345 &dpi, sizeof(length)); 4346 if (err) 4347 return err; 4348 4349 return 0; 4350 } 4351 4352 static int alloc_page_obj_cleanup(struct ib_uobject *uobject, 4353 enum rdma_remove_reason why, 4354 struct uverbs_attr_bundle *attrs) 4355 { 4356 struct bnxt_re_user_mmap_entry *entry = uobject->object; 4357 struct bnxt_re_ucontext *uctx = entry->uctx; 4358 4359 switch (entry->mmap_flag) { 4360 case BNXT_RE_MMAP_WC_DB: 4361 if (uctx && uctx->wcdpi.dbr) { 4362 struct bnxt_re_dev *rdev = uctx->rdev; 4363 4364 bnxt_qplib_dealloc_dpi(&rdev->qplib_res, &uctx->wcdpi); 4365 uctx->wcdpi.dbr = NULL; 4366 } 4367 break; 4368 case BNXT_RE_MMAP_DBR_BAR: 4369 case BNXT_RE_MMAP_DBR_PAGE: 4370 break; 4371 default: 4372 goto exit; 4373 } 4374 rdma_user_mmap_entry_remove(&entry->rdma_entry); 4375 exit: 4376 return 0; 4377 } 4378 4379 DECLARE_UVERBS_NAMED_METHOD(BNXT_RE_METHOD_ALLOC_PAGE, 4380 UVERBS_ATTR_IDR(BNXT_RE_ALLOC_PAGE_HANDLE, 4381 BNXT_RE_OBJECT_ALLOC_PAGE, 4382 UVERBS_ACCESS_NEW, 4383 UA_MANDATORY), 4384 UVERBS_ATTR_CONST_IN(BNXT_RE_ALLOC_PAGE_TYPE, 4385 enum bnxt_re_alloc_page_type, 4386 UA_MANDATORY), 4387 UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_MMAP_OFFSET, 4388 UVERBS_ATTR_TYPE(u64), 4389 UA_MANDATORY), 4390 UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_MMAP_LENGTH, 4391 UVERBS_ATTR_TYPE(u32), 4392 UA_MANDATORY), 4393 UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_DPI, 4394 UVERBS_ATTR_TYPE(u32), 4395 UA_MANDATORY)); 4396 4397 DECLARE_UVERBS_NAMED_METHOD_DESTROY(BNXT_RE_METHOD_DESTROY_PAGE, 4398 UVERBS_ATTR_IDR(BNXT_RE_DESTROY_PAGE_HANDLE, 4399 BNXT_RE_OBJECT_ALLOC_PAGE, 4400 UVERBS_ACCESS_DESTROY, 4401 UA_MANDATORY)); 4402 4403 DECLARE_UVERBS_NAMED_OBJECT(BNXT_RE_OBJECT_ALLOC_PAGE, 4404 UVERBS_TYPE_ALLOC_IDR(alloc_page_obj_cleanup), 4405 &UVERBS_METHOD(BNXT_RE_METHOD_ALLOC_PAGE), 4406 &UVERBS_METHOD(BNXT_RE_METHOD_DESTROY_PAGE)); 4407 4408 DECLARE_UVERBS_NAMED_METHOD(BNXT_RE_METHOD_NOTIFY_DRV); 4409 4410 DECLARE_UVERBS_GLOBAL_METHODS(BNXT_RE_OBJECT_NOTIFY_DRV, 4411 &UVERBS_METHOD(BNXT_RE_METHOD_NOTIFY_DRV)); 4412 4413 const struct uapi_definition bnxt_re_uapi_defs[] = { 4414 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_ALLOC_PAGE), 4415 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_NOTIFY_DRV), 4416 {} 4417 }; 4418