1 // SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB 2 /* Copyright (c) 2015 - 2021 Intel Corporation */ 3 #include "main.h" 4 5 /** 6 * irdma_query_device - get device attributes 7 * @ibdev: device pointer from stack 8 * @props: returning device attributes 9 * @udata: user data 10 */ 11 static int irdma_query_device(struct ib_device *ibdev, 12 struct ib_device_attr *props, 13 struct ib_udata *udata) 14 { 15 struct irdma_device *iwdev = to_iwdev(ibdev); 16 struct irdma_pci_f *rf = iwdev->rf; 17 struct pci_dev *pcidev = iwdev->rf->pcidev; 18 struct irdma_hw_attrs *hw_attrs = &rf->sc_dev.hw_attrs; 19 20 if (udata->inlen || udata->outlen) 21 return -EINVAL; 22 23 memset(props, 0, sizeof(*props)); 24 ether_addr_copy((u8 *)&props->sys_image_guid, iwdev->netdev->dev_addr); 25 props->fw_ver = (u64)irdma_fw_major_ver(&rf->sc_dev) << 32 | 26 irdma_fw_minor_ver(&rf->sc_dev); 27 props->device_cap_flags = iwdev->device_cap_flags; 28 props->vendor_id = pcidev->vendor; 29 props->vendor_part_id = pcidev->device; 30 31 props->hw_ver = rf->pcidev->revision; 32 props->page_size_cap = SZ_4K | SZ_2M | SZ_1G; 33 props->max_mr_size = hw_attrs->max_mr_size; 34 props->max_qp = rf->max_qp - rf->used_qps; 35 props->max_qp_wr = hw_attrs->max_qp_wr; 36 props->max_send_sge = hw_attrs->uk_attrs.max_hw_wq_frags; 37 props->max_recv_sge = hw_attrs->uk_attrs.max_hw_wq_frags; 38 props->max_cq = rf->max_cq - rf->used_cqs; 39 props->max_cqe = rf->max_cqe; 40 props->max_mr = rf->max_mr - rf->used_mrs; 41 props->max_mw = props->max_mr; 42 props->max_pd = rf->max_pd - rf->used_pds; 43 props->max_sge_rd = hw_attrs->uk_attrs.max_hw_read_sges; 44 props->max_qp_rd_atom = hw_attrs->max_hw_ird; 45 props->max_qp_init_rd_atom = hw_attrs->max_hw_ord; 46 if (rdma_protocol_roce(ibdev, 1)) 47 props->max_pkeys = IRDMA_PKEY_TBL_SZ; 48 props->max_ah = rf->max_ah; 49 props->max_mcast_grp = rf->max_mcg; 50 props->max_mcast_qp_attach = IRDMA_MAX_MGS_PER_CTX; 51 props->max_total_mcast_qp_attach = rf->max_qp * IRDMA_MAX_MGS_PER_CTX; 52 props->max_fast_reg_page_list_len = IRDMA_MAX_PAGES_PER_FMR; 53 #define HCA_CLOCK_TIMESTAMP_MASK 0x1ffff 54 if (hw_attrs->uk_attrs.hw_rev >= IRDMA_GEN_2) 55 props->timestamp_mask = HCA_CLOCK_TIMESTAMP_MASK; 56 57 return 0; 58 } 59 60 /** 61 * irdma_get_eth_speed_and_width - Get IB port speed and width from netdev speed 62 * @link_speed: netdev phy link speed 63 * @active_speed: IB port speed 64 * @active_width: IB port width 65 */ 66 static void irdma_get_eth_speed_and_width(u32 link_speed, u16 *active_speed, 67 u8 *active_width) 68 { 69 if (link_speed <= SPEED_1000) { 70 *active_width = IB_WIDTH_1X; 71 *active_speed = IB_SPEED_SDR; 72 } else if (link_speed <= SPEED_10000) { 73 *active_width = IB_WIDTH_1X; 74 *active_speed = IB_SPEED_FDR10; 75 } else if (link_speed <= SPEED_20000) { 76 *active_width = IB_WIDTH_4X; 77 *active_speed = IB_SPEED_DDR; 78 } else if (link_speed <= SPEED_25000) { 79 *active_width = IB_WIDTH_1X; 80 *active_speed = IB_SPEED_EDR; 81 } else if (link_speed <= SPEED_40000) { 82 *active_width = IB_WIDTH_4X; 83 *active_speed = IB_SPEED_FDR10; 84 } else { 85 *active_width = IB_WIDTH_4X; 86 *active_speed = IB_SPEED_EDR; 87 } 88 } 89 90 /** 91 * irdma_query_port - get port attributes 92 * @ibdev: device pointer from stack 93 * @port: port number for query 94 * @props: returning device attributes 95 */ 96 static int irdma_query_port(struct ib_device *ibdev, u32 port, 97 struct ib_port_attr *props) 98 { 99 struct irdma_device *iwdev = to_iwdev(ibdev); 100 struct net_device *netdev = iwdev->netdev; 101 
	/* no need to zero out props here. done by caller */

	props->max_mtu = IB_MTU_4096;
	props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
	props->lid = 1;
	props->lmc = 0;
	props->sm_lid = 0;
	props->sm_sl = 0;
	if (netif_carrier_ok(netdev) && netif_running(netdev)) {
		props->state = IB_PORT_ACTIVE;
		props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	} else {
		props->state = IB_PORT_DOWN;
		props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
	}
	irdma_get_eth_speed_and_width(SPEED_100000, &props->active_speed,
				      &props->active_width);

	if (rdma_protocol_roce(ibdev, 1)) {
		props->gid_tbl_len = 32;
		props->ip_gids = true;
		props->pkey_tbl_len = IRDMA_PKEY_TBL_SZ;
	} else {
		props->gid_tbl_len = 1;
	}
	props->qkey_viol_cntr = 0;
	props->port_cap_flags |= IB_PORT_CM_SUP | IB_PORT_REINIT_SUP;
	props->max_msg_sz = iwdev->rf->sc_dev.hw_attrs.max_hw_outbound_msg_size;

	return 0;
}

/**
 * irdma_disassociate_ucontext - Disassociate user context
 * @context: ib user context
 */
static void irdma_disassociate_ucontext(struct ib_ucontext *context)
{
}

static int irdma_mmap_legacy(struct irdma_ucontext *ucontext,
			     struct vm_area_struct *vma)
{
	u64 pfn;

	if (vma->vm_pgoff || vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	vma->vm_private_data = ucontext;
	pfn = ((uintptr_t)ucontext->iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET] +
	       pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT;

	return rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, PAGE_SIZE,
				 pgprot_noncached(vma->vm_page_prot), NULL);
}

static void irdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
{
	struct irdma_user_mmap_entry *entry = to_irdma_mmap_entry(rdma_entry);

	kfree(entry);
}

static struct rdma_user_mmap_entry*
irdma_user_mmap_entry_insert(struct irdma_ucontext *ucontext, u64 bar_offset,
			     enum irdma_mmap_flag mmap_flag, u64 *mmap_offset)
{
	struct irdma_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	int ret;

	if (!entry)
		return NULL;

	entry->bar_offset = bar_offset;
	entry->mmap_flag = mmap_flag;

	ret = rdma_user_mmap_entry_insert(&ucontext->ibucontext,
					  &entry->rdma_entry, PAGE_SIZE);
	if (ret) {
		kfree(entry);
		return NULL;
	}
	*mmap_offset = rdma_user_mmap_get_offset(&entry->rdma_entry);

	return &entry->rdma_entry;
}

/**
 * irdma_mmap - user memory map
 * @context: context created during alloc
 * @vma: kernel info for user memory map
 */
static int irdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct rdma_user_mmap_entry *rdma_entry;
	struct irdma_user_mmap_entry *entry;
	struct irdma_ucontext *ucontext;
	u64 pfn;
	int ret;

	ucontext = to_ucontext(context);

	/* Legacy support for libi40iw with hard-coded mmap key */
	if (ucontext->legacy_mode)
		return irdma_mmap_legacy(ucontext, vma);

	rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma);
	if (!rdma_entry) {
		ibdev_dbg(&ucontext->iwdev->ibdev,
			  "VERBS: pgoff[0x%lx] does not have valid entry\n",
			  vma->vm_pgoff);
		return -EINVAL;
	}

	entry = to_irdma_mmap_entry(rdma_entry);
	ibdev_dbg(&ucontext->iwdev->ibdev,
		  "VERBS: bar_offset [0x%llx] mmap_flag [%d]\n",
		  entry->bar_offset, entry->mmap_flag);

	pfn =
(entry->bar_offset + 222 pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT; 223 224 switch (entry->mmap_flag) { 225 case IRDMA_MMAP_IO_NC: 226 ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE, 227 pgprot_noncached(vma->vm_page_prot), 228 rdma_entry); 229 break; 230 case IRDMA_MMAP_IO_WC: 231 ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE, 232 pgprot_writecombine(vma->vm_page_prot), 233 rdma_entry); 234 break; 235 default: 236 ret = -EINVAL; 237 } 238 239 if (ret) 240 ibdev_dbg(&ucontext->iwdev->ibdev, 241 "VERBS: bar_offset [0x%llx] mmap_flag[%d] err[%d]\n", 242 entry->bar_offset, entry->mmap_flag, ret); 243 rdma_user_mmap_entry_put(rdma_entry); 244 245 return ret; 246 } 247 248 /** 249 * irdma_alloc_push_page - allocate a push page for qp 250 * @iwqp: qp pointer 251 */ 252 static void irdma_alloc_push_page(struct irdma_qp *iwqp) 253 { 254 struct irdma_cqp_request *cqp_request; 255 struct cqp_cmds_info *cqp_info; 256 struct irdma_device *iwdev = iwqp->iwdev; 257 struct irdma_sc_qp *qp = &iwqp->sc_qp; 258 enum irdma_status_code status; 259 260 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); 261 if (!cqp_request) 262 return; 263 264 cqp_info = &cqp_request->info; 265 cqp_info->cqp_cmd = IRDMA_OP_MANAGE_PUSH_PAGE; 266 cqp_info->post_sq = 1; 267 cqp_info->in.u.manage_push_page.info.push_idx = 0; 268 cqp_info->in.u.manage_push_page.info.qs_handle = 269 qp->vsi->qos[qp->user_pri].qs_handle; 270 cqp_info->in.u.manage_push_page.info.free_page = 0; 271 cqp_info->in.u.manage_push_page.info.push_page_type = 0; 272 cqp_info->in.u.manage_push_page.cqp = &iwdev->rf->cqp.sc_cqp; 273 cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request; 274 275 status = irdma_handle_cqp_op(iwdev->rf, cqp_request); 276 if (!status && cqp_request->compl_info.op_ret_val < 277 iwdev->rf->sc_dev.hw_attrs.max_hw_device_pages) { 278 qp->push_idx = cqp_request->compl_info.op_ret_val; 279 qp->push_offset = 0; 280 } 281 282 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); 283 } 284 285 /** 286 * irdma_alloc_ucontext - Allocate the user context data structure 287 * @uctx: uverbs context pointer 288 * @udata: user data 289 * 290 * This keeps track of all objects associated with a particular 291 * user-mode client. 
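 * It also negotiates the ABI version with the user-space provider and
 * returns the device limits (plus a doorbell mmap key for non-legacy
 * providers) that user-mode queues are built against.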
292 */ 293 static int irdma_alloc_ucontext(struct ib_ucontext *uctx, 294 struct ib_udata *udata) 295 { 296 struct ib_device *ibdev = uctx->device; 297 struct irdma_device *iwdev = to_iwdev(ibdev); 298 struct irdma_alloc_ucontext_req req; 299 struct irdma_alloc_ucontext_resp uresp = {}; 300 struct irdma_ucontext *ucontext = to_ucontext(uctx); 301 struct irdma_uk_attrs *uk_attrs; 302 303 if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) 304 return -EINVAL; 305 306 if (req.userspace_ver < 4 || req.userspace_ver > IRDMA_ABI_VER) 307 goto ver_error; 308 309 ucontext->iwdev = iwdev; 310 ucontext->abi_ver = req.userspace_ver; 311 312 uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs; 313 /* GEN_1 legacy support with libi40iw */ 314 if (udata->outlen < sizeof(uresp)) { 315 if (uk_attrs->hw_rev != IRDMA_GEN_1) 316 return -EOPNOTSUPP; 317 318 ucontext->legacy_mode = true; 319 uresp.max_qps = iwdev->rf->max_qp; 320 uresp.max_pds = iwdev->rf->sc_dev.hw_attrs.max_hw_pds; 321 uresp.wq_size = iwdev->rf->sc_dev.hw_attrs.max_qp_wr * 2; 322 uresp.kernel_ver = req.userspace_ver; 323 if (ib_copy_to_udata(udata, &uresp, 324 min(sizeof(uresp), udata->outlen))) 325 return -EFAULT; 326 } else { 327 u64 bar_off = (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET]; 328 329 ucontext->db_mmap_entry = 330 irdma_user_mmap_entry_insert(ucontext, bar_off, 331 IRDMA_MMAP_IO_NC, 332 &uresp.db_mmap_key); 333 if (!ucontext->db_mmap_entry) 334 return -ENOMEM; 335 336 uresp.kernel_ver = IRDMA_ABI_VER; 337 uresp.feature_flags = uk_attrs->feature_flags; 338 uresp.max_hw_wq_frags = uk_attrs->max_hw_wq_frags; 339 uresp.max_hw_read_sges = uk_attrs->max_hw_read_sges; 340 uresp.max_hw_inline = uk_attrs->max_hw_inline; 341 uresp.max_hw_rq_quanta = uk_attrs->max_hw_rq_quanta; 342 uresp.max_hw_wq_quanta = uk_attrs->max_hw_wq_quanta; 343 uresp.max_hw_sq_chunk = uk_attrs->max_hw_sq_chunk; 344 uresp.max_hw_cq_size = uk_attrs->max_hw_cq_size; 345 uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size; 346 uresp.hw_rev = uk_attrs->hw_rev; 347 if (ib_copy_to_udata(udata, &uresp, 348 min(sizeof(uresp), udata->outlen))) { 349 rdma_user_mmap_entry_remove(ucontext->db_mmap_entry); 350 return -EFAULT; 351 } 352 } 353 354 INIT_LIST_HEAD(&ucontext->cq_reg_mem_list); 355 spin_lock_init(&ucontext->cq_reg_mem_list_lock); 356 INIT_LIST_HEAD(&ucontext->qp_reg_mem_list); 357 spin_lock_init(&ucontext->qp_reg_mem_list_lock); 358 359 return 0; 360 361 ver_error: 362 ibdev_err(&iwdev->ibdev, 363 "Invalid userspace driver version detected. 
Detected version %d, should be %d\n", 364 req.userspace_ver, IRDMA_ABI_VER); 365 return -EINVAL; 366 } 367 368 /** 369 * irdma_dealloc_ucontext - deallocate the user context data structure 370 * @context: user context created during alloc 371 */ 372 static void irdma_dealloc_ucontext(struct ib_ucontext *context) 373 { 374 struct irdma_ucontext *ucontext = to_ucontext(context); 375 376 rdma_user_mmap_entry_remove(ucontext->db_mmap_entry); 377 } 378 379 /** 380 * irdma_alloc_pd - allocate protection domain 381 * @pd: PD pointer 382 * @udata: user data 383 */ 384 static int irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata) 385 { 386 struct irdma_pd *iwpd = to_iwpd(pd); 387 struct irdma_device *iwdev = to_iwdev(pd->device); 388 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev; 389 struct irdma_pci_f *rf = iwdev->rf; 390 struct irdma_alloc_pd_resp uresp = {}; 391 struct irdma_sc_pd *sc_pd; 392 u32 pd_id = 0; 393 int err; 394 395 err = irdma_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id, 396 &rf->next_pd); 397 if (err) 398 return err; 399 400 sc_pd = &iwpd->sc_pd; 401 if (udata) { 402 struct irdma_ucontext *ucontext = 403 rdma_udata_to_drv_context(udata, struct irdma_ucontext, 404 ibucontext); 405 irdma_sc_pd_init(dev, sc_pd, pd_id, ucontext->abi_ver); 406 uresp.pd_id = pd_id; 407 if (ib_copy_to_udata(udata, &uresp, 408 min(sizeof(uresp), udata->outlen))) { 409 err = -EFAULT; 410 goto error; 411 } 412 } else { 413 irdma_sc_pd_init(dev, sc_pd, pd_id, IRDMA_ABI_VER); 414 } 415 416 return 0; 417 error: 418 irdma_free_rsrc(rf, rf->allocated_pds, pd_id); 419 420 return err; 421 } 422 423 /** 424 * irdma_dealloc_pd - deallocate pd 425 * @ibpd: ptr of pd to be deallocated 426 * @udata: user data 427 */ 428 static int irdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) 429 { 430 struct irdma_pd *iwpd = to_iwpd(ibpd); 431 struct irdma_device *iwdev = to_iwdev(ibpd->device); 432 433 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_pds, iwpd->sc_pd.pd_id); 434 435 return 0; 436 } 437 438 /** 439 * irdma_get_pbl - Retrieve pbl from a list given a virtual 440 * address 441 * @va: user virtual address 442 * @pbl_list: pbl list to search in (QP's or CQ's) 443 */ 444 static struct irdma_pbl *irdma_get_pbl(unsigned long va, 445 struct list_head *pbl_list) 446 { 447 struct irdma_pbl *iwpbl; 448 449 list_for_each_entry (iwpbl, pbl_list, list) { 450 if (iwpbl->user_base == va) { 451 list_del(&iwpbl->list); 452 iwpbl->on_list = false; 453 return iwpbl; 454 } 455 } 456 457 return NULL; 458 } 459 460 /** 461 * irdma_clean_cqes - clean cq entries for qp 462 * @iwqp: qp ptr (user or kernel) 463 * @iwcq: cq ptr 464 */ 465 static void irdma_clean_cqes(struct irdma_qp *iwqp, struct irdma_cq *iwcq) 466 { 467 struct irdma_cq_uk *ukcq = &iwcq->sc_cq.cq_uk; 468 unsigned long flags; 469 470 spin_lock_irqsave(&iwcq->lock, flags); 471 irdma_uk_clean_cq(&iwqp->sc_qp.qp_uk, ukcq); 472 spin_unlock_irqrestore(&iwcq->lock, flags); 473 } 474 475 static void irdma_remove_push_mmap_entries(struct irdma_qp *iwqp) 476 { 477 if (iwqp->push_db_mmap_entry) { 478 rdma_user_mmap_entry_remove(iwqp->push_db_mmap_entry); 479 iwqp->push_db_mmap_entry = NULL; 480 } 481 if (iwqp->push_wqe_mmap_entry) { 482 rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry); 483 iwqp->push_wqe_mmap_entry = NULL; 484 } 485 } 486 487 static int irdma_setup_push_mmap_entries(struct irdma_ucontext *ucontext, 488 struct irdma_qp *iwqp, 489 u64 *push_wqe_mmap_key, 490 u64 *push_db_mmap_key) 491 { 492 struct irdma_device *iwdev = ucontext->iwdev; 493 
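	/*
	 * Both mappings live in BAR0: the push WQE page sits past the
	 * doorbell page and the reserved region, selected by the QP's
	 * push_idx, and its push doorbell page immediately follows it.
	 */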
u64 rsvd, bar_off; 494 495 rsvd = IRDMA_PF_BAR_RSVD; 496 bar_off = (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET]; 497 /* skip over db page */ 498 bar_off += IRDMA_HW_PAGE_SIZE; 499 /* push wqe page */ 500 bar_off += rsvd + iwqp->sc_qp.push_idx * IRDMA_HW_PAGE_SIZE; 501 iwqp->push_wqe_mmap_entry = irdma_user_mmap_entry_insert(ucontext, 502 bar_off, IRDMA_MMAP_IO_WC, 503 push_wqe_mmap_key); 504 if (!iwqp->push_wqe_mmap_entry) 505 return -ENOMEM; 506 507 /* push doorbell page */ 508 bar_off += IRDMA_HW_PAGE_SIZE; 509 iwqp->push_db_mmap_entry = irdma_user_mmap_entry_insert(ucontext, 510 bar_off, IRDMA_MMAP_IO_NC, 511 push_db_mmap_key); 512 if (!iwqp->push_db_mmap_entry) { 513 rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry); 514 return -ENOMEM; 515 } 516 517 return 0; 518 } 519 520 /** 521 * irdma_destroy_qp - destroy qp 522 * @ibqp: qp's ib pointer also to get to device's qp address 523 * @udata: user data 524 */ 525 static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) 526 { 527 struct irdma_qp *iwqp = to_iwqp(ibqp); 528 struct irdma_device *iwdev = iwqp->iwdev; 529 530 iwqp->sc_qp.qp_uk.destroy_pending = true; 531 532 if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS) 533 irdma_modify_qp_to_err(&iwqp->sc_qp); 534 535 irdma_qp_rem_ref(&iwqp->ibqp); 536 wait_for_completion(&iwqp->free_qp); 537 irdma_free_lsmm_rsrc(iwqp); 538 if (!iwdev->reset) 539 irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp); 540 541 if (!iwqp->user_mode) { 542 if (iwqp->iwscq) { 543 irdma_clean_cqes(iwqp, iwqp->iwscq); 544 if (iwqp->iwrcq != iwqp->iwscq) 545 irdma_clean_cqes(iwqp, iwqp->iwrcq); 546 } 547 } 548 irdma_remove_push_mmap_entries(iwqp); 549 irdma_free_qp_rsrc(iwqp); 550 551 return 0; 552 } 553 554 /** 555 * irdma_setup_virt_qp - setup for allocation of virtual qp 556 * @iwdev: irdma device 557 * @iwqp: qp ptr 558 * @init_info: initialize info to return 559 */ 560 static int irdma_setup_virt_qp(struct irdma_device *iwdev, 561 struct irdma_qp *iwqp, 562 struct irdma_qp_init_info *init_info) 563 { 564 struct irdma_pbl *iwpbl = iwqp->iwpbl; 565 struct irdma_qp_mr *qpmr = &iwpbl->qp_mr; 566 567 iwqp->page = qpmr->sq_page; 568 init_info->shadow_area_pa = qpmr->shadow; 569 if (iwpbl->pbl_allocated) { 570 init_info->virtual_map = true; 571 init_info->sq_pa = qpmr->sq_pbl.idx; 572 init_info->rq_pa = qpmr->rq_pbl.idx; 573 } else { 574 init_info->sq_pa = qpmr->sq_pbl.addr; 575 init_info->rq_pa = qpmr->rq_pbl.addr; 576 } 577 578 return 0; 579 } 580 581 /** 582 * irdma_setup_kmode_qp - setup initialization for kernel mode qp 583 * @iwdev: iwarp device 584 * @iwqp: qp ptr (user or kernel) 585 * @info: initialize info to return 586 * @init_attr: Initial QP create attributes 587 */ 588 static int irdma_setup_kmode_qp(struct irdma_device *iwdev, 589 struct irdma_qp *iwqp, 590 struct irdma_qp_init_info *info, 591 struct ib_qp_init_attr *init_attr) 592 { 593 struct irdma_dma_mem *mem = &iwqp->kqp.dma_mem; 594 u32 sqdepth, rqdepth; 595 u8 sqshift, rqshift; 596 u32 size; 597 enum irdma_status_code status; 598 struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info; 599 struct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs; 600 601 irdma_get_wqe_shift(uk_attrs, 602 uk_attrs->hw_rev >= IRDMA_GEN_2 ? 
ukinfo->max_sq_frag_cnt + 1 : 603 ukinfo->max_sq_frag_cnt, 604 ukinfo->max_inline_data, &sqshift); 605 status = irdma_get_sqdepth(uk_attrs, ukinfo->sq_size, sqshift, 606 &sqdepth); 607 if (status) 608 return -ENOMEM; 609 610 if (uk_attrs->hw_rev == IRDMA_GEN_1) 611 rqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1; 612 else 613 irdma_get_wqe_shift(uk_attrs, ukinfo->max_rq_frag_cnt, 0, 614 &rqshift); 615 616 status = irdma_get_rqdepth(uk_attrs, ukinfo->rq_size, rqshift, 617 &rqdepth); 618 if (status) 619 return -ENOMEM; 620 621 iwqp->kqp.sq_wrid_mem = 622 kcalloc(sqdepth, sizeof(*iwqp->kqp.sq_wrid_mem), GFP_KERNEL); 623 if (!iwqp->kqp.sq_wrid_mem) 624 return -ENOMEM; 625 626 iwqp->kqp.rq_wrid_mem = 627 kcalloc(rqdepth, sizeof(*iwqp->kqp.rq_wrid_mem), GFP_KERNEL); 628 if (!iwqp->kqp.rq_wrid_mem) { 629 kfree(iwqp->kqp.sq_wrid_mem); 630 iwqp->kqp.sq_wrid_mem = NULL; 631 return -ENOMEM; 632 } 633 634 ukinfo->sq_wrtrk_array = iwqp->kqp.sq_wrid_mem; 635 ukinfo->rq_wrid_array = iwqp->kqp.rq_wrid_mem; 636 637 size = (sqdepth + rqdepth) * IRDMA_QP_WQE_MIN_SIZE; 638 size += (IRDMA_SHADOW_AREA_SIZE << 3); 639 640 mem->size = ALIGN(size, 256); 641 mem->va = dma_alloc_coherent(iwdev->rf->hw.device, mem->size, 642 &mem->pa, GFP_KERNEL); 643 if (!mem->va) { 644 kfree(iwqp->kqp.sq_wrid_mem); 645 iwqp->kqp.sq_wrid_mem = NULL; 646 kfree(iwqp->kqp.rq_wrid_mem); 647 iwqp->kqp.rq_wrid_mem = NULL; 648 return -ENOMEM; 649 } 650 651 ukinfo->sq = mem->va; 652 info->sq_pa = mem->pa; 653 ukinfo->rq = &ukinfo->sq[sqdepth]; 654 info->rq_pa = info->sq_pa + (sqdepth * IRDMA_QP_WQE_MIN_SIZE); 655 ukinfo->shadow_area = ukinfo->rq[rqdepth].elem; 656 info->shadow_area_pa = info->rq_pa + (rqdepth * IRDMA_QP_WQE_MIN_SIZE); 657 ukinfo->sq_size = sqdepth >> sqshift; 658 ukinfo->rq_size = rqdepth >> rqshift; 659 ukinfo->qp_id = iwqp->ibqp.qp_num; 660 661 init_attr->cap.max_send_wr = (sqdepth - IRDMA_SQ_RSVD) >> sqshift; 662 init_attr->cap.max_recv_wr = (rqdepth - IRDMA_RQ_RSVD) >> rqshift; 663 664 return 0; 665 } 666 667 static int irdma_cqp_create_qp_cmd(struct irdma_qp *iwqp) 668 { 669 struct irdma_pci_f *rf = iwqp->iwdev->rf; 670 struct irdma_cqp_request *cqp_request; 671 struct cqp_cmds_info *cqp_info; 672 struct irdma_create_qp_info *qp_info; 673 enum irdma_status_code status; 674 675 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); 676 if (!cqp_request) 677 return -ENOMEM; 678 679 cqp_info = &cqp_request->info; 680 qp_info = &cqp_request->info.in.u.qp_create.info; 681 memset(qp_info, 0, sizeof(*qp_info)); 682 qp_info->mac_valid = true; 683 qp_info->cq_num_valid = true; 684 qp_info->next_iwarp_state = IRDMA_QP_STATE_IDLE; 685 686 cqp_info->cqp_cmd = IRDMA_OP_QP_CREATE; 687 cqp_info->post_sq = 1; 688 cqp_info->in.u.qp_create.qp = &iwqp->sc_qp; 689 cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request; 690 status = irdma_handle_cqp_op(rf, cqp_request); 691 irdma_put_cqp_request(&rf->cqp, cqp_request); 692 693 return status ? 
-ENOMEM : 0; 694 } 695 696 static void irdma_roce_fill_and_set_qpctx_info(struct irdma_qp *iwqp, 697 struct irdma_qp_host_ctx_info *ctx_info) 698 { 699 struct irdma_device *iwdev = iwqp->iwdev; 700 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev; 701 struct irdma_roce_offload_info *roce_info; 702 struct irdma_udp_offload_info *udp_info; 703 704 udp_info = &iwqp->udp_info; 705 udp_info->snd_mss = ib_mtu_enum_to_int(ib_mtu_int_to_enum(iwdev->vsi.mtu)); 706 udp_info->cwnd = iwdev->roce_cwnd; 707 udp_info->rexmit_thresh = 2; 708 udp_info->rnr_nak_thresh = 2; 709 udp_info->src_port = 0xc000; 710 udp_info->dst_port = ROCE_V2_UDP_DPORT; 711 roce_info = &iwqp->roce_info; 712 ether_addr_copy(roce_info->mac_addr, iwdev->netdev->dev_addr); 713 714 roce_info->rd_en = true; 715 roce_info->wr_rdresp_en = true; 716 roce_info->bind_en = true; 717 roce_info->dcqcn_en = false; 718 roce_info->rtomin = 5; 719 720 roce_info->ack_credits = iwdev->roce_ackcreds; 721 roce_info->ird_size = dev->hw_attrs.max_hw_ird; 722 roce_info->ord_size = dev->hw_attrs.max_hw_ord; 723 724 if (!iwqp->user_mode) { 725 roce_info->priv_mode_en = true; 726 roce_info->fast_reg_en = true; 727 roce_info->udprivcq_en = true; 728 } 729 roce_info->roce_tver = 0; 730 731 ctx_info->roce_info = &iwqp->roce_info; 732 ctx_info->udp_info = &iwqp->udp_info; 733 irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info); 734 } 735 736 static void irdma_iw_fill_and_set_qpctx_info(struct irdma_qp *iwqp, 737 struct irdma_qp_host_ctx_info *ctx_info) 738 { 739 struct irdma_device *iwdev = iwqp->iwdev; 740 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev; 741 struct irdma_iwarp_offload_info *iwarp_info; 742 743 iwarp_info = &iwqp->iwarp_info; 744 ether_addr_copy(iwarp_info->mac_addr, iwdev->netdev->dev_addr); 745 iwarp_info->rd_en = true; 746 iwarp_info->wr_rdresp_en = true; 747 iwarp_info->bind_en = true; 748 iwarp_info->ecn_en = true; 749 iwarp_info->rtomin = 5; 750 751 if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) 752 iwarp_info->ib_rd_en = true; 753 if (!iwqp->user_mode) { 754 iwarp_info->priv_mode_en = true; 755 iwarp_info->fast_reg_en = true; 756 } 757 iwarp_info->ddp_ver = 1; 758 iwarp_info->rdmap_ver = 1; 759 760 ctx_info->iwarp_info = &iwqp->iwarp_info; 761 ctx_info->iwarp_info_valid = true; 762 irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info); 763 ctx_info->iwarp_info_valid = false; 764 } 765 766 static int irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr, 767 struct irdma_device *iwdev) 768 { 769 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev; 770 struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs; 771 772 if (init_attr->create_flags) 773 return -EOPNOTSUPP; 774 775 if (init_attr->cap.max_inline_data > uk_attrs->max_hw_inline || 776 init_attr->cap.max_send_sge > uk_attrs->max_hw_wq_frags || 777 init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags) 778 return -EINVAL; 779 780 if (rdma_protocol_roce(&iwdev->ibdev, 1)) { 781 if (init_attr->qp_type != IB_QPT_RC && 782 init_attr->qp_type != IB_QPT_UD && 783 init_attr->qp_type != IB_QPT_GSI) 784 return -EOPNOTSUPP; 785 } else { 786 if (init_attr->qp_type != IB_QPT_RC) 787 return -EOPNOTSUPP; 788 } 789 790 return 0; 791 } 792 793 /** 794 * irdma_create_qp - create qp 795 * @ibpd: ptr of pd 796 * @init_attr: attributes for qp 797 * @udata: user data for create qp 798 */ 799 static struct ib_qp *irdma_create_qp(struct ib_pd *ibpd, 800 struct ib_qp_init_attr *init_attr, 801 struct ib_udata *udata) 802 { 803 struct irdma_pd *iwpd = to_iwpd(ibpd); 804 struct 
irdma_device *iwdev = to_iwdev(ibpd->device);
	struct irdma_pci_f *rf = iwdev->rf;
	struct irdma_qp *iwqp;
	struct irdma_create_qp_req req;
	struct irdma_create_qp_resp uresp = {};
	u32 qp_num = 0;
	enum irdma_status_code ret;
	int err_code;
	int sq_size;
	int rq_size;
	struct irdma_sc_qp *qp;
	struct irdma_sc_dev *dev = &rf->sc_dev;
	struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
	struct irdma_qp_init_info init_info = {};
	struct irdma_qp_host_ctx_info *ctx_info;
	unsigned long flags;

	err_code = irdma_validate_qp_attrs(init_attr, iwdev);
	if (err_code)
		return ERR_PTR(err_code);

	sq_size = init_attr->cap.max_send_wr;
	rq_size = init_attr->cap.max_recv_wr;

	init_info.vsi = &iwdev->vsi;
	init_info.qp_uk_init_info.uk_attrs = uk_attrs;
	init_info.qp_uk_init_info.sq_size = sq_size;
	init_info.qp_uk_init_info.rq_size = rq_size;
	init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
	init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
	init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;

	iwqp = kzalloc(sizeof(*iwqp), GFP_KERNEL);
	if (!iwqp)
		return ERR_PTR(-ENOMEM);

	qp = &iwqp->sc_qp;
	qp->qp_uk.back_qp = iwqp;
	qp->qp_uk.lock = &iwqp->lock;
	qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;

	iwqp->iwdev = iwdev;
	iwqp->q2_ctx_mem.size = ALIGN(IRDMA_Q2_BUF_SIZE + IRDMA_QP_CTX_SIZE,
				      256);
	iwqp->q2_ctx_mem.va = dma_alloc_coherent(dev->hw->device,
						 iwqp->q2_ctx_mem.size,
						 &iwqp->q2_ctx_mem.pa,
						 GFP_KERNEL);
	if (!iwqp->q2_ctx_mem.va) {
		err_code = -ENOMEM;
		goto error;
	}

	init_info.q2 = iwqp->q2_ctx_mem.va;
	init_info.q2_pa = iwqp->q2_ctx_mem.pa;
	init_info.host_ctx = (__le64 *)(init_info.q2 + IRDMA_Q2_BUF_SIZE);
	init_info.host_ctx_pa = init_info.q2_pa + IRDMA_Q2_BUF_SIZE;

	if (init_attr->qp_type == IB_QPT_GSI)
		qp_num = 1;
	else
		err_code = irdma_alloc_rsrc(rf, rf->allocated_qps, rf->max_qp,
					    &qp_num, &rf->next_qp);
	if (err_code)
		goto error;

	iwqp->iwpd = iwpd;
	iwqp->ibqp.qp_num = qp_num;
	qp = &iwqp->sc_qp;
	iwqp->iwscq = to_iwcq(init_attr->send_cq);
	iwqp->iwrcq = to_iwcq(init_attr->recv_cq);
	iwqp->host_ctx.va = init_info.host_ctx;
	iwqp->host_ctx.pa = init_info.host_ctx_pa;
	iwqp->host_ctx.size = IRDMA_QP_CTX_SIZE;

	init_info.pd = &iwpd->sc_pd;
	init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num;
	if (!rdma_protocol_roce(&iwdev->ibdev, 1))
		init_info.qp_uk_init_info.first_sq_wq = 1;
	iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
	init_waitqueue_head(&iwqp->waitq);
	init_waitqueue_head(&iwqp->mod_qp_waitq);

	if (udata) {
		err_code = ib_copy_from_udata(&req, udata,
					      min(sizeof(req), udata->inlen));
		if (err_code) {
			ibdev_dbg(&iwdev->ibdev,
				  "VERBS: ib_copy_from_udata fail\n");
			goto error;
		}

		iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
		iwqp->user_mode = 1;
		if (req.user_wqe_bufs) {
			struct irdma_ucontext *ucontext =
				rdma_udata_to_drv_context(udata,
							  struct irdma_ucontext,
							  ibucontext);

			init_info.qp_uk_init_info.legacy_mode = ucontext->legacy_mode;
			spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
			iwqp->iwpbl = irdma_get_pbl((unsigned long)req.user_wqe_bufs,
						    &ucontext->qp_reg_mem_list);
			spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);

			if (!iwqp->iwpbl)
{ 911 err_code = -ENODATA; 912 ibdev_dbg(&iwdev->ibdev, "VERBS: no pbl info\n"); 913 goto error; 914 } 915 } 916 init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver; 917 err_code = irdma_setup_virt_qp(iwdev, iwqp, &init_info); 918 } else { 919 init_info.qp_uk_init_info.abi_ver = IRDMA_ABI_VER; 920 err_code = irdma_setup_kmode_qp(iwdev, iwqp, &init_info, init_attr); 921 } 922 923 if (err_code) { 924 ibdev_dbg(&iwdev->ibdev, "VERBS: setup qp failed\n"); 925 goto error; 926 } 927 928 if (rdma_protocol_roce(&iwdev->ibdev, 1)) { 929 if (init_attr->qp_type == IB_QPT_RC) { 930 init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_RC; 931 init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM | 932 IRDMA_WRITE_WITH_IMM | 933 IRDMA_ROCE; 934 } else { 935 init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_UD; 936 init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM | 937 IRDMA_ROCE; 938 } 939 } else { 940 init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_IWARP; 941 init_info.qp_uk_init_info.qp_caps = IRDMA_WRITE_WITH_IMM; 942 } 943 944 if (dev->hw_attrs.uk_attrs.hw_rev > IRDMA_GEN_1) 945 init_info.qp_uk_init_info.qp_caps |= IRDMA_PUSH_MODE; 946 947 ret = irdma_sc_qp_init(qp, &init_info); 948 if (ret) { 949 err_code = -EPROTO; 950 ibdev_dbg(&iwdev->ibdev, "VERBS: qp_init fail\n"); 951 goto error; 952 } 953 954 ctx_info = &iwqp->ctx_info; 955 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id; 956 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id; 957 958 if (rdma_protocol_roce(&iwdev->ibdev, 1)) 959 irdma_roce_fill_and_set_qpctx_info(iwqp, ctx_info); 960 else 961 irdma_iw_fill_and_set_qpctx_info(iwqp, ctx_info); 962 963 err_code = irdma_cqp_create_qp_cmd(iwqp); 964 if (err_code) 965 goto error; 966 967 refcount_set(&iwqp->refcnt, 1); 968 spin_lock_init(&iwqp->lock); 969 spin_lock_init(&iwqp->sc_qp.pfpdu.lock); 970 iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 
1 : 0; 971 rf->qp_table[qp_num] = iwqp; 972 iwqp->max_send_wr = sq_size; 973 iwqp->max_recv_wr = rq_size; 974 975 if (rdma_protocol_roce(&iwdev->ibdev, 1)) { 976 if (dev->ws_add(&iwdev->vsi, 0)) { 977 irdma_cqp_qp_destroy_cmd(&rf->sc_dev, &iwqp->sc_qp); 978 err_code = -EINVAL; 979 goto error; 980 } 981 982 irdma_qp_add_qos(&iwqp->sc_qp); 983 } 984 985 if (udata) { 986 /* GEN_1 legacy support with libi40iw does not have expanded uresp struct */ 987 if (udata->outlen < sizeof(uresp)) { 988 uresp.lsmm = 1; 989 uresp.push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX_GEN_1; 990 } else { 991 if (rdma_protocol_iwarp(&iwdev->ibdev, 1)) 992 uresp.lsmm = 1; 993 } 994 uresp.actual_sq_size = sq_size; 995 uresp.actual_rq_size = rq_size; 996 uresp.qp_id = qp_num; 997 uresp.qp_caps = qp->qp_uk.qp_caps; 998 999 err_code = ib_copy_to_udata(udata, &uresp, 1000 min(sizeof(uresp), udata->outlen)); 1001 if (err_code) { 1002 ibdev_dbg(&iwdev->ibdev, "VERBS: copy_to_udata failed\n"); 1003 irdma_destroy_qp(&iwqp->ibqp, udata); 1004 return ERR_PTR(err_code); 1005 } 1006 } 1007 1008 init_completion(&iwqp->free_qp); 1009 return &iwqp->ibqp; 1010 1011 error: 1012 irdma_free_qp_rsrc(iwqp); 1013 1014 return ERR_PTR(err_code); 1015 } 1016 1017 static int irdma_get_ib_acc_flags(struct irdma_qp *iwqp) 1018 { 1019 int acc_flags = 0; 1020 1021 if (rdma_protocol_roce(iwqp->ibqp.device, 1)) { 1022 if (iwqp->roce_info.wr_rdresp_en) { 1023 acc_flags |= IB_ACCESS_LOCAL_WRITE; 1024 acc_flags |= IB_ACCESS_REMOTE_WRITE; 1025 } 1026 if (iwqp->roce_info.rd_en) 1027 acc_flags |= IB_ACCESS_REMOTE_READ; 1028 if (iwqp->roce_info.bind_en) 1029 acc_flags |= IB_ACCESS_MW_BIND; 1030 } else { 1031 if (iwqp->iwarp_info.wr_rdresp_en) { 1032 acc_flags |= IB_ACCESS_LOCAL_WRITE; 1033 acc_flags |= IB_ACCESS_REMOTE_WRITE; 1034 } 1035 if (iwqp->iwarp_info.rd_en) 1036 acc_flags |= IB_ACCESS_REMOTE_READ; 1037 if (iwqp->iwarp_info.bind_en) 1038 acc_flags |= IB_ACCESS_MW_BIND; 1039 } 1040 return acc_flags; 1041 } 1042 1043 /** 1044 * irdma_query_qp - query qp attributes 1045 * @ibqp: qp pointer 1046 * @attr: attributes pointer 1047 * @attr_mask: Not used 1048 * @init_attr: qp attributes to return 1049 */ 1050 static int irdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, 1051 int attr_mask, struct ib_qp_init_attr *init_attr) 1052 { 1053 struct irdma_qp *iwqp = to_iwqp(ibqp); 1054 struct irdma_sc_qp *qp = &iwqp->sc_qp; 1055 1056 memset(attr, 0, sizeof(*attr)); 1057 memset(init_attr, 0, sizeof(*init_attr)); 1058 1059 attr->qp_state = iwqp->ibqp_state; 1060 attr->cur_qp_state = iwqp->ibqp_state; 1061 attr->cap.max_send_wr = iwqp->max_send_wr; 1062 attr->cap.max_recv_wr = iwqp->max_recv_wr; 1063 attr->cap.max_inline_data = qp->qp_uk.max_inline_data; 1064 attr->cap.max_send_sge = qp->qp_uk.max_sq_frag_cnt; 1065 attr->cap.max_recv_sge = qp->qp_uk.max_rq_frag_cnt; 1066 attr->qp_access_flags = irdma_get_ib_acc_flags(iwqp); 1067 attr->port_num = 1; 1068 if (rdma_protocol_roce(ibqp->device, 1)) { 1069 attr->path_mtu = ib_mtu_int_to_enum(iwqp->udp_info.snd_mss); 1070 attr->qkey = iwqp->roce_info.qkey; 1071 attr->rq_psn = iwqp->udp_info.epsn; 1072 attr->sq_psn = iwqp->udp_info.psn_nxt; 1073 attr->dest_qp_num = iwqp->roce_info.dest_qp; 1074 attr->pkey_index = iwqp->roce_info.p_key; 1075 attr->retry_cnt = iwqp->udp_info.rexmit_thresh; 1076 attr->rnr_retry = iwqp->udp_info.rnr_nak_thresh; 1077 attr->max_rd_atomic = iwqp->roce_info.ord_size; 1078 attr->max_dest_rd_atomic = iwqp->roce_info.ird_size; 1079 } 1080 1081 init_attr->event_handler = iwqp->ibqp.event_handler; 
1082 init_attr->qp_context = iwqp->ibqp.qp_context; 1083 init_attr->send_cq = iwqp->ibqp.send_cq; 1084 init_attr->recv_cq = iwqp->ibqp.recv_cq; 1085 init_attr->cap = attr->cap; 1086 1087 return 0; 1088 } 1089 1090 /** 1091 * irdma_query_pkey - Query partition key 1092 * @ibdev: device pointer from stack 1093 * @port: port number 1094 * @index: index of pkey 1095 * @pkey: pointer to store the pkey 1096 */ 1097 static int irdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index, 1098 u16 *pkey) 1099 { 1100 if (index >= IRDMA_PKEY_TBL_SZ) 1101 return -EINVAL; 1102 1103 *pkey = IRDMA_DEFAULT_PKEY; 1104 return 0; 1105 } 1106 1107 /** 1108 * irdma_modify_qp_roce - modify qp request 1109 * @ibqp: qp's pointer for modify 1110 * @attr: access attributes 1111 * @attr_mask: state mask 1112 * @udata: user data 1113 */ 1114 int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr, 1115 int attr_mask, struct ib_udata *udata) 1116 { 1117 struct irdma_pd *iwpd = to_iwpd(ibqp->pd); 1118 struct irdma_qp *iwqp = to_iwqp(ibqp); 1119 struct irdma_device *iwdev = iwqp->iwdev; 1120 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev; 1121 struct irdma_qp_host_ctx_info *ctx_info; 1122 struct irdma_roce_offload_info *roce_info; 1123 struct irdma_udp_offload_info *udp_info; 1124 struct irdma_modify_qp_info info = {}; 1125 struct irdma_modify_qp_resp uresp = {}; 1126 struct irdma_modify_qp_req ureq = {}; 1127 unsigned long flags; 1128 u8 issue_modify_qp = 0; 1129 int ret = 0; 1130 1131 ctx_info = &iwqp->ctx_info; 1132 roce_info = &iwqp->roce_info; 1133 udp_info = &iwqp->udp_info; 1134 1135 if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS) 1136 return -EOPNOTSUPP; 1137 1138 if (attr_mask & IB_QP_DEST_QPN) 1139 roce_info->dest_qp = attr->dest_qp_num; 1140 1141 if (attr_mask & IB_QP_PKEY_INDEX) { 1142 ret = irdma_query_pkey(ibqp->device, 0, attr->pkey_index, 1143 &roce_info->p_key); 1144 if (ret) 1145 return ret; 1146 } 1147 1148 if (attr_mask & IB_QP_QKEY) 1149 roce_info->qkey = attr->qkey; 1150 1151 if (attr_mask & IB_QP_PATH_MTU) 1152 udp_info->snd_mss = ib_mtu_enum_to_int(attr->path_mtu); 1153 1154 if (attr_mask & IB_QP_SQ_PSN) { 1155 udp_info->psn_nxt = attr->sq_psn; 1156 udp_info->lsn = 0xffff; 1157 udp_info->psn_una = attr->sq_psn; 1158 udp_info->psn_max = attr->sq_psn; 1159 } 1160 1161 if (attr_mask & IB_QP_RQ_PSN) 1162 udp_info->epsn = attr->rq_psn; 1163 1164 if (attr_mask & IB_QP_RNR_RETRY) 1165 udp_info->rnr_nak_thresh = attr->rnr_retry; 1166 1167 if (attr_mask & IB_QP_RETRY_CNT) 1168 udp_info->rexmit_thresh = attr->retry_cnt; 1169 1170 ctx_info->roce_info->pd_id = iwpd->sc_pd.pd_id; 1171 1172 if (attr_mask & IB_QP_AV) { 1173 struct irdma_av *av = &iwqp->roce_ah.av; 1174 const struct ib_gid_attr *sgid_attr; 1175 u16 vlan_id = VLAN_N_VID; 1176 u32 local_ip[4]; 1177 1178 memset(&iwqp->roce_ah, 0, sizeof(iwqp->roce_ah)); 1179 if (attr->ah_attr.ah_flags & IB_AH_GRH) { 1180 udp_info->ttl = attr->ah_attr.grh.hop_limit; 1181 udp_info->flow_label = attr->ah_attr.grh.flow_label; 1182 udp_info->tos = attr->ah_attr.grh.traffic_class; 1183 irdma_qp_rem_qos(&iwqp->sc_qp); 1184 dev->ws_remove(iwqp->sc_qp.vsi, ctx_info->user_pri); 1185 ctx_info->user_pri = rt_tos2priority(udp_info->tos); 1186 iwqp->sc_qp.user_pri = ctx_info->user_pri; 1187 if (dev->ws_add(iwqp->sc_qp.vsi, ctx_info->user_pri)) 1188 return -ENOMEM; 1189 irdma_qp_add_qos(&iwqp->sc_qp); 1190 } 1191 sgid_attr = attr->ah_attr.grh.sgid_attr; 1192 ret = rdma_read_gid_l2_fields(sgid_attr, &vlan_id, 1193 ctx_info->roce_info->mac_addr); 1194 if (ret) 1195 return 
ret;

		if (vlan_id >= VLAN_N_VID && iwdev->dcb)
			vlan_id = 0;
		if (vlan_id < VLAN_N_VID) {
			udp_info->insert_vlan_tag = true;
			udp_info->vlan_tag = vlan_id |
					     ctx_info->user_pri << VLAN_PRIO_SHIFT;
		} else {
			udp_info->insert_vlan_tag = false;
		}

		av->attrs = attr->ah_attr;
		rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid_attr->gid);
		rdma_gid2ip((struct sockaddr *)&av->dgid_addr, &attr->ah_attr.grh.dgid);
		roce_info->local_qp = ibqp->qp_num;
		if (av->sgid_addr.saddr.sa_family == AF_INET6) {
			__be32 *daddr =
				av->dgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32;
			__be32 *saddr =
				av->sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32;

			irdma_copy_ip_ntohl(&udp_info->dest_ip_addr[0], daddr);
			irdma_copy_ip_ntohl(&udp_info->local_ipaddr[0], saddr);

			udp_info->ipv4 = false;
			irdma_copy_ip_ntohl(local_ip, daddr);

			udp_info->arp_idx = irdma_arp_table(iwdev->rf,
							    &local_ip[0],
							    false, NULL,
							    IRDMA_ARP_RESOLVE);
		} else {
			__be32 saddr = av->sgid_addr.saddr_in.sin_addr.s_addr;
			__be32 daddr = av->dgid_addr.saddr_in.sin_addr.s_addr;

			local_ip[0] = ntohl(daddr);

			udp_info->ipv4 = true;
			udp_info->dest_ip_addr[0] = 0;
			udp_info->dest_ip_addr[1] = 0;
			udp_info->dest_ip_addr[2] = 0;
			udp_info->dest_ip_addr[3] = local_ip[0];

			udp_info->local_ipaddr[0] = 0;
			udp_info->local_ipaddr[1] = 0;
			udp_info->local_ipaddr[2] = 0;
			udp_info->local_ipaddr[3] = ntohl(saddr);
		}
		udp_info->arp_idx =
			irdma_add_arp(iwdev->rf, local_ip, udp_info->ipv4,
				      attr->ah_attr.roce.dmac);
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic > dev->hw_attrs.max_hw_ord) {
			ibdev_err(&iwdev->ibdev,
				  "rd_atomic = %d, above max_hw_ord=%d\n",
				  attr->max_rd_atomic,
				  dev->hw_attrs.max_hw_ord);
			return -EINVAL;
		}
		if (attr->max_rd_atomic)
			roce_info->ord_size = attr->max_rd_atomic;
		info.ord_valid = true;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic > dev->hw_attrs.max_hw_ird) {
			ibdev_err(&iwdev->ibdev,
				  "dest_rd_atomic = %d, above max_hw_ird=%d\n",
				  attr->max_dest_rd_atomic,
				  dev->hw_attrs.max_hw_ird);
			return -EINVAL;
		}
		if (attr->max_dest_rd_atomic)
			roce_info->ird_size = attr->max_dest_rd_atomic;
	}

	if (attr_mask & IB_QP_ACCESS_FLAGS) {
		if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
			roce_info->wr_rdresp_en = true;
		if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
			roce_info->wr_rdresp_en = true;
		if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
			roce_info->rd_en = true;
	}

	wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));

	ibdev_dbg(&iwdev->ibdev,
		  "VERBS: caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d attr_mask=0x%x\n",
		  __builtin_return_address(0), ibqp->qp_num, attr->qp_state,
		  iwqp->ibqp_state, iwqp->iwarp_state, attr_mask);

	spin_lock_irqsave(&iwqp->lock, flags);
	if (attr_mask & IB_QP_STATE) {
		if (!ib_modify_qp_is_ok(iwqp->ibqp_state, attr->qp_state,
					iwqp->ibqp.qp_type, attr_mask)) {
			ibdev_warn(&iwdev->ibdev, "modify_qp invalid for qp_id=%d, old_state=0x%x, new_state=0x%x\n",
				   iwqp->ibqp.qp_num, iwqp->ibqp_state,
				   attr->qp_state);
			ret = -EINVAL;
			goto exit;
		}
		info.curr_iwarp_state = iwqp->iwarp_state;
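
		/*
		 * Map the requested IB state onto the device QP state
		 * machine; only transitions the hardware can perform set
		 * issue_modify_qp for the CQP modify below.
		 */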
1302 switch (attr->qp_state) { 1303 case IB_QPS_INIT: 1304 if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) { 1305 ret = -EINVAL; 1306 goto exit; 1307 } 1308 1309 if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) { 1310 info.next_iwarp_state = IRDMA_QP_STATE_IDLE; 1311 issue_modify_qp = 1; 1312 } 1313 break; 1314 case IB_QPS_RTR: 1315 if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) { 1316 ret = -EINVAL; 1317 goto exit; 1318 } 1319 info.arp_cache_idx_valid = true; 1320 info.cq_num_valid = true; 1321 info.next_iwarp_state = IRDMA_QP_STATE_RTR; 1322 issue_modify_qp = 1; 1323 break; 1324 case IB_QPS_RTS: 1325 if (iwqp->ibqp_state < IB_QPS_RTR || 1326 iwqp->ibqp_state == IB_QPS_ERR) { 1327 ret = -EINVAL; 1328 goto exit; 1329 } 1330 1331 info.arp_cache_idx_valid = true; 1332 info.cq_num_valid = true; 1333 info.ord_valid = true; 1334 info.next_iwarp_state = IRDMA_QP_STATE_RTS; 1335 issue_modify_qp = 1; 1336 if (iwdev->push_mode && udata && 1337 iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX && 1338 dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) { 1339 spin_unlock_irqrestore(&iwqp->lock, flags); 1340 irdma_alloc_push_page(iwqp); 1341 spin_lock_irqsave(&iwqp->lock, flags); 1342 } 1343 break; 1344 case IB_QPS_SQD: 1345 if (iwqp->iwarp_state == IRDMA_QP_STATE_SQD) 1346 goto exit; 1347 1348 if (iwqp->iwarp_state != IRDMA_QP_STATE_RTS) { 1349 ret = -EINVAL; 1350 goto exit; 1351 } 1352 1353 info.next_iwarp_state = IRDMA_QP_STATE_SQD; 1354 issue_modify_qp = 1; 1355 break; 1356 case IB_QPS_SQE: 1357 case IB_QPS_ERR: 1358 case IB_QPS_RESET: 1359 if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS) { 1360 spin_unlock_irqrestore(&iwqp->lock, flags); 1361 info.next_iwarp_state = IRDMA_QP_STATE_SQD; 1362 irdma_hw_modify_qp(iwdev, iwqp, &info, true); 1363 spin_lock_irqsave(&iwqp->lock, flags); 1364 } 1365 1366 if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) { 1367 spin_unlock_irqrestore(&iwqp->lock, flags); 1368 if (udata) { 1369 if (ib_copy_from_udata(&ureq, udata, 1370 min(sizeof(ureq), udata->inlen))) 1371 return -EINVAL; 1372 1373 irdma_flush_wqes(iwqp, 1374 (ureq.sq_flush ? IRDMA_FLUSH_SQ : 0) | 1375 (ureq.rq_flush ? 
IRDMA_FLUSH_RQ : 0) | 1376 IRDMA_REFLUSH); 1377 } 1378 return 0; 1379 } 1380 1381 info.next_iwarp_state = IRDMA_QP_STATE_ERROR; 1382 issue_modify_qp = 1; 1383 break; 1384 default: 1385 ret = -EINVAL; 1386 goto exit; 1387 } 1388 1389 iwqp->ibqp_state = attr->qp_state; 1390 } 1391 1392 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id; 1393 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id; 1394 irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info); 1395 spin_unlock_irqrestore(&iwqp->lock, flags); 1396 1397 if (attr_mask & IB_QP_STATE) { 1398 if (issue_modify_qp) { 1399 ctx_info->rem_endpoint_idx = udp_info->arp_idx; 1400 if (irdma_hw_modify_qp(iwdev, iwqp, &info, true)) 1401 return -EINVAL; 1402 spin_lock_irqsave(&iwqp->lock, flags); 1403 if (iwqp->iwarp_state == info.curr_iwarp_state) { 1404 iwqp->iwarp_state = info.next_iwarp_state; 1405 iwqp->ibqp_state = attr->qp_state; 1406 } 1407 if (iwqp->ibqp_state > IB_QPS_RTS && 1408 !iwqp->flush_issued) { 1409 iwqp->flush_issued = 1; 1410 spin_unlock_irqrestore(&iwqp->lock, flags); 1411 irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ | 1412 IRDMA_FLUSH_RQ | 1413 IRDMA_FLUSH_WAIT); 1414 } else { 1415 spin_unlock_irqrestore(&iwqp->lock, flags); 1416 } 1417 } else { 1418 iwqp->ibqp_state = attr->qp_state; 1419 } 1420 if (udata && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) { 1421 struct irdma_ucontext *ucontext; 1422 1423 ucontext = rdma_udata_to_drv_context(udata, 1424 struct irdma_ucontext, ibucontext); 1425 if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX && 1426 !iwqp->push_wqe_mmap_entry && 1427 !irdma_setup_push_mmap_entries(ucontext, iwqp, 1428 &uresp.push_wqe_mmap_key, &uresp.push_db_mmap_key)) { 1429 uresp.push_valid = 1; 1430 uresp.push_offset = iwqp->sc_qp.push_offset; 1431 } 1432 ret = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), 1433 udata->outlen)); 1434 if (ret) { 1435 irdma_remove_push_mmap_entries(iwqp); 1436 ibdev_dbg(&iwdev->ibdev, 1437 "VERBS: copy_to_udata failed\n"); 1438 return ret; 1439 } 1440 } 1441 } 1442 1443 return 0; 1444 exit: 1445 spin_unlock_irqrestore(&iwqp->lock, flags); 1446 1447 return ret; 1448 } 1449 1450 /** 1451 * irdma_modify_qp - modify qp request 1452 * @ibqp: qp's pointer for modify 1453 * @attr: access attributes 1454 * @attr_mask: state mask 1455 * @udata: user data 1456 */ 1457 int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, 1458 struct ib_udata *udata) 1459 { 1460 struct irdma_qp *iwqp = to_iwqp(ibqp); 1461 struct irdma_device *iwdev = iwqp->iwdev; 1462 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev; 1463 struct irdma_qp_host_ctx_info *ctx_info; 1464 struct irdma_tcp_offload_info *tcp_info; 1465 struct irdma_iwarp_offload_info *offload_info; 1466 struct irdma_modify_qp_info info = {}; 1467 struct irdma_modify_qp_resp uresp = {}; 1468 struct irdma_modify_qp_req ureq = {}; 1469 u8 issue_modify_qp = 0; 1470 u8 dont_wait = 0; 1471 int err; 1472 unsigned long flags; 1473 1474 if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS) 1475 return -EOPNOTSUPP; 1476 1477 ctx_info = &iwqp->ctx_info; 1478 offload_info = &iwqp->iwarp_info; 1479 tcp_info = &iwqp->tcp_info; 1480 wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend)); 1481 ibdev_dbg(&iwdev->ibdev, 1482 "VERBS: caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d last_aeq=%d hw_tcp_state=%d hw_iwarp_state=%d attr_mask=0x%x\n", 1483 __builtin_return_address(0), ibqp->qp_num, attr->qp_state, 1484 iwqp->ibqp_state, iwqp->iwarp_state, iwqp->last_aeq, 1485 iwqp->hw_tcp_state, 
iwqp->hw_iwarp_state, attr_mask); 1486 1487 spin_lock_irqsave(&iwqp->lock, flags); 1488 if (attr_mask & IB_QP_STATE) { 1489 info.curr_iwarp_state = iwqp->iwarp_state; 1490 switch (attr->qp_state) { 1491 case IB_QPS_INIT: 1492 case IB_QPS_RTR: 1493 if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) { 1494 err = -EINVAL; 1495 goto exit; 1496 } 1497 1498 if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) { 1499 info.next_iwarp_state = IRDMA_QP_STATE_IDLE; 1500 issue_modify_qp = 1; 1501 } 1502 if (iwdev->push_mode && udata && 1503 iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX && 1504 dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) { 1505 spin_unlock_irqrestore(&iwqp->lock, flags); 1506 irdma_alloc_push_page(iwqp); 1507 spin_lock_irqsave(&iwqp->lock, flags); 1508 } 1509 break; 1510 case IB_QPS_RTS: 1511 if (iwqp->iwarp_state > IRDMA_QP_STATE_RTS || 1512 !iwqp->cm_id) { 1513 err = -EINVAL; 1514 goto exit; 1515 } 1516 1517 issue_modify_qp = 1; 1518 iwqp->hw_tcp_state = IRDMA_TCP_STATE_ESTABLISHED; 1519 iwqp->hte_added = 1; 1520 info.next_iwarp_state = IRDMA_QP_STATE_RTS; 1521 info.tcp_ctx_valid = true; 1522 info.ord_valid = true; 1523 info.arp_cache_idx_valid = true; 1524 info.cq_num_valid = true; 1525 break; 1526 case IB_QPS_SQD: 1527 if (iwqp->hw_iwarp_state > IRDMA_QP_STATE_RTS) { 1528 err = 0; 1529 goto exit; 1530 } 1531 1532 if (iwqp->iwarp_state == IRDMA_QP_STATE_CLOSING || 1533 iwqp->iwarp_state < IRDMA_QP_STATE_RTS) { 1534 err = 0; 1535 goto exit; 1536 } 1537 1538 if (iwqp->iwarp_state > IRDMA_QP_STATE_CLOSING) { 1539 err = -EINVAL; 1540 goto exit; 1541 } 1542 1543 info.next_iwarp_state = IRDMA_QP_STATE_CLOSING; 1544 issue_modify_qp = 1; 1545 break; 1546 case IB_QPS_SQE: 1547 if (iwqp->iwarp_state >= IRDMA_QP_STATE_TERMINATE) { 1548 err = -EINVAL; 1549 goto exit; 1550 } 1551 1552 info.next_iwarp_state = IRDMA_QP_STATE_TERMINATE; 1553 issue_modify_qp = 1; 1554 break; 1555 case IB_QPS_ERR: 1556 case IB_QPS_RESET: 1557 if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) { 1558 spin_unlock_irqrestore(&iwqp->lock, flags); 1559 if (udata) { 1560 if (ib_copy_from_udata(&ureq, udata, 1561 min(sizeof(ureq), udata->inlen))) 1562 return -EINVAL; 1563 1564 irdma_flush_wqes(iwqp, 1565 (ureq.sq_flush ? IRDMA_FLUSH_SQ : 0) | 1566 (ureq.rq_flush ? 
IRDMA_FLUSH_RQ : 0) | 1567 IRDMA_REFLUSH); 1568 } 1569 return 0; 1570 } 1571 1572 if (iwqp->sc_qp.term_flags) { 1573 spin_unlock_irqrestore(&iwqp->lock, flags); 1574 irdma_terminate_del_timer(&iwqp->sc_qp); 1575 spin_lock_irqsave(&iwqp->lock, flags); 1576 } 1577 info.next_iwarp_state = IRDMA_QP_STATE_ERROR; 1578 if (iwqp->hw_tcp_state > IRDMA_TCP_STATE_CLOSED && 1579 iwdev->iw_status && 1580 iwqp->hw_tcp_state != IRDMA_TCP_STATE_TIME_WAIT) 1581 info.reset_tcp_conn = true; 1582 else 1583 dont_wait = 1; 1584 1585 issue_modify_qp = 1; 1586 info.next_iwarp_state = IRDMA_QP_STATE_ERROR; 1587 break; 1588 default: 1589 err = -EINVAL; 1590 goto exit; 1591 } 1592 1593 iwqp->ibqp_state = attr->qp_state; 1594 } 1595 if (attr_mask & IB_QP_ACCESS_FLAGS) { 1596 ctx_info->iwarp_info_valid = true; 1597 if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE) 1598 offload_info->wr_rdresp_en = true; 1599 if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE) 1600 offload_info->wr_rdresp_en = true; 1601 if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ) 1602 offload_info->rd_en = true; 1603 } 1604 1605 if (ctx_info->iwarp_info_valid) { 1606 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id; 1607 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id; 1608 irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info); 1609 } 1610 spin_unlock_irqrestore(&iwqp->lock, flags); 1611 1612 if (attr_mask & IB_QP_STATE) { 1613 if (issue_modify_qp) { 1614 ctx_info->rem_endpoint_idx = tcp_info->arp_idx; 1615 if (irdma_hw_modify_qp(iwdev, iwqp, &info, true)) 1616 return -EINVAL; 1617 } 1618 1619 spin_lock_irqsave(&iwqp->lock, flags); 1620 if (iwqp->iwarp_state == info.curr_iwarp_state) { 1621 iwqp->iwarp_state = info.next_iwarp_state; 1622 iwqp->ibqp_state = attr->qp_state; 1623 } 1624 spin_unlock_irqrestore(&iwqp->lock, flags); 1625 } 1626 1627 if (issue_modify_qp && iwqp->ibqp_state > IB_QPS_RTS) { 1628 if (dont_wait) { 1629 if (iwqp->cm_id && iwqp->hw_tcp_state) { 1630 spin_lock_irqsave(&iwqp->lock, flags); 1631 iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSED; 1632 iwqp->last_aeq = IRDMA_AE_RESET_SENT; 1633 spin_unlock_irqrestore(&iwqp->lock, flags); 1634 irdma_cm_disconn(iwqp); 1635 } 1636 } else { 1637 int close_timer_started; 1638 1639 spin_lock_irqsave(&iwdev->cm_core.ht_lock, flags); 1640 1641 if (iwqp->cm_node) { 1642 refcount_inc(&iwqp->cm_node->refcnt); 1643 spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags); 1644 close_timer_started = atomic_inc_return(&iwqp->close_timer_started); 1645 if (iwqp->cm_id && close_timer_started == 1) 1646 irdma_schedule_cm_timer(iwqp->cm_node, 1647 (struct irdma_puda_buf *)iwqp, 1648 IRDMA_TIMER_TYPE_CLOSE, 1, 0); 1649 1650 irdma_rem_ref_cm_node(iwqp->cm_node); 1651 } else { 1652 spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags); 1653 } 1654 } 1655 } 1656 if (attr_mask & IB_QP_STATE && udata && 1657 dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) { 1658 struct irdma_ucontext *ucontext; 1659 1660 ucontext = rdma_udata_to_drv_context(udata, 1661 struct irdma_ucontext, ibucontext); 1662 if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX && 1663 !iwqp->push_wqe_mmap_entry && 1664 !irdma_setup_push_mmap_entries(ucontext, iwqp, 1665 &uresp.push_wqe_mmap_key, &uresp.push_db_mmap_key)) { 1666 uresp.push_valid = 1; 1667 uresp.push_offset = iwqp->sc_qp.push_offset; 1668 } 1669 1670 err = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), 1671 udata->outlen)); 1672 if (err) { 1673 irdma_remove_push_mmap_entries(iwqp); 1674 ibdev_dbg(&iwdev->ibdev, 1675 "VERBS: copy_to_udata failed\n"); 
1676 return err; 1677 } 1678 } 1679 1680 return 0; 1681 exit: 1682 spin_unlock_irqrestore(&iwqp->lock, flags); 1683 1684 return err; 1685 } 1686 1687 /** 1688 * irdma_cq_free_rsrc - free up resources for cq 1689 * @rf: RDMA PCI function 1690 * @iwcq: cq ptr 1691 */ 1692 static void irdma_cq_free_rsrc(struct irdma_pci_f *rf, struct irdma_cq *iwcq) 1693 { 1694 struct irdma_sc_cq *cq = &iwcq->sc_cq; 1695 1696 if (!iwcq->user_mode) { 1697 dma_free_coherent(rf->sc_dev.hw->device, iwcq->kmem.size, 1698 iwcq->kmem.va, iwcq->kmem.pa); 1699 iwcq->kmem.va = NULL; 1700 dma_free_coherent(rf->sc_dev.hw->device, 1701 iwcq->kmem_shadow.size, 1702 iwcq->kmem_shadow.va, iwcq->kmem_shadow.pa); 1703 iwcq->kmem_shadow.va = NULL; 1704 } 1705 1706 irdma_free_rsrc(rf, rf->allocated_cqs, cq->cq_uk.cq_id); 1707 } 1708 1709 /** 1710 * irdma_free_cqbuf - worker to free a cq buffer 1711 * @work: provides access to the cq buffer to free 1712 */ 1713 static void irdma_free_cqbuf(struct work_struct *work) 1714 { 1715 struct irdma_cq_buf *cq_buf = container_of(work, struct irdma_cq_buf, work); 1716 1717 dma_free_coherent(cq_buf->hw->device, cq_buf->kmem_buf.size, 1718 cq_buf->kmem_buf.va, cq_buf->kmem_buf.pa); 1719 cq_buf->kmem_buf.va = NULL; 1720 kfree(cq_buf); 1721 } 1722 1723 /** 1724 * irdma_process_resize_list - remove resized cq buffers from the resize_list 1725 * @iwcq: cq which owns the resize_list 1726 * @iwdev: irdma device 1727 * @lcqe_buf: the buffer where the last cqe is received 1728 */ 1729 static int irdma_process_resize_list(struct irdma_cq *iwcq, 1730 struct irdma_device *iwdev, 1731 struct irdma_cq_buf *lcqe_buf) 1732 { 1733 struct list_head *tmp_node, *list_node; 1734 struct irdma_cq_buf *cq_buf; 1735 int cnt = 0; 1736 1737 list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) { 1738 cq_buf = list_entry(list_node, struct irdma_cq_buf, list); 1739 if (cq_buf == lcqe_buf) 1740 return cnt; 1741 1742 list_del(&cq_buf->list); 1743 queue_work(iwdev->cleanup_wq, &cq_buf->work); 1744 cnt++; 1745 } 1746 1747 return cnt; 1748 } 1749 1750 /** 1751 * irdma_destroy_cq - destroy cq 1752 * @ib_cq: cq pointer 1753 * @udata: user data 1754 */ 1755 static int irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) 1756 { 1757 struct irdma_device *iwdev = to_iwdev(ib_cq->device); 1758 struct irdma_cq *iwcq = to_iwcq(ib_cq); 1759 struct irdma_sc_cq *cq = &iwcq->sc_cq; 1760 struct irdma_sc_dev *dev = cq->dev; 1761 struct irdma_sc_ceq *ceq = dev->ceq[cq->ceq_id]; 1762 struct irdma_ceq *iwceq = container_of(ceq, struct irdma_ceq, sc_ceq); 1763 unsigned long flags; 1764 1765 spin_lock_irqsave(&iwcq->lock, flags); 1766 if (!list_empty(&iwcq->resize_list)) 1767 irdma_process_resize_list(iwcq, iwdev, NULL); 1768 spin_unlock_irqrestore(&iwcq->lock, flags); 1769 1770 irdma_cq_wq_destroy(iwdev->rf, cq); 1771 irdma_cq_free_rsrc(iwdev->rf, iwcq); 1772 1773 spin_lock_irqsave(&iwceq->ce_lock, flags); 1774 irdma_sc_cleanup_ceqes(cq, ceq); 1775 spin_unlock_irqrestore(&iwceq->ce_lock, flags); 1776 1777 return 0; 1778 } 1779 1780 /** 1781 * irdma_resize_cq - resize cq 1782 * @ibcq: cq to be resized 1783 * @entries: desired cq size 1784 * @udata: user data 1785 */ 1786 static int irdma_resize_cq(struct ib_cq *ibcq, int entries, 1787 struct ib_udata *udata) 1788 { 1789 struct irdma_cq *iwcq = to_iwcq(ibcq); 1790 struct irdma_sc_dev *dev = iwcq->sc_cq.dev; 1791 struct irdma_cqp_request *cqp_request; 1792 struct cqp_cmds_info *cqp_info; 1793 struct irdma_modify_cq_info *m_info; 1794 struct irdma_modify_cq_info info = {}; 1795 
struct irdma_dma_mem kmem_buf; 1796 struct irdma_cq_mr *cqmr_buf; 1797 struct irdma_pbl *iwpbl_buf; 1798 struct irdma_device *iwdev; 1799 struct irdma_pci_f *rf; 1800 struct irdma_cq_buf *cq_buf = NULL; 1801 enum irdma_status_code status = 0; 1802 unsigned long flags; 1803 int ret; 1804 1805 iwdev = to_iwdev(ibcq->device); 1806 rf = iwdev->rf; 1807 1808 if (!(rf->sc_dev.hw_attrs.uk_attrs.feature_flags & 1809 IRDMA_FEATURE_CQ_RESIZE)) 1810 return -EOPNOTSUPP; 1811 1812 if (entries > rf->max_cqe) 1813 return -EINVAL; 1814 1815 if (!iwcq->user_mode) { 1816 entries++; 1817 if (rf->sc_dev.hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) 1818 entries *= 2; 1819 } 1820 1821 info.cq_size = max(entries, 4); 1822 1823 if (info.cq_size == iwcq->sc_cq.cq_uk.cq_size - 1) 1824 return 0; 1825 1826 if (udata) { 1827 struct irdma_resize_cq_req req = {}; 1828 struct irdma_ucontext *ucontext = 1829 rdma_udata_to_drv_context(udata, struct irdma_ucontext, 1830 ibucontext); 1831 1832 /* CQ resize not supported with legacy GEN_1 libi40iw */ 1833 if (ucontext->legacy_mode) 1834 return -EOPNOTSUPP; 1835 1836 if (ib_copy_from_udata(&req, udata, 1837 min(sizeof(req), udata->inlen))) 1838 return -EINVAL; 1839 1840 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); 1841 iwpbl_buf = irdma_get_pbl((unsigned long)req.user_cq_buffer, 1842 &ucontext->cq_reg_mem_list); 1843 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); 1844 1845 if (!iwpbl_buf) 1846 return -ENOMEM; 1847 1848 cqmr_buf = &iwpbl_buf->cq_mr; 1849 if (iwpbl_buf->pbl_allocated) { 1850 info.virtual_map = true; 1851 info.pbl_chunk_size = 1; 1852 info.first_pm_pbl_idx = cqmr_buf->cq_pbl.idx; 1853 } else { 1854 info.cq_pa = cqmr_buf->cq_pbl.addr; 1855 } 1856 } else { 1857 /* Kmode CQ resize */ 1858 int rsize; 1859 1860 rsize = info.cq_size * sizeof(struct irdma_cqe); 1861 kmem_buf.size = ALIGN(round_up(rsize, 256), 256); 1862 kmem_buf.va = dma_alloc_coherent(dev->hw->device, 1863 kmem_buf.size, &kmem_buf.pa, 1864 GFP_KERNEL); 1865 if (!kmem_buf.va) 1866 return -ENOMEM; 1867 1868 info.cq_base = kmem_buf.va; 1869 info.cq_pa = kmem_buf.pa; 1870 cq_buf = kzalloc(sizeof(*cq_buf), GFP_KERNEL); 1871 if (!cq_buf) { 1872 ret = -ENOMEM; 1873 goto error; 1874 } 1875 } 1876 1877 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); 1878 if (!cqp_request) { 1879 ret = -ENOMEM; 1880 goto error; 1881 } 1882 1883 info.shadow_read_threshold = iwcq->sc_cq.shadow_read_threshold; 1884 info.cq_resize = true; 1885 1886 cqp_info = &cqp_request->info; 1887 m_info = &cqp_info->in.u.cq_modify.info; 1888 memcpy(m_info, &info, sizeof(*m_info)); 1889 1890 cqp_info->cqp_cmd = IRDMA_OP_CQ_MODIFY; 1891 cqp_info->in.u.cq_modify.cq = &iwcq->sc_cq; 1892 cqp_info->in.u.cq_modify.scratch = (uintptr_t)cqp_request; 1893 cqp_info->post_sq = 1; 1894 status = irdma_handle_cqp_op(rf, cqp_request); 1895 irdma_put_cqp_request(&rf->cqp, cqp_request); 1896 if (status) { 1897 ret = -EPROTO; 1898 goto error; 1899 } 1900 1901 spin_lock_irqsave(&iwcq->lock, flags); 1902 if (cq_buf) { 1903 cq_buf->kmem_buf = iwcq->kmem; 1904 cq_buf->hw = dev->hw; 1905 memcpy(&cq_buf->cq_uk, &iwcq->sc_cq.cq_uk, sizeof(cq_buf->cq_uk)); 1906 INIT_WORK(&cq_buf->work, irdma_free_cqbuf); 1907 list_add_tail(&cq_buf->list, &iwcq->resize_list); 1908 iwcq->kmem = kmem_buf; 1909 } 1910 1911 irdma_sc_cq_resize(&iwcq->sc_cq, &info); 1912 ibcq->cqe = info.cq_size - 1; 1913 spin_unlock_irqrestore(&iwcq->lock, flags); 1914 1915 return 0; 1916 error: 1917 if (!udata) { 1918 dma_free_coherent(dev->hw->device, kmem_buf.size, 
kmem_buf.va, 1919 kmem_buf.pa); 1920 kmem_buf.va = NULL; 1921 } 1922 kfree(cq_buf); 1923 1924 return ret; 1925 } 1926 1927 static inline int cq_validate_flags(u32 flags, u8 hw_rev) 1928 { 1929 /* GEN1 does not support CQ create flags */ 1930 if (hw_rev == IRDMA_GEN_1) 1931 return flags ? -EOPNOTSUPP : 0; 1932 1933 return flags & ~IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION ? -EOPNOTSUPP : 0; 1934 } 1935 1936 /** 1937 * irdma_create_cq - create cq 1938 * @ibcq: CQ allocated 1939 * @attr: attributes for cq 1940 * @udata: user data 1941 */ 1942 static int irdma_create_cq(struct ib_cq *ibcq, 1943 const struct ib_cq_init_attr *attr, 1944 struct ib_udata *udata) 1945 { 1946 struct ib_device *ibdev = ibcq->device; 1947 struct irdma_device *iwdev = to_iwdev(ibdev); 1948 struct irdma_pci_f *rf = iwdev->rf; 1949 struct irdma_cq *iwcq = to_iwcq(ibcq); 1950 u32 cq_num = 0; 1951 struct irdma_sc_cq *cq; 1952 struct irdma_sc_dev *dev = &rf->sc_dev; 1953 struct irdma_cq_init_info info = {}; 1954 enum irdma_status_code status; 1955 struct irdma_cqp_request *cqp_request; 1956 struct cqp_cmds_info *cqp_info; 1957 struct irdma_cq_uk_init_info *ukinfo = &info.cq_uk_init_info; 1958 unsigned long flags; 1959 int err_code; 1960 int entries = attr->cqe; 1961 1962 err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev); 1963 if (err_code) 1964 return err_code; 1965 err_code = irdma_alloc_rsrc(rf, rf->allocated_cqs, rf->max_cq, &cq_num, 1966 &rf->next_cq); 1967 if (err_code) 1968 return err_code; 1969 1970 cq = &iwcq->sc_cq; 1971 cq->back_cq = iwcq; 1972 spin_lock_init(&iwcq->lock); 1973 INIT_LIST_HEAD(&iwcq->resize_list); 1974 info.dev = dev; 1975 ukinfo->cq_size = max(entries, 4); 1976 ukinfo->cq_id = cq_num; 1977 iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size; 1978 if (attr->comp_vector < rf->ceqs_count) 1979 info.ceq_id = attr->comp_vector; 1980 info.ceq_id_valid = true; 1981 info.ceqe_mask = 1; 1982 info.type = IRDMA_CQ_TYPE_IWARP; 1983 info.vsi = &iwdev->vsi; 1984 1985 if (udata) { 1986 struct irdma_ucontext *ucontext; 1987 struct irdma_create_cq_req req = {}; 1988 struct irdma_cq_mr *cqmr; 1989 struct irdma_pbl *iwpbl; 1990 struct irdma_pbl *iwpbl_shadow; 1991 struct irdma_cq_mr *cqmr_shadow; 1992 1993 iwcq->user_mode = true; 1994 ucontext = 1995 rdma_udata_to_drv_context(udata, struct irdma_ucontext, 1996 ibucontext); 1997 if (ib_copy_from_udata(&req, udata, 1998 min(sizeof(req), udata->inlen))) { 1999 err_code = -EFAULT; 2000 goto cq_free_rsrc; 2001 } 2002 2003 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); 2004 iwpbl = irdma_get_pbl((unsigned long)req.user_cq_buf, 2005 &ucontext->cq_reg_mem_list); 2006 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); 2007 if (!iwpbl) { 2008 err_code = -EPROTO; 2009 goto cq_free_rsrc; 2010 } 2011 2012 iwcq->iwpbl = iwpbl; 2013 iwcq->cq_mem_size = 0; 2014 cqmr = &iwpbl->cq_mr; 2015 2016 if (rf->sc_dev.hw_attrs.uk_attrs.feature_flags & 2017 IRDMA_FEATURE_CQ_RESIZE && !ucontext->legacy_mode) { 2018 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); 2019 iwpbl_shadow = irdma_get_pbl( 2020 (unsigned long)req.user_shadow_area, 2021 &ucontext->cq_reg_mem_list); 2022 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); 2023 2024 if (!iwpbl_shadow) { 2025 err_code = -EPROTO; 2026 goto cq_free_rsrc; 2027 } 2028 iwcq->iwpbl_shadow = iwpbl_shadow; 2029 cqmr_shadow = &iwpbl_shadow->cq_mr; 2030 info.shadow_area_pa = cqmr_shadow->cq_pbl.addr; 2031 cqmr->split = true; 2032 } else { 2033 info.shadow_area_pa = cqmr->shadow; 2034 } 
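		/*
		 * A virtually mapped user CQ is described to HW by its first
		 * PBLE index; a physically contiguous CQ passes the base
		 * physical address directly.
		 */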
2035 if (iwpbl->pbl_allocated) { 2036 info.virtual_map = true; 2037 info.pbl_chunk_size = 1; 2038 info.first_pm_pbl_idx = cqmr->cq_pbl.idx; 2039 } else { 2040 info.cq_base_pa = cqmr->cq_pbl.addr; 2041 } 2042 } else { 2043 /* Kmode allocations */ 2044 int rsize; 2045 2046 if (entries > rf->max_cqe) { 2047 err_code = -EINVAL; 2048 goto cq_free_rsrc; 2049 } 2050 2051 entries++; 2052 if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) 2053 entries *= 2; 2054 ukinfo->cq_size = entries; 2055 2056 rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_cqe); 2057 iwcq->kmem.size = ALIGN(round_up(rsize, 256), 256); 2058 iwcq->kmem.va = dma_alloc_coherent(dev->hw->device, 2059 iwcq->kmem.size, 2060 &iwcq->kmem.pa, GFP_KERNEL); 2061 if (!iwcq->kmem.va) { 2062 err_code = -ENOMEM; 2063 goto cq_free_rsrc; 2064 } 2065 2066 iwcq->kmem_shadow.size = ALIGN(IRDMA_SHADOW_AREA_SIZE << 3, 2067 64); 2068 iwcq->kmem_shadow.va = dma_alloc_coherent(dev->hw->device, 2069 iwcq->kmem_shadow.size, 2070 &iwcq->kmem_shadow.pa, 2071 GFP_KERNEL); 2072 if (!iwcq->kmem_shadow.va) { 2073 err_code = -ENOMEM; 2074 goto cq_free_rsrc; 2075 } 2076 info.shadow_area_pa = iwcq->kmem_shadow.pa; 2077 ukinfo->shadow_area = iwcq->kmem_shadow.va; 2078 ukinfo->cq_base = iwcq->kmem.va; 2079 info.cq_base_pa = iwcq->kmem.pa; 2080 } 2081 2082 if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) 2083 info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2, 2084 (u32)IRDMA_MAX_CQ_READ_THRESH); 2085 2086 if (irdma_sc_cq_init(cq, &info)) { 2087 ibdev_dbg(&iwdev->ibdev, "VERBS: init cq fail\n"); 2088 err_code = -EPROTO; 2089 goto cq_free_rsrc; 2090 } 2091 2092 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); 2093 if (!cqp_request) { 2094 err_code = -ENOMEM; 2095 goto cq_free_rsrc; 2096 } 2097 2098 cqp_info = &cqp_request->info; 2099 cqp_info->cqp_cmd = IRDMA_OP_CQ_CREATE; 2100 cqp_info->post_sq = 1; 2101 cqp_info->in.u.cq_create.cq = cq; 2102 cqp_info->in.u.cq_create.check_overflow = true; 2103 cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request; 2104 status = irdma_handle_cqp_op(rf, cqp_request); 2105 irdma_put_cqp_request(&rf->cqp, cqp_request); 2106 if (status) { 2107 err_code = -ENOMEM; 2108 goto cq_free_rsrc; 2109 } 2110 2111 if (udata) { 2112 struct irdma_create_cq_resp resp = {}; 2113 2114 resp.cq_id = info.cq_uk_init_info.cq_id; 2115 resp.cq_size = info.cq_uk_init_info.cq_size; 2116 if (ib_copy_to_udata(udata, &resp, 2117 min(sizeof(resp), udata->outlen))) { 2118 ibdev_dbg(&iwdev->ibdev, 2119 "VERBS: copy to user data\n"); 2120 err_code = -EPROTO; 2121 goto cq_destroy; 2122 } 2123 } 2124 return 0; 2125 cq_destroy: 2126 irdma_cq_wq_destroy(rf, cq); 2127 cq_free_rsrc: 2128 irdma_cq_free_rsrc(rf, iwcq); 2129 2130 return err_code; 2131 } 2132 2133 /** 2134 * irdma_get_mr_access - get hw MR access permissions from IB access flags 2135 * @access: IB access flags 2136 */ 2137 static inline u16 irdma_get_mr_access(int access) 2138 { 2139 u16 hw_access = 0; 2140 2141 hw_access |= (access & IB_ACCESS_LOCAL_WRITE) ? 2142 IRDMA_ACCESS_FLAGS_LOCALWRITE : 0; 2143 hw_access |= (access & IB_ACCESS_REMOTE_WRITE) ? 2144 IRDMA_ACCESS_FLAGS_REMOTEWRITE : 0; 2145 hw_access |= (access & IB_ACCESS_REMOTE_READ) ? 2146 IRDMA_ACCESS_FLAGS_REMOTEREAD : 0; 2147 hw_access |= (access & IB_ACCESS_MW_BIND) ? 2148 IRDMA_ACCESS_FLAGS_BIND_WINDOW : 0; 2149 hw_access |= (access & IB_ZERO_BASED) ? 
		     IRDMA_ACCESS_FLAGS_ZERO_BASED : 0;
	hw_access |= IRDMA_ACCESS_FLAGS_LOCALREAD;

	return hw_access;
}

/**
 * irdma_free_stag - free stag resource
 * @iwdev: irdma device
 * @stag: stag to free
 */
static void irdma_free_stag(struct irdma_device *iwdev, u32 stag)
{
	u32 stag_idx;

	stag_idx = (stag & iwdev->rf->mr_stagmask) >> IRDMA_CQPSQ_STAG_IDX_S;
	irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_mrs, stag_idx);
}

/**
 * irdma_create_stag - create random stag
 * @iwdev: irdma device
 */
static u32 irdma_create_stag(struct irdma_device *iwdev)
{
	u32 stag = 0;
	u32 stag_index = 0;
	u32 next_stag_index;
	u32 driver_key;
	u32 random;
	u8 consumer_key;
	int ret;

	get_random_bytes(&random, sizeof(random));
	consumer_key = (u8)random;

	driver_key = random & ~iwdev->rf->mr_stagmask;
	next_stag_index = (random & iwdev->rf->mr_stagmask) >> 8;
	next_stag_index %= iwdev->rf->max_mr;

	ret = irdma_alloc_rsrc(iwdev->rf, iwdev->rf->allocated_mrs,
			       iwdev->rf->max_mr, &stag_index,
			       &next_stag_index);
	if (ret)
		return stag;
	stag = stag_index << IRDMA_CQPSQ_STAG_IDX_S;
	stag |= driver_key;
	stag += (u32)consumer_key;

	return stag;
}

/**
 * irdma_next_pbl_addr - Get next pbl address
 * @pbl: pointer to a pble
 * @pinfo: info pointer
 * @idx: index
 */
static inline u64 *irdma_next_pbl_addr(u64 *pbl, struct irdma_pble_info **pinfo,
				       u32 *idx)
{
	*idx += 1;
	if (!(*pinfo) || *idx != (*pinfo)->cnt)
		return ++pbl;
	*idx = 0;
	(*pinfo)++;

	return (*pinfo)->addr;
}

/**
 * irdma_copy_user_pgaddrs - copy user page addresses into the pble list
 * @iwmr: iwmr for IB's user page addresses
 * @pbl: pble pointer to save 1 level or 0 level pble
 * @level: indicated level 0, 1 or 2
 */
static void irdma_copy_user_pgaddrs(struct irdma_mr *iwmr, u64 *pbl,
				    enum irdma_pble_level level)
{
	struct ib_umem *region = iwmr->region;
	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
	struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
	struct irdma_pble_info *pinfo;
	struct ib_block_iter biter;
	u32 idx = 0;
	u32 pbl_cnt = 0;

	pinfo = (level == PBLE_LEVEL_1) ? NULL : palloc->level2.leaf;

	if (iwmr->type == IRDMA_MEMREG_TYPE_QP)
		iwpbl->qp_mr.sq_page = sg_page(region->sg_head.sgl);

	rdma_umem_for_each_dma_block(region, &biter, iwmr->page_size) {
		*pbl = rdma_block_iter_dma_address(&biter);
		if (++pbl_cnt == palloc->total_cnt)
			break;
		pbl = irdma_next_pbl_addr(pbl, &pinfo, &idx);
	}
}

/**
 * irdma_check_mem_contiguous - check if pbls stored in arr are contiguous
 * @arr: lvl1 pbl array
 * @npages: page count
 * @pg_size: page size
 */
static bool irdma_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size)
{
	u32 pg_idx;

	for (pg_idx = 0; pg_idx < npages; pg_idx++) {
		if ((*arr + (pg_size * pg_idx)) != arr[pg_idx])
			return false;
	}

	return true;
}

/**
 * irdma_check_mr_contiguous - check if MR is physically contiguous
 * @palloc: pbl allocation struct
 * @pg_size: page size
 */
static bool irdma_check_mr_contiguous(struct irdma_pble_alloc *palloc,
				      u32 pg_size)
{
	struct irdma_pble_level2 *lvl2 = &palloc->level2;
	struct irdma_pble_info *leaf = lvl2->leaf;
	u64 *arr = NULL;
	u64 *start_addr = NULL;
	int i;
	bool ret;

	if (palloc->level == PBLE_LEVEL_1) {
		arr = palloc->level1.addr;
		ret = irdma_check_mem_contiguous(arr, palloc->total_cnt,
						 pg_size);
		return ret;
	}

	start_addr = leaf->addr;

	for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
		arr = leaf->addr;
		if ((*start_addr + (i * pg_size * PBLE_PER_PAGE)) != *arr)
			return false;
		ret = irdma_check_mem_contiguous(arr, leaf->cnt, pg_size);
		if (!ret)
			return false;
	}

	return true;
}

/**
 * irdma_setup_pbles - copy user page addresses to pbles
 * @rf: RDMA PCI function
 * @iwmr: mr pointer for this memory registration
 * @use_pbles: flag to use pbles
 */
static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr,
			     bool use_pbles)
{
	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
	struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
	struct irdma_pble_info *pinfo;
	u64 *pbl;
	enum irdma_status_code status;
	enum irdma_pble_level level = PBLE_LEVEL_1;

	if (use_pbles) {
		status = irdma_get_pble(rf->pble_rsrc, palloc, iwmr->page_cnt,
					false);
		if (status)
			return -ENOMEM;

		iwpbl->pbl_allocated = true;
		level = palloc->level;
		pinfo = (level == PBLE_LEVEL_1) ?
&palloc->level1 : 2330 palloc->level2.leaf; 2331 pbl = pinfo->addr; 2332 } else { 2333 pbl = iwmr->pgaddrmem; 2334 } 2335 2336 irdma_copy_user_pgaddrs(iwmr, pbl, level); 2337 2338 if (use_pbles) 2339 iwmr->pgaddrmem[0] = *pbl; 2340 2341 return 0; 2342 } 2343 2344 /** 2345 * irdma_handle_q_mem - handle memory for qp and cq 2346 * @iwdev: irdma device 2347 * @req: information for q memory management 2348 * @iwpbl: pble struct 2349 * @use_pbles: flag to use pble 2350 */ 2351 static int irdma_handle_q_mem(struct irdma_device *iwdev, 2352 struct irdma_mem_reg_req *req, 2353 struct irdma_pbl *iwpbl, bool use_pbles) 2354 { 2355 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc; 2356 struct irdma_mr *iwmr = iwpbl->iwmr; 2357 struct irdma_qp_mr *qpmr = &iwpbl->qp_mr; 2358 struct irdma_cq_mr *cqmr = &iwpbl->cq_mr; 2359 struct irdma_hmc_pble *hmc_p; 2360 u64 *arr = iwmr->pgaddrmem; 2361 u32 pg_size, total; 2362 int err = 0; 2363 bool ret = true; 2364 2365 pg_size = iwmr->page_size; 2366 err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles); 2367 if (err) 2368 return err; 2369 2370 if (use_pbles && palloc->level != PBLE_LEVEL_1) { 2371 irdma_free_pble(iwdev->rf->pble_rsrc, palloc); 2372 iwpbl->pbl_allocated = false; 2373 return -ENOMEM; 2374 } 2375 2376 if (use_pbles) 2377 arr = palloc->level1.addr; 2378 2379 switch (iwmr->type) { 2380 case IRDMA_MEMREG_TYPE_QP: 2381 total = req->sq_pages + req->rq_pages; 2382 hmc_p = &qpmr->sq_pbl; 2383 qpmr->shadow = (dma_addr_t)arr[total]; 2384 2385 if (use_pbles) { 2386 ret = irdma_check_mem_contiguous(arr, req->sq_pages, 2387 pg_size); 2388 if (ret) 2389 ret = irdma_check_mem_contiguous(&arr[req->sq_pages], 2390 req->rq_pages, 2391 pg_size); 2392 } 2393 2394 if (!ret) { 2395 hmc_p->idx = palloc->level1.idx; 2396 hmc_p = &qpmr->rq_pbl; 2397 hmc_p->idx = palloc->level1.idx + req->sq_pages; 2398 } else { 2399 hmc_p->addr = arr[0]; 2400 hmc_p = &qpmr->rq_pbl; 2401 hmc_p->addr = arr[req->sq_pages]; 2402 } 2403 break; 2404 case IRDMA_MEMREG_TYPE_CQ: 2405 hmc_p = &cqmr->cq_pbl; 2406 2407 if (!cqmr->split) 2408 cqmr->shadow = (dma_addr_t)arr[req->cq_pages]; 2409 2410 if (use_pbles) 2411 ret = irdma_check_mem_contiguous(arr, req->cq_pages, 2412 pg_size); 2413 2414 if (!ret) 2415 hmc_p->idx = palloc->level1.idx; 2416 else 2417 hmc_p->addr = arr[0]; 2418 break; 2419 default: 2420 ibdev_dbg(&iwdev->ibdev, "VERBS: MR type error\n"); 2421 err = -EINVAL; 2422 } 2423 2424 if (use_pbles && ret) { 2425 irdma_free_pble(iwdev->rf->pble_rsrc, palloc); 2426 iwpbl->pbl_allocated = false; 2427 } 2428 2429 return err; 2430 } 2431 2432 /** 2433 * irdma_hw_alloc_mw - create the hw memory window 2434 * @iwdev: irdma device 2435 * @iwmr: pointer to memory window info 2436 */ 2437 static int irdma_hw_alloc_mw(struct irdma_device *iwdev, struct irdma_mr *iwmr) 2438 { 2439 struct irdma_mw_alloc_info *info; 2440 struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd); 2441 struct irdma_cqp_request *cqp_request; 2442 struct cqp_cmds_info *cqp_info; 2443 enum irdma_status_code status; 2444 2445 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); 2446 if (!cqp_request) 2447 return -ENOMEM; 2448 2449 cqp_info = &cqp_request->info; 2450 info = &cqp_info->in.u.mw_alloc.info; 2451 memset(info, 0, sizeof(*info)); 2452 if (iwmr->ibmw.type == IB_MW_TYPE_1) 2453 info->mw_wide = true; 2454 2455 info->page_size = PAGE_SIZE; 2456 info->mw_stag_index = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S; 2457 info->pd_id = iwpd->sc_pd.pd_id; 2458 info->remote_access = true; 2459 cqp_info->cqp_cmd = 
IRDMA_OP_MW_ALLOC; 2460 cqp_info->post_sq = 1; 2461 cqp_info->in.u.mw_alloc.dev = &iwdev->rf->sc_dev; 2462 cqp_info->in.u.mw_alloc.scratch = (uintptr_t)cqp_request; 2463 status = irdma_handle_cqp_op(iwdev->rf, cqp_request); 2464 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); 2465 2466 return status ? -ENOMEM : 0; 2467 } 2468 2469 /** 2470 * irdma_alloc_mw - Allocate memory window 2471 * @ibmw: Memory Window 2472 * @udata: user data pointer 2473 */ 2474 static int irdma_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata) 2475 { 2476 struct irdma_device *iwdev = to_iwdev(ibmw->device); 2477 struct irdma_mr *iwmr = to_iwmw(ibmw); 2478 int err_code; 2479 u32 stag; 2480 2481 stag = irdma_create_stag(iwdev); 2482 if (!stag) 2483 return -ENOMEM; 2484 2485 iwmr->stag = stag; 2486 ibmw->rkey = stag; 2487 2488 err_code = irdma_hw_alloc_mw(iwdev, iwmr); 2489 if (err_code) { 2490 irdma_free_stag(iwdev, stag); 2491 return err_code; 2492 } 2493 2494 return 0; 2495 } 2496 2497 /** 2498 * irdma_dealloc_mw - Dealloc memory window 2499 * @ibmw: memory window structure. 2500 */ 2501 static int irdma_dealloc_mw(struct ib_mw *ibmw) 2502 { 2503 struct ib_pd *ibpd = ibmw->pd; 2504 struct irdma_pd *iwpd = to_iwpd(ibpd); 2505 struct irdma_mr *iwmr = to_iwmr((struct ib_mr *)ibmw); 2506 struct irdma_device *iwdev = to_iwdev(ibmw->device); 2507 struct irdma_cqp_request *cqp_request; 2508 struct cqp_cmds_info *cqp_info; 2509 struct irdma_dealloc_stag_info *info; 2510 2511 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); 2512 if (!cqp_request) 2513 return -ENOMEM; 2514 2515 cqp_info = &cqp_request->info; 2516 info = &cqp_info->in.u.dealloc_stag.info; 2517 memset(info, 0, sizeof(*info)); 2518 info->pd_id = iwpd->sc_pd.pd_id & 0x00007fff; 2519 info->stag_idx = ibmw->rkey >> IRDMA_CQPSQ_STAG_IDX_S; 2520 info->mr = false; 2521 cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG; 2522 cqp_info->post_sq = 1; 2523 cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev; 2524 cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request; 2525 irdma_handle_cqp_op(iwdev->rf, cqp_request); 2526 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); 2527 irdma_free_stag(iwdev, iwmr->stag); 2528 2529 return 0; 2530 } 2531 2532 /** 2533 * irdma_hw_alloc_stag - cqp command to allocate stag 2534 * @iwdev: irdma device 2535 * @iwmr: irdma mr pointer 2536 */ 2537 static int irdma_hw_alloc_stag(struct irdma_device *iwdev, 2538 struct irdma_mr *iwmr) 2539 { 2540 struct irdma_allocate_stag_info *info; 2541 struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd); 2542 enum irdma_status_code status; 2543 int err = 0; 2544 struct irdma_cqp_request *cqp_request; 2545 struct cqp_cmds_info *cqp_info; 2546 2547 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); 2548 if (!cqp_request) 2549 return -ENOMEM; 2550 2551 cqp_info = &cqp_request->info; 2552 info = &cqp_info->in.u.alloc_stag.info; 2553 memset(info, 0, sizeof(*info)); 2554 info->page_size = PAGE_SIZE; 2555 info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S; 2556 info->pd_id = iwpd->sc_pd.pd_id; 2557 info->total_len = iwmr->len; 2558 info->remote_access = true; 2559 cqp_info->cqp_cmd = IRDMA_OP_ALLOC_STAG; 2560 cqp_info->post_sq = 1; 2561 cqp_info->in.u.alloc_stag.dev = &iwdev->rf->sc_dev; 2562 cqp_info->in.u.alloc_stag.scratch = (uintptr_t)cqp_request; 2563 status = irdma_handle_cqp_op(iwdev->rf, cqp_request); 2564 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); 2565 if (status) 2566 err = -ENOMEM; 2567 2568 return err; 2569 } 2570 2571 /** 2572 * 
irdma_alloc_mr - register stag for fast memory registration
 * @pd: ibpd pointer
 * @mr_type: memory type for stag registration
 * @max_num_sg: max number of pages
 */
static struct ib_mr *irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
				    u32 max_num_sg)
{
	struct irdma_device *iwdev = to_iwdev(pd->device);
	struct irdma_pble_alloc *palloc;
	struct irdma_pbl *iwpbl;
	struct irdma_mr *iwmr;
	enum irdma_status_code status;
	u32 stag;
	int err_code = -ENOMEM;

	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
	if (!iwmr)
		return ERR_PTR(-ENOMEM);

	stag = irdma_create_stag(iwdev);
	if (!stag) {
		err_code = -ENOMEM;
		goto err;
	}

	iwmr->stag = stag;
	iwmr->ibmr.rkey = stag;
	iwmr->ibmr.lkey = stag;
	iwmr->ibmr.pd = pd;
	iwmr->ibmr.device = pd->device;
	iwpbl = &iwmr->iwpbl;
	iwpbl->iwmr = iwmr;
	iwmr->type = IRDMA_MEMREG_TYPE_MEM;
	palloc = &iwpbl->pble_alloc;
	iwmr->page_cnt = max_num_sg;
	status = irdma_get_pble(iwdev->rf->pble_rsrc, palloc, iwmr->page_cnt,
				true);
	if (status)
		goto err_get_pble;

	err_code = irdma_hw_alloc_stag(iwdev, iwmr);
	if (err_code)
		goto err_alloc_stag;

	iwpbl->pbl_allocated = true;

	return &iwmr->ibmr;
err_alloc_stag:
	irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
err_get_pble:
	irdma_free_stag(iwdev, stag);
err:
	kfree(iwmr);

	return ERR_PTR(err_code);
}

/**
 * irdma_set_page - populate pbl list for fmr
 * @ibmr: ib mem to access iwarp mr pointer
 * @addr: page dma address for pbl list
 */
static int irdma_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct irdma_mr *iwmr = to_iwmr(ibmr);
	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
	struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
	u64 *pbl;

	if (unlikely(iwmr->npages == iwmr->page_cnt))
		return -ENOMEM;

	pbl = palloc->level1.addr;
	pbl[iwmr->npages++] = addr;

	return 0;
}

/**
 * irdma_map_mr_sg - map an sg list for fmr
 * @ibmr: ib mem to access iwarp mr pointer
 * @sg: scatter gather list
 * @sg_nents: number of sg pages
 * @sg_offset: offset into the sg list
 */
static int irdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			   int sg_nents, unsigned int *sg_offset)
{
	struct irdma_mr *iwmr = to_iwmr(ibmr);

	iwmr->npages = 0;

	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, irdma_set_page);
}

/**
 * irdma_hwreg_mr - send cqp command for memory registration
 * @iwdev: irdma device
 * @iwmr: irdma mr pointer
 * @access: access for MR
 */
static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
			  u16 access)
{
	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
	struct irdma_reg_ns_stag_info *stag_info;
	struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
	struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
	enum irdma_status_code status;
	int err = 0;
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;

	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
	if (!cqp_request)
		return -ENOMEM;

	cqp_info = &cqp_request->info;
	stag_info = &cqp_info->in.u.mr_reg_non_shared.info;
	memset(stag_info, 0, sizeof(*stag_info));
	stag_info->va = iwpbl->user_base;
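	/*
	 * The stag carries the resource index in its upper bits and an 8-bit
	 * consumer key in the lowest byte (see irdma_create_stag()), so the
	 * two parts are split back out for the CQP descriptor below. As an
	 * example, assuming IRDMA_CQPSQ_STAG_IDX_S is 8 and a zero driver
	 * key, index 0x1234 with consumer key 0x56 gives stag 0x123456.
	 */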
2694 stag_info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S; 2695 stag_info->stag_key = (u8)iwmr->stag; 2696 stag_info->total_len = iwmr->len; 2697 stag_info->access_rights = irdma_get_mr_access(access); 2698 stag_info->pd_id = iwpd->sc_pd.pd_id; 2699 if (stag_info->access_rights & IRDMA_ACCESS_FLAGS_ZERO_BASED) 2700 stag_info->addr_type = IRDMA_ADDR_TYPE_ZERO_BASED; 2701 else 2702 stag_info->addr_type = IRDMA_ADDR_TYPE_VA_BASED; 2703 stag_info->page_size = iwmr->page_size; 2704 2705 if (iwpbl->pbl_allocated) { 2706 if (palloc->level == PBLE_LEVEL_1) { 2707 stag_info->first_pm_pbl_index = palloc->level1.idx; 2708 stag_info->chunk_size = 1; 2709 } else { 2710 stag_info->first_pm_pbl_index = palloc->level2.root.idx; 2711 stag_info->chunk_size = 3; 2712 } 2713 } else { 2714 stag_info->reg_addr_pa = iwmr->pgaddrmem[0]; 2715 } 2716 2717 cqp_info->cqp_cmd = IRDMA_OP_MR_REG_NON_SHARED; 2718 cqp_info->post_sq = 1; 2719 cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->rf->sc_dev; 2720 cqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request; 2721 status = irdma_handle_cqp_op(iwdev->rf, cqp_request); 2722 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); 2723 if (status) 2724 err = -ENOMEM; 2725 2726 return err; 2727 } 2728 2729 /** 2730 * irdma_reg_user_mr - Register a user memory region 2731 * @pd: ptr of pd 2732 * @start: virtual start address 2733 * @len: length of mr 2734 * @virt: virtual address 2735 * @access: access of mr 2736 * @udata: user data 2737 */ 2738 static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len, 2739 u64 virt, int access, 2740 struct ib_udata *udata) 2741 { 2742 struct irdma_device *iwdev = to_iwdev(pd->device); 2743 struct irdma_ucontext *ucontext; 2744 struct irdma_pble_alloc *palloc; 2745 struct irdma_pbl *iwpbl; 2746 struct irdma_mr *iwmr; 2747 struct ib_umem *region; 2748 struct irdma_mem_reg_req req; 2749 u32 total, stag = 0; 2750 u8 shadow_pgcnt = 1; 2751 bool use_pbles = false; 2752 unsigned long flags; 2753 int err = -EINVAL; 2754 int ret; 2755 2756 if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size) 2757 return ERR_PTR(-EINVAL); 2758 2759 region = ib_umem_get(pd->device, start, len, access); 2760 2761 if (IS_ERR(region)) { 2762 ibdev_dbg(&iwdev->ibdev, 2763 "VERBS: Failed to create ib_umem region\n"); 2764 return (struct ib_mr *)region; 2765 } 2766 2767 if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) { 2768 ib_umem_release(region); 2769 return ERR_PTR(-EFAULT); 2770 } 2771 2772 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL); 2773 if (!iwmr) { 2774 ib_umem_release(region); 2775 return ERR_PTR(-ENOMEM); 2776 } 2777 2778 iwpbl = &iwmr->iwpbl; 2779 iwpbl->iwmr = iwmr; 2780 iwmr->region = region; 2781 iwmr->ibmr.pd = pd; 2782 iwmr->ibmr.device = pd->device; 2783 iwmr->ibmr.iova = virt; 2784 iwmr->page_size = PAGE_SIZE; 2785 2786 if (req.reg_type == IRDMA_MEMREG_TYPE_MEM) { 2787 iwmr->page_size = ib_umem_find_best_pgsz(region, 2788 SZ_4K | SZ_2M | SZ_1G, 2789 virt); 2790 if (unlikely(!iwmr->page_size)) { 2791 kfree(iwmr); 2792 ib_umem_release(region); 2793 return ERR_PTR(-EOPNOTSUPP); 2794 } 2795 } 2796 iwmr->len = region->length; 2797 iwpbl->user_base = virt; 2798 palloc = &iwpbl->pble_alloc; 2799 iwmr->type = req.reg_type; 2800 iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size); 2801 2802 switch (req.reg_type) { 2803 case IRDMA_MEMREG_TYPE_QP: 2804 total = req.sq_pages + req.rq_pages + shadow_pgcnt; 2805 if (total > iwmr->page_cnt) { 2806 err = -EINVAL; 2807 goto error; 2808 } 2809 total = req.sq_pages + 
req.rq_pages; 2810 use_pbles = (total > 2); 2811 err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles); 2812 if (err) 2813 goto error; 2814 2815 ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, 2816 ibucontext); 2817 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags); 2818 list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list); 2819 iwpbl->on_list = true; 2820 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags); 2821 break; 2822 case IRDMA_MEMREG_TYPE_CQ: 2823 if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE) 2824 shadow_pgcnt = 0; 2825 total = req.cq_pages + shadow_pgcnt; 2826 if (total > iwmr->page_cnt) { 2827 err = -EINVAL; 2828 goto error; 2829 } 2830 2831 use_pbles = (req.cq_pages > 1); 2832 err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles); 2833 if (err) 2834 goto error; 2835 2836 ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, 2837 ibucontext); 2838 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); 2839 list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list); 2840 iwpbl->on_list = true; 2841 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); 2842 break; 2843 case IRDMA_MEMREG_TYPE_MEM: 2844 use_pbles = (iwmr->page_cnt != 1); 2845 2846 err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles); 2847 if (err) 2848 goto error; 2849 2850 if (use_pbles) { 2851 ret = irdma_check_mr_contiguous(palloc, 2852 iwmr->page_size); 2853 if (ret) { 2854 irdma_free_pble(iwdev->rf->pble_rsrc, palloc); 2855 iwpbl->pbl_allocated = false; 2856 } 2857 } 2858 2859 stag = irdma_create_stag(iwdev); 2860 if (!stag) { 2861 err = -ENOMEM; 2862 goto error; 2863 } 2864 2865 iwmr->stag = stag; 2866 iwmr->ibmr.rkey = stag; 2867 iwmr->ibmr.lkey = stag; 2868 err = irdma_hwreg_mr(iwdev, iwmr, access); 2869 if (err) { 2870 irdma_free_stag(iwdev, stag); 2871 goto error; 2872 } 2873 2874 break; 2875 default: 2876 goto error; 2877 } 2878 2879 iwmr->type = req.reg_type; 2880 2881 return &iwmr->ibmr; 2882 2883 error: 2884 if (palloc->level != PBLE_LEVEL_0 && iwpbl->pbl_allocated) 2885 irdma_free_pble(iwdev->rf->pble_rsrc, palloc); 2886 ib_umem_release(region); 2887 kfree(iwmr); 2888 2889 return ERR_PTR(err); 2890 } 2891 2892 /** 2893 * irdma_reg_phys_mr - register kernel physical memory 2894 * @pd: ibpd pointer 2895 * @addr: physical address of memory to register 2896 * @size: size of memory to register 2897 * @access: Access rights 2898 * @iova_start: start of virtual address for physical buffers 2899 */ 2900 struct ib_mr *irdma_reg_phys_mr(struct ib_pd *pd, u64 addr, u64 size, int access, 2901 u64 *iova_start) 2902 { 2903 struct irdma_device *iwdev = to_iwdev(pd->device); 2904 struct irdma_pbl *iwpbl; 2905 struct irdma_mr *iwmr; 2906 enum irdma_status_code status; 2907 u32 stag; 2908 int ret; 2909 2910 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL); 2911 if (!iwmr) 2912 return ERR_PTR(-ENOMEM); 2913 2914 iwmr->ibmr.pd = pd; 2915 iwmr->ibmr.device = pd->device; 2916 iwpbl = &iwmr->iwpbl; 2917 iwpbl->iwmr = iwmr; 2918 iwmr->type = IRDMA_MEMREG_TYPE_MEM; 2919 iwpbl->user_base = *iova_start; 2920 stag = irdma_create_stag(iwdev); 2921 if (!stag) { 2922 ret = -ENOMEM; 2923 goto err; 2924 } 2925 2926 iwmr->stag = stag; 2927 iwmr->ibmr.iova = *iova_start; 2928 iwmr->ibmr.rkey = stag; 2929 iwmr->ibmr.lkey = stag; 2930 iwmr->page_cnt = 1; 2931 iwmr->pgaddrmem[0] = addr; 2932 iwmr->len = size; 2933 iwmr->page_size = SZ_4K; 2934 status = irdma_hwreg_mr(iwdev, iwmr, access); 2935 if (status) { 2936 irdma_free_stag(iwdev, stag); 
2937 ret = -ENOMEM; 2938 goto err; 2939 } 2940 2941 return &iwmr->ibmr; 2942 2943 err: 2944 kfree(iwmr); 2945 2946 return ERR_PTR(ret); 2947 } 2948 2949 /** 2950 * irdma_get_dma_mr - register physical mem 2951 * @pd: ptr of pd 2952 * @acc: access for memory 2953 */ 2954 static struct ib_mr *irdma_get_dma_mr(struct ib_pd *pd, int acc) 2955 { 2956 u64 kva = 0; 2957 2958 return irdma_reg_phys_mr(pd, 0, 0, acc, &kva); 2959 } 2960 2961 /** 2962 * irdma_del_memlist - Deleting pbl list entries for CQ/QP 2963 * @iwmr: iwmr for IB's user page addresses 2964 * @ucontext: ptr to user context 2965 */ 2966 static void irdma_del_memlist(struct irdma_mr *iwmr, 2967 struct irdma_ucontext *ucontext) 2968 { 2969 struct irdma_pbl *iwpbl = &iwmr->iwpbl; 2970 unsigned long flags; 2971 2972 switch (iwmr->type) { 2973 case IRDMA_MEMREG_TYPE_CQ: 2974 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); 2975 if (iwpbl->on_list) { 2976 iwpbl->on_list = false; 2977 list_del(&iwpbl->list); 2978 } 2979 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); 2980 break; 2981 case IRDMA_MEMREG_TYPE_QP: 2982 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags); 2983 if (iwpbl->on_list) { 2984 iwpbl->on_list = false; 2985 list_del(&iwpbl->list); 2986 } 2987 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags); 2988 break; 2989 default: 2990 break; 2991 } 2992 } 2993 2994 /** 2995 * irdma_dereg_mr - deregister mr 2996 * @ib_mr: mr ptr for dereg 2997 * @udata: user data 2998 */ 2999 static int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata) 3000 { 3001 struct ib_pd *ibpd = ib_mr->pd; 3002 struct irdma_pd *iwpd = to_iwpd(ibpd); 3003 struct irdma_mr *iwmr = to_iwmr(ib_mr); 3004 struct irdma_device *iwdev = to_iwdev(ib_mr->device); 3005 struct irdma_dealloc_stag_info *info; 3006 struct irdma_pbl *iwpbl = &iwmr->iwpbl; 3007 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc; 3008 struct irdma_cqp_request *cqp_request; 3009 struct cqp_cmds_info *cqp_info; 3010 3011 if (iwmr->type != IRDMA_MEMREG_TYPE_MEM) { 3012 if (iwmr->region) { 3013 struct irdma_ucontext *ucontext; 3014 3015 ucontext = rdma_udata_to_drv_context(udata, 3016 struct irdma_ucontext, 3017 ibucontext); 3018 irdma_del_memlist(iwmr, ucontext); 3019 } 3020 goto done; 3021 } 3022 3023 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); 3024 if (!cqp_request) 3025 return -ENOMEM; 3026 3027 cqp_info = &cqp_request->info; 3028 info = &cqp_info->in.u.dealloc_stag.info; 3029 memset(info, 0, sizeof(*info)); 3030 info->pd_id = iwpd->sc_pd.pd_id & 0x00007fff; 3031 info->stag_idx = ib_mr->rkey >> IRDMA_CQPSQ_STAG_IDX_S; 3032 info->mr = true; 3033 if (iwpbl->pbl_allocated) 3034 info->dealloc_pbl = true; 3035 3036 cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG; 3037 cqp_info->post_sq = 1; 3038 cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev; 3039 cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request; 3040 irdma_handle_cqp_op(iwdev->rf, cqp_request); 3041 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); 3042 irdma_free_stag(iwdev, iwmr->stag); 3043 done: 3044 if (iwpbl->pbl_allocated) 3045 irdma_free_pble(iwdev->rf->pble_rsrc, palloc); 3046 ib_umem_release(iwmr->region); 3047 kfree(iwmr); 3048 3049 return 0; 3050 } 3051 3052 /** 3053 * irdma_copy_sg_list - copy sg list for qp 3054 * @sg_list: copied into sg_list 3055 * @sgl: copy from sgl 3056 * @num_sges: count of sg entries 3057 */ 3058 static void irdma_copy_sg_list(struct irdma_sge *sg_list, struct ib_sge *sgl, 3059 int num_sges) 3060 { 3061 unsigned int 
i; 3062 3063 for (i = 0; (i < num_sges) && (i < IRDMA_MAX_WQ_FRAGMENT_COUNT); i++) { 3064 sg_list[i].tag_off = sgl[i].addr; 3065 sg_list[i].len = sgl[i].length; 3066 sg_list[i].stag = sgl[i].lkey; 3067 } 3068 } 3069 3070 /** 3071 * irdma_post_send - kernel application wr 3072 * @ibqp: qp ptr for wr 3073 * @ib_wr: work request ptr 3074 * @bad_wr: return of bad wr if err 3075 */ 3076 static int irdma_post_send(struct ib_qp *ibqp, 3077 const struct ib_send_wr *ib_wr, 3078 const struct ib_send_wr **bad_wr) 3079 { 3080 struct irdma_qp *iwqp; 3081 struct irdma_qp_uk *ukqp; 3082 struct irdma_sc_dev *dev; 3083 struct irdma_post_sq_info info; 3084 enum irdma_status_code ret; 3085 int err = 0; 3086 unsigned long flags; 3087 bool inv_stag; 3088 struct irdma_ah *ah; 3089 bool reflush = false; 3090 3091 iwqp = to_iwqp(ibqp); 3092 ukqp = &iwqp->sc_qp.qp_uk; 3093 dev = &iwqp->iwdev->rf->sc_dev; 3094 3095 spin_lock_irqsave(&iwqp->lock, flags); 3096 if (iwqp->flush_issued && ukqp->sq_flush_complete) 3097 reflush = true; 3098 while (ib_wr) { 3099 memset(&info, 0, sizeof(info)); 3100 inv_stag = false; 3101 info.wr_id = (ib_wr->wr_id); 3102 if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all) 3103 info.signaled = true; 3104 if (ib_wr->send_flags & IB_SEND_FENCE) 3105 info.read_fence = true; 3106 switch (ib_wr->opcode) { 3107 case IB_WR_SEND_WITH_IMM: 3108 if (ukqp->qp_caps & IRDMA_SEND_WITH_IMM) { 3109 info.imm_data_valid = true; 3110 info.imm_data = ntohl(ib_wr->ex.imm_data); 3111 } else { 3112 err = -EINVAL; 3113 break; 3114 } 3115 fallthrough; 3116 case IB_WR_SEND: 3117 case IB_WR_SEND_WITH_INV: 3118 if (ib_wr->opcode == IB_WR_SEND || 3119 ib_wr->opcode == IB_WR_SEND_WITH_IMM) { 3120 if (ib_wr->send_flags & IB_SEND_SOLICITED) 3121 info.op_type = IRDMA_OP_TYPE_SEND_SOL; 3122 else 3123 info.op_type = IRDMA_OP_TYPE_SEND; 3124 } else { 3125 if (ib_wr->send_flags & IB_SEND_SOLICITED) 3126 info.op_type = IRDMA_OP_TYPE_SEND_SOL_INV; 3127 else 3128 info.op_type = IRDMA_OP_TYPE_SEND_INV; 3129 info.stag_to_inv = ib_wr->ex.invalidate_rkey; 3130 } 3131 3132 if (ib_wr->send_flags & IB_SEND_INLINE) { 3133 info.op.inline_send.data = (void *)(unsigned long) 3134 ib_wr->sg_list[0].addr; 3135 info.op.inline_send.len = ib_wr->sg_list[0].length; 3136 if (iwqp->ibqp.qp_type == IB_QPT_UD || 3137 iwqp->ibqp.qp_type == IB_QPT_GSI) { 3138 ah = to_iwah(ud_wr(ib_wr)->ah); 3139 info.op.inline_send.ah_id = ah->sc_ah.ah_info.ah_idx; 3140 info.op.inline_send.qkey = ud_wr(ib_wr)->remote_qkey; 3141 info.op.inline_send.dest_qp = ud_wr(ib_wr)->remote_qpn; 3142 } 3143 ret = irdma_uk_inline_send(ukqp, &info, false); 3144 } else { 3145 info.op.send.num_sges = ib_wr->num_sge; 3146 info.op.send.sg_list = (struct irdma_sge *) 3147 ib_wr->sg_list; 3148 if (iwqp->ibqp.qp_type == IB_QPT_UD || 3149 iwqp->ibqp.qp_type == IB_QPT_GSI) { 3150 ah = to_iwah(ud_wr(ib_wr)->ah); 3151 info.op.send.ah_id = ah->sc_ah.ah_info.ah_idx; 3152 info.op.send.qkey = ud_wr(ib_wr)->remote_qkey; 3153 info.op.send.dest_qp = ud_wr(ib_wr)->remote_qpn; 3154 } 3155 ret = irdma_uk_send(ukqp, &info, false); 3156 } 3157 3158 if (ret) { 3159 if (ret == IRDMA_ERR_QP_TOOMANY_WRS_POSTED) 3160 err = -ENOMEM; 3161 else 3162 err = -EINVAL; 3163 } 3164 break; 3165 case IB_WR_RDMA_WRITE_WITH_IMM: 3166 if (ukqp->qp_caps & IRDMA_WRITE_WITH_IMM) { 3167 info.imm_data_valid = true; 3168 info.imm_data = ntohl(ib_wr->ex.imm_data); 3169 } else { 3170 err = -EINVAL; 3171 break; 3172 } 3173 fallthrough; 3174 case IB_WR_RDMA_WRITE: 3175 if (ib_wr->send_flags & IB_SEND_SOLICITED) 3176 
info.op_type = IRDMA_OP_TYPE_RDMA_WRITE_SOL; 3177 else 3178 info.op_type = IRDMA_OP_TYPE_RDMA_WRITE; 3179 3180 if (ib_wr->send_flags & IB_SEND_INLINE) { 3181 info.op.inline_rdma_write.data = (void *)(uintptr_t)ib_wr->sg_list[0].addr; 3182 info.op.inline_rdma_write.len = ib_wr->sg_list[0].length; 3183 info.op.inline_rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr; 3184 info.op.inline_rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey; 3185 ret = irdma_uk_inline_rdma_write(ukqp, &info, false); 3186 } else { 3187 info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list; 3188 info.op.rdma_write.num_lo_sges = ib_wr->num_sge; 3189 info.op.rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr; 3190 info.op.rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey; 3191 ret = irdma_uk_rdma_write(ukqp, &info, false); 3192 } 3193 3194 if (ret) { 3195 if (ret == IRDMA_ERR_QP_TOOMANY_WRS_POSTED) 3196 err = -ENOMEM; 3197 else 3198 err = -EINVAL; 3199 } 3200 break; 3201 case IB_WR_RDMA_READ_WITH_INV: 3202 inv_stag = true; 3203 fallthrough; 3204 case IB_WR_RDMA_READ: 3205 if (ib_wr->num_sge > 3206 dev->hw_attrs.uk_attrs.max_hw_read_sges) { 3207 err = -EINVAL; 3208 break; 3209 } 3210 info.op_type = IRDMA_OP_TYPE_RDMA_READ; 3211 info.op.rdma_read.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr; 3212 info.op.rdma_read.rem_addr.stag = rdma_wr(ib_wr)->rkey; 3213 info.op.rdma_read.lo_sg_list = (void *)ib_wr->sg_list; 3214 info.op.rdma_read.num_lo_sges = ib_wr->num_sge; 3215 3216 ret = irdma_uk_rdma_read(ukqp, &info, inv_stag, false); 3217 if (ret) { 3218 if (ret == IRDMA_ERR_QP_TOOMANY_WRS_POSTED) 3219 err = -ENOMEM; 3220 else 3221 err = -EINVAL; 3222 } 3223 break; 3224 case IB_WR_LOCAL_INV: 3225 info.op_type = IRDMA_OP_TYPE_INV_STAG; 3226 info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey; 3227 ret = irdma_uk_stag_local_invalidate(ukqp, &info, true); 3228 if (ret) 3229 err = -ENOMEM; 3230 break; 3231 case IB_WR_REG_MR: { 3232 struct irdma_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr); 3233 struct irdma_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc; 3234 struct irdma_fast_reg_stag_info stag_info = {}; 3235 3236 stag_info.signaled = info.signaled; 3237 stag_info.read_fence = info.read_fence; 3238 stag_info.access_rights = irdma_get_mr_access(reg_wr(ib_wr)->access); 3239 stag_info.stag_key = reg_wr(ib_wr)->key & 0xff; 3240 stag_info.stag_idx = reg_wr(ib_wr)->key >> 8; 3241 stag_info.page_size = reg_wr(ib_wr)->mr->page_size; 3242 stag_info.wr_id = ib_wr->wr_id; 3243 stag_info.addr_type = IRDMA_ADDR_TYPE_VA_BASED; 3244 stag_info.va = (void *)(uintptr_t)iwmr->ibmr.iova; 3245 stag_info.total_len = iwmr->ibmr.length; 3246 stag_info.reg_addr_pa = *palloc->level1.addr; 3247 stag_info.first_pm_pbl_index = palloc->level1.idx; 3248 stag_info.local_fence = ib_wr->send_flags & IB_SEND_FENCE; 3249 if (iwmr->npages > IRDMA_MIN_PAGES_PER_FMR) 3250 stag_info.chunk_size = 1; 3251 ret = irdma_sc_mr_fast_register(&iwqp->sc_qp, &stag_info, 3252 true); 3253 if (ret) 3254 err = -ENOMEM; 3255 break; 3256 } 3257 default: 3258 err = -EINVAL; 3259 ibdev_dbg(&iwqp->iwdev->ibdev, 3260 "VERBS: upost_send bad opcode = 0x%x\n", 3261 ib_wr->opcode); 3262 break; 3263 } 3264 3265 if (err) 3266 break; 3267 ib_wr = ib_wr->next; 3268 } 3269 3270 if (!iwqp->flush_issued && iwqp->hw_iwarp_state <= IRDMA_QP_STATE_RTS) { 3271 irdma_uk_qp_post_wr(ukqp); 3272 spin_unlock_irqrestore(&iwqp->lock, flags); 3273 } else if (reflush) { 3274 ukqp->sq_flush_complete = false; 3275 spin_unlock_irqrestore(&iwqp->lock, flags); 3276 irdma_flush_wqes(iwqp, 
				 IRDMA_FLUSH_SQ | IRDMA_REFLUSH);
	} else {
		spin_unlock_irqrestore(&iwqp->lock, flags);
	}
	if (err)
		*bad_wr = ib_wr;

	return err;
}

/**
 * irdma_post_recv - post receive wr for kernel application
 * @ibqp: ib qp pointer
 * @ib_wr: work request for receive
 * @bad_wr: bad wr caused an error
 */
static int irdma_post_recv(struct ib_qp *ibqp,
			   const struct ib_recv_wr *ib_wr,
			   const struct ib_recv_wr **bad_wr)
{
	struct irdma_qp *iwqp;
	struct irdma_qp_uk *ukqp;
	struct irdma_post_rq_info post_recv = {};
	struct irdma_sge sg_list[IRDMA_MAX_WQ_FRAGMENT_COUNT];
	enum irdma_status_code ret = 0;
	unsigned long flags;
	int err = 0;
	bool reflush = false;

	iwqp = to_iwqp(ibqp);
	ukqp = &iwqp->sc_qp.qp_uk;

	spin_lock_irqsave(&iwqp->lock, flags);
	if (iwqp->flush_issued && ukqp->rq_flush_complete)
		reflush = true;
	while (ib_wr) {
		post_recv.num_sges = ib_wr->num_sge;
		post_recv.wr_id = ib_wr->wr_id;
		irdma_copy_sg_list(sg_list, ib_wr->sg_list, ib_wr->num_sge);
		post_recv.sg_list = sg_list;
		ret = irdma_uk_post_receive(ukqp, &post_recv);
		if (ret) {
			ibdev_dbg(&iwqp->iwdev->ibdev,
				  "VERBS: post_recv err %d\n", ret);
			if (ret == IRDMA_ERR_QP_TOOMANY_WRS_POSTED)
				err = -ENOMEM;
			else
				err = -EINVAL;
			goto out;
		}

		ib_wr = ib_wr->next;
	}

out:
	if (reflush) {
		ukqp->rq_flush_complete = false;
		spin_unlock_irqrestore(&iwqp->lock, flags);
		irdma_flush_wqes(iwqp, IRDMA_FLUSH_RQ | IRDMA_REFLUSH);
	} else {
		spin_unlock_irqrestore(&iwqp->lock, flags);
	}

	if (err)
		*bad_wr = ib_wr;

	return err;
}

/**
 * irdma_flush_err_to_ib_wc_status - convert a flush error code to an IB WC status
 * @opcode: iwarp flush code
 */
static enum ib_wc_status irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode opcode)
{
	switch (opcode) {
	case FLUSH_PROT_ERR:
		return IB_WC_LOC_PROT_ERR;
	case FLUSH_REM_ACCESS_ERR:
		return IB_WC_REM_ACCESS_ERR;
	case FLUSH_LOC_QP_OP_ERR:
		return IB_WC_LOC_QP_OP_ERR;
	case FLUSH_REM_OP_ERR:
		return IB_WC_REM_OP_ERR;
	case FLUSH_LOC_LEN_ERR:
		return IB_WC_LOC_LEN_ERR;
	case FLUSH_GENERAL_ERR:
		return IB_WC_WR_FLUSH_ERR;
	case FLUSH_FATAL_ERR:
	default:
		return IB_WC_FATAL_ERR;
	}
}

/**
 * irdma_process_cqe - process cqe info
 * @entry: processed cqe
 * @cq_poll_info: cqe info
 */
static void irdma_process_cqe(struct ib_wc *entry,
			      struct irdma_cq_poll_info *cq_poll_info)
{
	struct irdma_qp *iwqp;
	struct irdma_sc_qp *qp;

	entry->wc_flags = 0;
	entry->pkey_index = 0;
	entry->wr_id = cq_poll_info->wr_id;

	qp = cq_poll_info->qp_handle;
	iwqp = qp->qp_uk.back_qp;
	entry->qp = qp->qp_uk.back_qp;

	if (cq_poll_info->error) {
		entry->status = (cq_poll_info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) ?
3391 irdma_flush_err_to_ib_wc_status(cq_poll_info->minor_err) : IB_WC_GENERAL_ERR; 3392 3393 entry->vendor_err = cq_poll_info->major_err << 16 | 3394 cq_poll_info->minor_err; 3395 } else { 3396 entry->status = IB_WC_SUCCESS; 3397 if (cq_poll_info->imm_valid) { 3398 entry->ex.imm_data = htonl(cq_poll_info->imm_data); 3399 entry->wc_flags |= IB_WC_WITH_IMM; 3400 } 3401 if (cq_poll_info->ud_smac_valid) { 3402 ether_addr_copy(entry->smac, cq_poll_info->ud_smac); 3403 entry->wc_flags |= IB_WC_WITH_SMAC; 3404 } 3405 3406 if (cq_poll_info->ud_vlan_valid) { 3407 entry->vlan_id = cq_poll_info->ud_vlan & VLAN_VID_MASK; 3408 entry->wc_flags |= IB_WC_WITH_VLAN; 3409 entry->sl = cq_poll_info->ud_vlan >> VLAN_PRIO_SHIFT; 3410 } else { 3411 entry->sl = 0; 3412 } 3413 } 3414 3415 switch (cq_poll_info->op_type) { 3416 case IRDMA_OP_TYPE_RDMA_WRITE: 3417 case IRDMA_OP_TYPE_RDMA_WRITE_SOL: 3418 entry->opcode = IB_WC_RDMA_WRITE; 3419 break; 3420 case IRDMA_OP_TYPE_RDMA_READ_INV_STAG: 3421 case IRDMA_OP_TYPE_RDMA_READ: 3422 entry->opcode = IB_WC_RDMA_READ; 3423 break; 3424 case IRDMA_OP_TYPE_SEND_INV: 3425 case IRDMA_OP_TYPE_SEND_SOL: 3426 case IRDMA_OP_TYPE_SEND_SOL_INV: 3427 case IRDMA_OP_TYPE_SEND: 3428 entry->opcode = IB_WC_SEND; 3429 break; 3430 case IRDMA_OP_TYPE_FAST_REG_NSMR: 3431 entry->opcode = IB_WC_REG_MR; 3432 break; 3433 case IRDMA_OP_TYPE_INV_STAG: 3434 entry->opcode = IB_WC_LOCAL_INV; 3435 break; 3436 case IRDMA_OP_TYPE_REC_IMM: 3437 case IRDMA_OP_TYPE_REC: 3438 entry->opcode = cq_poll_info->op_type == IRDMA_OP_TYPE_REC_IMM ? 3439 IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV; 3440 if (qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_UD && 3441 cq_poll_info->stag_invalid_set) { 3442 entry->ex.invalidate_rkey = cq_poll_info->inv_stag; 3443 entry->wc_flags |= IB_WC_WITH_INVALIDATE; 3444 } 3445 break; 3446 default: 3447 ibdev_err(&iwqp->iwdev->ibdev, 3448 "Invalid opcode = %d in CQE\n", cq_poll_info->op_type); 3449 entry->status = IB_WC_GENERAL_ERR; 3450 return; 3451 } 3452 3453 if (qp->qp_uk.qp_type == IRDMA_QP_TYPE_ROCE_UD) { 3454 entry->src_qp = cq_poll_info->ud_src_qpn; 3455 entry->slid = 0; 3456 entry->wc_flags |= 3457 (IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE); 3458 entry->network_hdr_type = cq_poll_info->ipv4 ? 
3459 RDMA_NETWORK_IPV4 : 3460 RDMA_NETWORK_IPV6; 3461 } else { 3462 entry->src_qp = cq_poll_info->qp_id; 3463 } 3464 3465 entry->byte_len = cq_poll_info->bytes_xfered; 3466 } 3467 3468 /** 3469 * irdma_poll_one - poll one entry of the CQ 3470 * @ukcq: ukcq to poll 3471 * @cur_cqe: current CQE info to be filled in 3472 * @entry: ibv_wc object to be filled for non-extended CQ or NULL for extended CQ 3473 * 3474 * Returns the internal irdma device error code or 0 on success 3475 */ 3476 static inline int irdma_poll_one(struct irdma_cq_uk *ukcq, 3477 struct irdma_cq_poll_info *cur_cqe, 3478 struct ib_wc *entry) 3479 { 3480 int ret = irdma_uk_cq_poll_cmpl(ukcq, cur_cqe); 3481 3482 if (ret) 3483 return ret; 3484 3485 irdma_process_cqe(entry, cur_cqe); 3486 3487 return 0; 3488 } 3489 3490 /** 3491 * __irdma_poll_cq - poll cq for completion (kernel apps) 3492 * @iwcq: cq to poll 3493 * @num_entries: number of entries to poll 3494 * @entry: wr of a completed entry 3495 */ 3496 static int __irdma_poll_cq(struct irdma_cq *iwcq, int num_entries, struct ib_wc *entry) 3497 { 3498 struct list_head *tmp_node, *list_node; 3499 struct irdma_cq_buf *last_buf = NULL; 3500 struct irdma_cq_poll_info *cur_cqe = &iwcq->cur_cqe; 3501 struct irdma_cq_buf *cq_buf; 3502 enum irdma_status_code ret; 3503 struct irdma_device *iwdev; 3504 struct irdma_cq_uk *ukcq; 3505 bool cq_new_cqe = false; 3506 int resized_bufs = 0; 3507 int npolled = 0; 3508 3509 iwdev = to_iwdev(iwcq->ibcq.device); 3510 ukcq = &iwcq->sc_cq.cq_uk; 3511 3512 /* go through the list of previously resized CQ buffers */ 3513 list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) { 3514 cq_buf = container_of(list_node, struct irdma_cq_buf, list); 3515 while (npolled < num_entries) { 3516 ret = irdma_poll_one(&cq_buf->cq_uk, cur_cqe, entry + npolled); 3517 if (!ret) { 3518 ++npolled; 3519 cq_new_cqe = true; 3520 continue; 3521 } 3522 if (ret == IRDMA_ERR_Q_EMPTY) 3523 break; 3524 /* QP using the CQ is destroyed. Skip reporting this CQE */ 3525 if (ret == IRDMA_ERR_Q_DESTROYED) { 3526 cq_new_cqe = true; 3527 continue; 3528 } 3529 goto error; 3530 } 3531 3532 /* save the resized CQ buffer which received the last cqe */ 3533 if (cq_new_cqe) 3534 last_buf = cq_buf; 3535 cq_new_cqe = false; 3536 } 3537 3538 /* check the current CQ for new cqes */ 3539 while (npolled < num_entries) { 3540 ret = irdma_poll_one(ukcq, cur_cqe, entry + npolled); 3541 if (!ret) { 3542 ++npolled; 3543 cq_new_cqe = true; 3544 continue; 3545 } 3546 3547 if (ret == IRDMA_ERR_Q_EMPTY) 3548 break; 3549 /* QP using the CQ is destroyed. 
Skip reporting this CQE */
		if (ret == IRDMA_ERR_Q_DESTROYED) {
			cq_new_cqe = true;
			continue;
		}
		goto error;
	}

	if (cq_new_cqe)
		/* all previous CQ resizes are complete */
		resized_bufs = irdma_process_resize_list(iwcq, iwdev, NULL);
	else if (last_buf)
		/* only CQ resizes up to the last_buf are complete */
		resized_bufs = irdma_process_resize_list(iwcq, iwdev, last_buf);
	if (resized_bufs)
		/* report to the HW the number of complete CQ resizes */
		irdma_uk_cq_set_resized_cnt(ukcq, resized_bufs);

	return npolled;
error:
	ibdev_dbg(&iwdev->ibdev, "%s: Error polling CQ, irdma_err: %d\n",
		  __func__, ret);

	return -EINVAL;
}

/**
 * irdma_poll_cq - poll cq for completion (kernel apps)
 * @ibcq: cq to poll
 * @num_entries: number of entries to poll
 * @entry: array of work completion entries to fill
 */
static int irdma_poll_cq(struct ib_cq *ibcq, int num_entries,
			 struct ib_wc *entry)
{
	struct irdma_cq *iwcq;
	unsigned long flags;
	int ret;

	iwcq = to_iwcq(ibcq);

	spin_lock_irqsave(&iwcq->lock, flags);
	ret = __irdma_poll_cq(iwcq, num_entries, entry);
	spin_unlock_irqrestore(&iwcq->lock, flags);

	return ret;
}

/**
 * irdma_req_notify_cq - arm cq kernel application
 * @ibcq: cq to arm
 * @notify_flags: notification flags
 */
static int irdma_req_notify_cq(struct ib_cq *ibcq,
			       enum ib_cq_notify_flags notify_flags)
{
	struct irdma_cq *iwcq;
	struct irdma_cq_uk *ukcq;
	unsigned long flags;
	enum irdma_cmpl_notify cq_notify = IRDMA_CQ_COMPL_EVENT;

	iwcq = to_iwcq(ibcq);
	ukcq = &iwcq->sc_cq.cq_uk;
	if (notify_flags == IB_CQ_SOLICITED)
		cq_notify = IRDMA_CQ_COMPL_SOLICITED;

	spin_lock_irqsave(&iwcq->lock, flags);
	irdma_uk_cq_request_notification(ukcq, cq_notify);
	spin_unlock_irqrestore(&iwcq->lock, flags);

	return 0;
}

static int irdma_roce_port_immutable(struct ib_device *ibdev, u32 port_num,
				     struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;

	return 0;
}

static int irdma_iw_port_immutable(struct ib_device *ibdev, u32 port_num,
				   struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;
	immutable->gid_tbl_len = attr.gid_tbl_len;

	return 0;
}

static const char *const irdma_hw_stat_names[] = {
	/* 32bit names */
	[IRDMA_HW_STAT_INDEX_RXVLANERR] = "rxVlanErrors",
	[IRDMA_HW_STAT_INDEX_IP4RXDISCARD] = "ip4InDiscards",
	[IRDMA_HW_STAT_INDEX_IP4RXTRUNC] = "ip4InTruncatedPkts",
	[IRDMA_HW_STAT_INDEX_IP4TXNOROUTE] = "ip4OutNoRoutes",
	[IRDMA_HW_STAT_INDEX_IP6RXDISCARD] = "ip6InDiscards",
	[IRDMA_HW_STAT_INDEX_IP6RXTRUNC] = "ip6InTruncatedPkts",
	[IRDMA_HW_STAT_INDEX_IP6TXNOROUTE] = "ip6OutNoRoutes",
	[IRDMA_HW_STAT_INDEX_TCPRTXSEG] = "tcpRetransSegs",
	[IRDMA_HW_STAT_INDEX_TCPRXOPTERR] =
"tcpInOptErrors", 3666 [IRDMA_HW_STAT_INDEX_TCPRXPROTOERR] = "tcpInProtoErrors", 3667 [IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED] = "cnpHandled", 3668 [IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED] = "cnpIgnored", 3669 [IRDMA_HW_STAT_INDEX_TXNPCNPSENT] = "cnpSent", 3670 3671 /* 64bit names */ 3672 [IRDMA_HW_STAT_INDEX_IP4RXOCTS + IRDMA_HW_STAT_INDEX_MAX_32] = 3673 "ip4InOctets", 3674 [IRDMA_HW_STAT_INDEX_IP4RXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] = 3675 "ip4InPkts", 3676 [IRDMA_HW_STAT_INDEX_IP4RXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32] = 3677 "ip4InReasmRqd", 3678 [IRDMA_HW_STAT_INDEX_IP4RXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32] = 3679 "ip4InMcastOctets", 3680 [IRDMA_HW_STAT_INDEX_IP4RXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32] = 3681 "ip4InMcastPkts", 3682 [IRDMA_HW_STAT_INDEX_IP4TXOCTS + IRDMA_HW_STAT_INDEX_MAX_32] = 3683 "ip4OutOctets", 3684 [IRDMA_HW_STAT_INDEX_IP4TXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] = 3685 "ip4OutPkts", 3686 [IRDMA_HW_STAT_INDEX_IP4TXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32] = 3687 "ip4OutSegRqd", 3688 [IRDMA_HW_STAT_INDEX_IP4TXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32] = 3689 "ip4OutMcastOctets", 3690 [IRDMA_HW_STAT_INDEX_IP4TXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32] = 3691 "ip4OutMcastPkts", 3692 [IRDMA_HW_STAT_INDEX_IP6RXOCTS + IRDMA_HW_STAT_INDEX_MAX_32] = 3693 "ip6InOctets", 3694 [IRDMA_HW_STAT_INDEX_IP6RXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] = 3695 "ip6InPkts", 3696 [IRDMA_HW_STAT_INDEX_IP6RXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32] = 3697 "ip6InReasmRqd", 3698 [IRDMA_HW_STAT_INDEX_IP6RXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32] = 3699 "ip6InMcastOctets", 3700 [IRDMA_HW_STAT_INDEX_IP6RXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32] = 3701 "ip6InMcastPkts", 3702 [IRDMA_HW_STAT_INDEX_IP6TXOCTS + IRDMA_HW_STAT_INDEX_MAX_32] = 3703 "ip6OutOctets", 3704 [IRDMA_HW_STAT_INDEX_IP6TXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] = 3705 "ip6OutPkts", 3706 [IRDMA_HW_STAT_INDEX_IP6TXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32] = 3707 "ip6OutSegRqd", 3708 [IRDMA_HW_STAT_INDEX_IP6TXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32] = 3709 "ip6OutMcastOctets", 3710 [IRDMA_HW_STAT_INDEX_IP6TXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32] = 3711 "ip6OutMcastPkts", 3712 [IRDMA_HW_STAT_INDEX_TCPRXSEGS + IRDMA_HW_STAT_INDEX_MAX_32] = 3713 "tcpInSegs", 3714 [IRDMA_HW_STAT_INDEX_TCPTXSEG + IRDMA_HW_STAT_INDEX_MAX_32] = 3715 "tcpOutSegs", 3716 [IRDMA_HW_STAT_INDEX_RDMARXRDS + IRDMA_HW_STAT_INDEX_MAX_32] = 3717 "iwInRdmaReads", 3718 [IRDMA_HW_STAT_INDEX_RDMARXSNDS + IRDMA_HW_STAT_INDEX_MAX_32] = 3719 "iwInRdmaSends", 3720 [IRDMA_HW_STAT_INDEX_RDMARXWRS + IRDMA_HW_STAT_INDEX_MAX_32] = 3721 "iwInRdmaWrites", 3722 [IRDMA_HW_STAT_INDEX_RDMATXRDS + IRDMA_HW_STAT_INDEX_MAX_32] = 3723 "iwOutRdmaReads", 3724 [IRDMA_HW_STAT_INDEX_RDMATXSNDS + IRDMA_HW_STAT_INDEX_MAX_32] = 3725 "iwOutRdmaSends", 3726 [IRDMA_HW_STAT_INDEX_RDMATXWRS + IRDMA_HW_STAT_INDEX_MAX_32] = 3727 "iwOutRdmaWrites", 3728 [IRDMA_HW_STAT_INDEX_RDMAVBND + IRDMA_HW_STAT_INDEX_MAX_32] = 3729 "iwRdmaBnd", 3730 [IRDMA_HW_STAT_INDEX_RDMAVINV + IRDMA_HW_STAT_INDEX_MAX_32] = 3731 "iwRdmaInv", 3732 [IRDMA_HW_STAT_INDEX_UDPRXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] = 3733 "RxUDP", 3734 [IRDMA_HW_STAT_INDEX_UDPTXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] = 3735 "TxUDP", 3736 [IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS + IRDMA_HW_STAT_INDEX_MAX_32] = 3737 "RxECNMrkd", 3738 }; 3739 3740 static void irdma_get_dev_fw_str(struct ib_device *dev, char *str) 3741 { 3742 struct irdma_device *iwdev = to_iwdev(dev); 3743 3744 snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u", 3745 irdma_fw_major_ver(&iwdev->rf->sc_dev), 3746 irdma_fw_minor_ver(&iwdev->rf->sc_dev)); 3747 } 3748 3749 /** 
3750 * irdma_alloc_hw_port_stats - Allocate a hw stats structure 3751 * @ibdev: device pointer from stack 3752 * @port_num: port number 3753 */ 3754 static struct rdma_hw_stats *irdma_alloc_hw_port_stats(struct ib_device *ibdev, 3755 u32 port_num) 3756 { 3757 int num_counters = IRDMA_HW_STAT_INDEX_MAX_32 + 3758 IRDMA_HW_STAT_INDEX_MAX_64; 3759 unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN; 3760 3761 BUILD_BUG_ON(ARRAY_SIZE(irdma_hw_stat_names) != 3762 (IRDMA_HW_STAT_INDEX_MAX_32 + IRDMA_HW_STAT_INDEX_MAX_64)); 3763 3764 return rdma_alloc_hw_stats_struct(irdma_hw_stat_names, num_counters, 3765 lifespan); 3766 } 3767 3768 /** 3769 * irdma_get_hw_stats - Populates the rdma_hw_stats structure 3770 * @ibdev: device pointer from stack 3771 * @stats: stats pointer from stack 3772 * @port_num: port number 3773 * @index: which hw counter the stack is requesting we update 3774 */ 3775 static int irdma_get_hw_stats(struct ib_device *ibdev, 3776 struct rdma_hw_stats *stats, u32 port_num, 3777 int index) 3778 { 3779 struct irdma_device *iwdev = to_iwdev(ibdev); 3780 struct irdma_dev_hw_stats *hw_stats = &iwdev->vsi.pestat->hw_stats; 3781 3782 if (iwdev->rf->rdma_ver >= IRDMA_GEN_2) 3783 irdma_cqp_gather_stats_cmd(&iwdev->rf->sc_dev, iwdev->vsi.pestat, true); 3784 else 3785 irdma_cqp_gather_stats_gen1(&iwdev->rf->sc_dev, iwdev->vsi.pestat); 3786 3787 memcpy(&stats->value[0], hw_stats, sizeof(*hw_stats)); 3788 3789 return stats->num_counters; 3790 } 3791 3792 /** 3793 * irdma_query_gid - Query port GID 3794 * @ibdev: device pointer from stack 3795 * @port: port number 3796 * @index: Entry index 3797 * @gid: Global ID 3798 */ 3799 static int irdma_query_gid(struct ib_device *ibdev, u32 port, int index, 3800 union ib_gid *gid) 3801 { 3802 struct irdma_device *iwdev = to_iwdev(ibdev); 3803 3804 memset(gid->raw, 0, sizeof(gid->raw)); 3805 ether_addr_copy(gid->raw, iwdev->netdev->dev_addr); 3806 3807 return 0; 3808 } 3809 3810 /** 3811 * mcast_list_add - Add a new mcast item to list 3812 * @rf: RDMA PCI function 3813 * @new_elem: pointer to element to add 3814 */ 3815 static void mcast_list_add(struct irdma_pci_f *rf, 3816 struct mc_table_list *new_elem) 3817 { 3818 list_add(&new_elem->list, &rf->mc_qht_list.list); 3819 } 3820 3821 /** 3822 * mcast_list_del - Remove an mcast item from list 3823 * @mc_qht_elem: pointer to mcast table list element 3824 */ 3825 static void mcast_list_del(struct mc_table_list *mc_qht_elem) 3826 { 3827 if (mc_qht_elem) 3828 list_del(&mc_qht_elem->list); 3829 } 3830 3831 /** 3832 * mcast_list_lookup_ip - Search mcast list for address 3833 * @rf: RDMA PCI function 3834 * @ip_mcast: pointer to mcast IP address 3835 */ 3836 static struct mc_table_list *mcast_list_lookup_ip(struct irdma_pci_f *rf, 3837 u32 *ip_mcast) 3838 { 3839 struct mc_table_list *mc_qht_el; 3840 struct list_head *pos, *q; 3841 3842 list_for_each_safe (pos, q, &rf->mc_qht_list.list) { 3843 mc_qht_el = list_entry(pos, struct mc_table_list, list); 3844 if (!memcmp(mc_qht_el->mc_info.dest_ip, ip_mcast, 3845 sizeof(mc_qht_el->mc_info.dest_ip))) 3846 return mc_qht_el; 3847 } 3848 3849 return NULL; 3850 } 3851 3852 /** 3853 * irdma_mcast_cqp_op - perform a mcast cqp operation 3854 * @iwdev: irdma device 3855 * @mc_grp_ctx: mcast group info 3856 * @op: operation 3857 * 3858 * returns error status 3859 */ 3860 static int irdma_mcast_cqp_op(struct irdma_device *iwdev, 3861 struct irdma_mcast_grp_info *mc_grp_ctx, u8 op) 3862 { 3863 struct cqp_cmds_info *cqp_info; 3864 struct irdma_cqp_request *cqp_request; 3865 
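	/* all mcast CQP ops pass their group context via the mc_create union */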
enum irdma_status_code status; 3866 3867 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); 3868 if (!cqp_request) 3869 return -ENOMEM; 3870 3871 cqp_request->info.in.u.mc_create.info = *mc_grp_ctx; 3872 cqp_info = &cqp_request->info; 3873 cqp_info->cqp_cmd = op; 3874 cqp_info->post_sq = 1; 3875 cqp_info->in.u.mc_create.scratch = (uintptr_t)cqp_request; 3876 cqp_info->in.u.mc_create.cqp = &iwdev->rf->cqp.sc_cqp; 3877 status = irdma_handle_cqp_op(iwdev->rf, cqp_request); 3878 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); 3879 if (status) 3880 return -ENOMEM; 3881 3882 return 0; 3883 } 3884 3885 /** 3886 * irdma_mcast_mac - Get the multicast MAC for an IP address 3887 * @ip_addr: IPv4 or IPv6 address 3888 * @mac: pointer to result MAC address 3889 * @ipv4: flag indicating IPv4 or IPv6 3890 * 3891 */ 3892 void irdma_mcast_mac(u32 *ip_addr, u8 *mac, bool ipv4) 3893 { 3894 u8 *ip = (u8 *)ip_addr; 3895 3896 if (ipv4) { 3897 unsigned char mac4[ETH_ALEN] = {0x01, 0x00, 0x5E, 0x00, 3898 0x00, 0x00}; 3899 3900 mac4[3] = ip[2] & 0x7F; 3901 mac4[4] = ip[1]; 3902 mac4[5] = ip[0]; 3903 ether_addr_copy(mac, mac4); 3904 } else { 3905 unsigned char mac6[ETH_ALEN] = {0x33, 0x33, 0x00, 0x00, 3906 0x00, 0x00}; 3907 3908 mac6[2] = ip[3]; 3909 mac6[3] = ip[2]; 3910 mac6[4] = ip[1]; 3911 mac6[5] = ip[0]; 3912 ether_addr_copy(mac, mac6); 3913 } 3914 } 3915 3916 /** 3917 * irdma_attach_mcast - attach a qp to a multicast group 3918 * @ibqp: ptr to qp 3919 * @ibgid: pointer to global ID 3920 * @lid: local ID 3921 * 3922 * returns error status 3923 */ 3924 static int irdma_attach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid) 3925 { 3926 struct irdma_qp *iwqp = to_iwqp(ibqp); 3927 struct irdma_device *iwdev = iwqp->iwdev; 3928 struct irdma_pci_f *rf = iwdev->rf; 3929 struct mc_table_list *mc_qht_elem; 3930 struct irdma_mcast_grp_ctx_entry_info mcg_info = {}; 3931 unsigned long flags; 3932 u32 ip_addr[4] = {}; 3933 u32 mgn; 3934 u32 no_mgs; 3935 int ret = 0; 3936 bool ipv4; 3937 u16 vlan_id; 3938 union { 3939 struct sockaddr saddr; 3940 struct sockaddr_in saddr_in; 3941 struct sockaddr_in6 saddr_in6; 3942 } sgid_addr; 3943 unsigned char dmac[ETH_ALEN]; 3944 3945 rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid); 3946 3947 if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid)) { 3948 irdma_copy_ip_ntohl(ip_addr, 3949 sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32); 3950 irdma_netdev_vlan_ipv6(ip_addr, &vlan_id, NULL); 3951 ipv4 = false; 3952 ibdev_dbg(&iwdev->ibdev, 3953 "VERBS: qp_id=%d, IP6address=%pI6\n", ibqp->qp_num, 3954 ip_addr); 3955 irdma_mcast_mac(ip_addr, dmac, false); 3956 } else { 3957 ip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr); 3958 ipv4 = true; 3959 vlan_id = irdma_get_vlan_ipv4(ip_addr); 3960 irdma_mcast_mac(ip_addr, dmac, true); 3961 ibdev_dbg(&iwdev->ibdev, 3962 "VERBS: qp_id=%d, IP4address=%pI4, MAC=%pM\n", 3963 ibqp->qp_num, ip_addr, dmac); 3964 } 3965 3966 spin_lock_irqsave(&rf->qh_list_lock, flags); 3967 mc_qht_elem = mcast_list_lookup_ip(rf, ip_addr); 3968 if (!mc_qht_elem) { 3969 struct irdma_dma_mem *dma_mem_mc; 3970 3971 spin_unlock_irqrestore(&rf->qh_list_lock, flags); 3972 mc_qht_elem = kzalloc(sizeof(*mc_qht_elem), GFP_KERNEL); 3973 if (!mc_qht_elem) 3974 return -ENOMEM; 3975 3976 mc_qht_elem->mc_info.ipv4_valid = ipv4; 3977 memcpy(mc_qht_elem->mc_info.dest_ip, ip_addr, 3978 sizeof(mc_qht_elem->mc_info.dest_ip)); 3979 ret = irdma_alloc_rsrc(rf, rf->allocated_mcgs, rf->max_mcg, 3980 &mgn, &rf->next_mcg); 3981 if (ret) { 3982 kfree(mc_qht_elem); 3983 
return -ENOMEM; 3984 } 3985 3986 mc_qht_elem->mc_info.mgn = mgn; 3987 dma_mem_mc = &mc_qht_elem->mc_grp_ctx.dma_mem_mc; 3988 dma_mem_mc->size = ALIGN(sizeof(u64) * IRDMA_MAX_MGS_PER_CTX, 3989 IRDMA_HW_PAGE_SIZE); 3990 dma_mem_mc->va = dma_alloc_coherent(rf->hw.device, 3991 dma_mem_mc->size, 3992 &dma_mem_mc->pa, 3993 GFP_KERNEL); 3994 if (!dma_mem_mc->va) { 3995 irdma_free_rsrc(rf, rf->allocated_mcgs, mgn); 3996 kfree(mc_qht_elem); 3997 return -ENOMEM; 3998 } 3999 4000 mc_qht_elem->mc_grp_ctx.mg_id = (u16)mgn; 4001 memcpy(mc_qht_elem->mc_grp_ctx.dest_ip_addr, ip_addr, 4002 sizeof(mc_qht_elem->mc_grp_ctx.dest_ip_addr)); 4003 mc_qht_elem->mc_grp_ctx.ipv4_valid = ipv4; 4004 mc_qht_elem->mc_grp_ctx.vlan_id = vlan_id; 4005 if (vlan_id < VLAN_N_VID) 4006 mc_qht_elem->mc_grp_ctx.vlan_valid = true; 4007 mc_qht_elem->mc_grp_ctx.hmc_fcn_id = iwdev->vsi.fcn_id; 4008 mc_qht_elem->mc_grp_ctx.qs_handle = 4009 iwqp->sc_qp.vsi->qos[iwqp->sc_qp.user_pri].qs_handle; 4010 ether_addr_copy(mc_qht_elem->mc_grp_ctx.dest_mac_addr, dmac); 4011 4012 spin_lock_irqsave(&rf->qh_list_lock, flags); 4013 mcast_list_add(rf, mc_qht_elem); 4014 } else { 4015 if (mc_qht_elem->mc_grp_ctx.no_of_mgs == 4016 IRDMA_MAX_MGS_PER_CTX) { 4017 spin_unlock_irqrestore(&rf->qh_list_lock, flags); 4018 return -ENOMEM; 4019 } 4020 } 4021 4022 mcg_info.qp_id = iwqp->ibqp.qp_num; 4023 no_mgs = mc_qht_elem->mc_grp_ctx.no_of_mgs; 4024 irdma_sc_add_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info); 4025 spin_unlock_irqrestore(&rf->qh_list_lock, flags); 4026 4027 /* Only if there is a change do we need to modify or create */ 4028 if (!no_mgs) { 4029 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx, 4030 IRDMA_OP_MC_CREATE); 4031 } else if (no_mgs != mc_qht_elem->mc_grp_ctx.no_of_mgs) { 4032 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx, 4033 IRDMA_OP_MC_MODIFY); 4034 } else { 4035 return 0; 4036 } 4037 4038 if (ret) 4039 goto error; 4040 4041 return 0; 4042 4043 error: 4044 irdma_sc_del_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info); 4045 if (!mc_qht_elem->mc_grp_ctx.no_of_mgs) { 4046 mcast_list_del(mc_qht_elem); 4047 dma_free_coherent(rf->hw.device, 4048 mc_qht_elem->mc_grp_ctx.dma_mem_mc.size, 4049 mc_qht_elem->mc_grp_ctx.dma_mem_mc.va, 4050 mc_qht_elem->mc_grp_ctx.dma_mem_mc.pa); 4051 mc_qht_elem->mc_grp_ctx.dma_mem_mc.va = NULL; 4052 irdma_free_rsrc(rf, rf->allocated_mcgs, 4053 mc_qht_elem->mc_grp_ctx.mg_id); 4054 kfree(mc_qht_elem); 4055 } 4056 4057 return ret; 4058 } 4059 4060 /** 4061 * irdma_detach_mcast - detach a qp from a multicast group 4062 * @ibqp: ptr to qp 4063 * @ibgid: pointer to global ID 4064 * @lid: local ID 4065 * 4066 * returns error status 4067 */ 4068 static int irdma_detach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid) 4069 { 4070 struct irdma_qp *iwqp = to_iwqp(ibqp); 4071 struct irdma_device *iwdev = iwqp->iwdev; 4072 struct irdma_pci_f *rf = iwdev->rf; 4073 u32 ip_addr[4] = {}; 4074 struct mc_table_list *mc_qht_elem; 4075 struct irdma_mcast_grp_ctx_entry_info mcg_info = {}; 4076 int ret; 4077 unsigned long flags; 4078 union { 4079 struct sockaddr saddr; 4080 struct sockaddr_in saddr_in; 4081 struct sockaddr_in6 saddr_in6; 4082 } sgid_addr; 4083 4084 rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid); 4085 if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid)) 4086 irdma_copy_ip_ntohl(ip_addr, 4087 sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32); 4088 else 4089 ip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr); 4090 4091 spin_lock_irqsave(&rf->qh_list_lock, flags); 4092 mc_qht_elem = 
mcast_list_lookup_ip(rf, ip_addr); 4093 if (!mc_qht_elem) { 4094 spin_unlock_irqrestore(&rf->qh_list_lock, flags); 4095 ibdev_dbg(&iwdev->ibdev, 4096 "VERBS: address not found MCG\n"); 4097 return 0; 4098 } 4099 4100 mcg_info.qp_id = iwqp->ibqp.qp_num; 4101 irdma_sc_del_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info); 4102 if (!mc_qht_elem->mc_grp_ctx.no_of_mgs) { 4103 mcast_list_del(mc_qht_elem); 4104 spin_unlock_irqrestore(&rf->qh_list_lock, flags); 4105 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx, 4106 IRDMA_OP_MC_DESTROY); 4107 if (ret) { 4108 ibdev_dbg(&iwdev->ibdev, 4109 "VERBS: failed MC_DESTROY MCG\n"); 4110 spin_lock_irqsave(&rf->qh_list_lock, flags); 4111 mcast_list_add(rf, mc_qht_elem); 4112 spin_unlock_irqrestore(&rf->qh_list_lock, flags); 4113 return -EAGAIN; 4114 } 4115 4116 dma_free_coherent(rf->hw.device, 4117 mc_qht_elem->mc_grp_ctx.dma_mem_mc.size, 4118 mc_qht_elem->mc_grp_ctx.dma_mem_mc.va, 4119 mc_qht_elem->mc_grp_ctx.dma_mem_mc.pa); 4120 mc_qht_elem->mc_grp_ctx.dma_mem_mc.va = NULL; 4121 irdma_free_rsrc(rf, rf->allocated_mcgs, 4122 mc_qht_elem->mc_grp_ctx.mg_id); 4123 kfree(mc_qht_elem); 4124 } else { 4125 spin_unlock_irqrestore(&rf->qh_list_lock, flags); 4126 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx, 4127 IRDMA_OP_MC_MODIFY); 4128 if (ret) { 4129 ibdev_dbg(&iwdev->ibdev, 4130 "VERBS: failed Modify MCG\n"); 4131 return ret; 4132 } 4133 } 4134 4135 return 0; 4136 } 4137 4138 /** 4139 * irdma_create_ah - create address handle 4140 * @ibah: address handle 4141 * @attr: address handle attributes 4142 * @udata: User data 4143 * 4144 * returns 0 on success, error otherwise 4145 */ 4146 static int irdma_create_ah(struct ib_ah *ibah, 4147 struct rdma_ah_init_attr *attr, 4148 struct ib_udata *udata) 4149 { 4150 struct irdma_pd *pd = to_iwpd(ibah->pd); 4151 struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah); 4152 struct rdma_ah_attr *ah_attr = attr->ah_attr; 4153 const struct ib_gid_attr *sgid_attr; 4154 struct irdma_device *iwdev = to_iwdev(ibah->pd->device); 4155 struct irdma_pci_f *rf = iwdev->rf; 4156 struct irdma_sc_ah *sc_ah; 4157 u32 ah_id = 0; 4158 struct irdma_ah_info *ah_info; 4159 struct irdma_create_ah_resp uresp; 4160 union { 4161 struct sockaddr saddr; 4162 struct sockaddr_in saddr_in; 4163 struct sockaddr_in6 saddr_in6; 4164 } sgid_addr, dgid_addr; 4165 int err; 4166 u8 dmac[ETH_ALEN]; 4167 4168 err = irdma_alloc_rsrc(rf, rf->allocated_ahs, rf->max_ah, &ah_id, 4169 &rf->next_ah); 4170 if (err) 4171 return err; 4172 4173 ah->pd = pd; 4174 sc_ah = &ah->sc_ah; 4175 sc_ah->ah_info.ah_idx = ah_id; 4176 sc_ah->ah_info.vsi = &iwdev->vsi; 4177 irdma_sc_init_ah(&rf->sc_dev, sc_ah); 4178 ah->sgid_index = ah_attr->grh.sgid_index; 4179 sgid_attr = ah_attr->grh.sgid_attr; 4180 memcpy(&ah->dgid, &ah_attr->grh.dgid, sizeof(ah->dgid)); 4181 rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid_attr->gid); 4182 rdma_gid2ip((struct sockaddr *)&dgid_addr, &ah_attr->grh.dgid); 4183 ah->av.attrs = *ah_attr; 4184 ah->av.net_type = rdma_gid_attr_network_type(sgid_attr); 4185 ah->av.sgid_addr.saddr = sgid_addr.saddr; 4186 ah->av.dgid_addr.saddr = dgid_addr.saddr; 4187 ah_info = &sc_ah->ah_info; 4188 ah_info->ah_idx = ah_id; 4189 ah_info->pd_idx = pd->sc_pd.pd_id; 4190 if (ah_attr->ah_flags & IB_AH_GRH) { 4191 ah_info->flow_label = ah_attr->grh.flow_label; 4192 ah_info->hop_ttl = ah_attr->grh.hop_limit; 4193 ah_info->tc_tos = ah_attr->grh.traffic_class; 4194 } 4195 4196 ether_addr_copy(dmac, ah_attr->roce.dmac); 4197 if 
(rdma_gid_attr_network_type(sgid_attr) == RDMA_NETWORK_IPV4) { 4198 ah_info->ipv4_valid = true; 4199 ah_info->dest_ip_addr[0] = 4200 ntohl(dgid_addr.saddr_in.sin_addr.s_addr); 4201 ah_info->src_ip_addr[0] = 4202 ntohl(sgid_addr.saddr_in.sin_addr.s_addr); 4203 ah_info->do_lpbk = irdma_ipv4_is_lpb(ah_info->src_ip_addr[0], 4204 ah_info->dest_ip_addr[0]); 4205 if (ipv4_is_multicast(dgid_addr.saddr_in.sin_addr.s_addr)) { 4206 ah_info->do_lpbk = true; 4207 irdma_mcast_mac(ah_info->dest_ip_addr, dmac, true); 4208 } 4209 } else { 4210 irdma_copy_ip_ntohl(ah_info->dest_ip_addr, 4211 dgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32); 4212 irdma_copy_ip_ntohl(ah_info->src_ip_addr, 4213 sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32); 4214 ah_info->do_lpbk = irdma_ipv6_is_lpb(ah_info->src_ip_addr, 4215 ah_info->dest_ip_addr); 4216 if (rdma_is_multicast_addr(&dgid_addr.saddr_in6.sin6_addr)) { 4217 ah_info->do_lpbk = true; 4218 irdma_mcast_mac(ah_info->dest_ip_addr, dmac, false); 4219 } 4220 } 4221 4222 err = rdma_read_gid_l2_fields(sgid_attr, &ah_info->vlan_tag, 4223 ah_info->mac_addr); 4224 if (err) 4225 goto error; 4226 4227 ah_info->dst_arpindex = irdma_add_arp(iwdev->rf, ah_info->dest_ip_addr, 4228 ah_info->ipv4_valid, dmac); 4229 4230 if (ah_info->dst_arpindex == -1) { 4231 err = -EINVAL; 4232 goto error; 4233 } 4234 4235 if (ah_info->vlan_tag >= VLAN_N_VID && iwdev->dcb) 4236 ah_info->vlan_tag = 0; 4237 4238 if (ah_info->vlan_tag < VLAN_N_VID) { 4239 ah_info->insert_vlan_tag = true; 4240 ah_info->vlan_tag |= 4241 rt_tos2priority(ah_info->tc_tos) << VLAN_PRIO_SHIFT; 4242 } 4243 4244 err = irdma_ah_cqp_op(iwdev->rf, sc_ah, IRDMA_OP_AH_CREATE, 4245 attr->flags & RDMA_CREATE_AH_SLEEPABLE, 4246 irdma_gsi_ud_qp_ah_cb, sc_ah); 4247 4248 if (err) { 4249 ibdev_dbg(&iwdev->ibdev, 4250 "VERBS: CQP-OP Create AH fail"); 4251 goto error; 4252 } 4253 4254 if (!(attr->flags & RDMA_CREATE_AH_SLEEPABLE)) { 4255 int cnt = CQP_COMPL_WAIT_TIME_MS * CQP_TIMEOUT_THRESHOLD; 4256 4257 do { 4258 irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq); 4259 mdelay(1); 4260 } while (!sc_ah->ah_info.ah_valid && --cnt); 4261 4262 if (!cnt) { 4263 ibdev_dbg(&iwdev->ibdev, 4264 "VERBS: CQP create AH timed out"); 4265 err = -ETIMEDOUT; 4266 goto error; 4267 } 4268 } 4269 4270 if (udata) { 4271 uresp.ah_id = ah->sc_ah.ah_info.ah_idx; 4272 err = ib_copy_to_udata(udata, &uresp, 4273 min(sizeof(uresp), udata->outlen)); 4274 } 4275 return 0; 4276 4277 error: 4278 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah_id); 4279 4280 return err; 4281 } 4282 4283 /** 4284 * irdma_destroy_ah - Destroy address handle 4285 * @ibah: pointer to address handle 4286 * @ah_flags: flags for sleepable 4287 */ 4288 static int irdma_destroy_ah(struct ib_ah *ibah, u32 ah_flags) 4289 { 4290 struct irdma_device *iwdev = to_iwdev(ibah->device); 4291 struct irdma_ah *ah = to_iwah(ibah); 4292 4293 irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah, IRDMA_OP_AH_DESTROY, 4294 false, NULL, ah); 4295 4296 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, 4297 ah->sc_ah.ah_info.ah_idx); 4298 4299 return 0; 4300 } 4301 4302 /** 4303 * irdma_query_ah - Query address handle 4304 * @ibah: pointer to address handle 4305 * @ah_attr: address handle attributes 4306 */ 4307 static int irdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr) 4308 { 4309 struct irdma_ah *ah = to_iwah(ibah); 4310 4311 memset(ah_attr, 0, sizeof(*ah_attr)); 4312 if (ah->av.attrs.ah_flags & IB_AH_GRH) { 4313 ah_attr->ah_flags = IB_AH_GRH; 4314 ah_attr->grh.flow_label = ah->sc_ah.ah_info.flow_label; 4315 
ah_attr->grh.traffic_class = ah->sc_ah.ah_info.tc_tos; 4316 ah_attr->grh.hop_limit = ah->sc_ah.ah_info.hop_ttl; 4317 ah_attr->grh.sgid_index = ah->sgid_index; 4319 memcpy(&ah_attr->grh.dgid, &ah->dgid, 4320 sizeof(ah_attr->grh.dgid)); 4321 } 4322 4323 return 0; 4324 } 4325 4326 static enum rdma_link_layer irdma_get_link_layer(struct ib_device *ibdev, 4327 u32 port_num) 4328 { 4329 return IB_LINK_LAYER_ETHERNET; 4330 } 4331 4332 static __be64 irdma_mac_to_guid(struct net_device *ndev) 4333 { 4334 unsigned char *mac = ndev->dev_addr; 4335 __be64 guid; 4336 unsigned char *dst = (unsigned char *)&guid; 4337 4338 dst[0] = mac[0] ^ 2; 4339 dst[1] = mac[1]; 4340 dst[2] = mac[2]; 4341 dst[3] = 0xff; 4342 dst[4] = 0xfe; 4343 dst[5] = mac[3]; 4344 dst[6] = mac[4]; 4345 dst[7] = mac[5]; 4346 4347 return guid; 4348 } 4349 4350 static const struct ib_device_ops irdma_roce_dev_ops = { 4351 .attach_mcast = irdma_attach_mcast, 4352 .create_ah = irdma_create_ah, 4353 .create_user_ah = irdma_create_ah, 4354 .destroy_ah = irdma_destroy_ah, 4355 .detach_mcast = irdma_detach_mcast, 4356 .get_link_layer = irdma_get_link_layer, 4357 .get_port_immutable = irdma_roce_port_immutable, 4358 .modify_qp = irdma_modify_qp_roce, 4359 .query_ah = irdma_query_ah, 4360 .query_pkey = irdma_query_pkey, 4361 }; 4362 4363 static const struct ib_device_ops irdma_iw_dev_ops = { 4364 .modify_qp = irdma_modify_qp, 4365 .get_port_immutable = irdma_iw_port_immutable, 4366 .query_gid = irdma_query_gid, 4367 }; 4368 4369 static const struct ib_device_ops irdma_dev_ops = { 4370 .owner = THIS_MODULE, 4371 .driver_id = RDMA_DRIVER_IRDMA, 4372 .uverbs_abi_ver = IRDMA_ABI_VER, 4373 4374 .alloc_hw_port_stats = irdma_alloc_hw_port_stats, 4375 .alloc_mr = irdma_alloc_mr, 4376 .alloc_mw = irdma_alloc_mw, 4377 .alloc_pd = irdma_alloc_pd, 4378 .alloc_ucontext = irdma_alloc_ucontext, 4379 .create_cq = irdma_create_cq, 4380 .create_qp = irdma_create_qp, 4381 .dealloc_driver = irdma_ib_dealloc_device, 4382 .dealloc_mw = irdma_dealloc_mw, 4383 .dealloc_pd = irdma_dealloc_pd, 4384 .dealloc_ucontext = irdma_dealloc_ucontext, 4385 .dereg_mr = irdma_dereg_mr, 4386 .destroy_cq = irdma_destroy_cq, 4387 .destroy_qp = irdma_destroy_qp, 4388 .disassociate_ucontext = irdma_disassociate_ucontext, 4389 .get_dev_fw_str = irdma_get_dev_fw_str, 4390 .get_dma_mr = irdma_get_dma_mr, 4391 .get_hw_stats = irdma_get_hw_stats, 4392 .map_mr_sg = irdma_map_mr_sg, 4393 .mmap = irdma_mmap, 4394 .mmap_free = irdma_mmap_free, 4395 .poll_cq = irdma_poll_cq, 4396 .post_recv = irdma_post_recv, 4397 .post_send = irdma_post_send, 4398 .query_device = irdma_query_device, 4399 .query_port = irdma_query_port, 4400 .query_qp = irdma_query_qp, 4401 .reg_user_mr = irdma_reg_user_mr, 4402 .req_notify_cq = irdma_req_notify_cq, 4403 .resize_cq = irdma_resize_cq, 4404 INIT_RDMA_OBJ_SIZE(ib_pd, irdma_pd, ibpd), 4405 INIT_RDMA_OBJ_SIZE(ib_ucontext, irdma_ucontext, ibucontext), 4406 INIT_RDMA_OBJ_SIZE(ib_ah, irdma_ah, ibah), 4407 INIT_RDMA_OBJ_SIZE(ib_cq, irdma_cq, ibcq), 4408 INIT_RDMA_OBJ_SIZE(ib_mw, irdma_mr, ibmw), 4409 }; 4410 4411 /** 4412 * irdma_init_roce_device - initialization of roce rdma device 4413 * @iwdev: irdma device 4414 */ 4415 static void irdma_init_roce_device(struct irdma_device *iwdev) 4416 { 4417 iwdev->ibdev.node_type = RDMA_NODE_IB_CA; 4418 iwdev->ibdev.node_guid = irdma_mac_to_guid(iwdev->netdev); 4419 ib_set_device_ops(&iwdev->ibdev, &irdma_roce_dev_ops); 4420 } 4421 4422 /** 4423 * irdma_init_iw_device - initialization 
of iwarp rdma device 4424 * @iwdev: irdma device 4425 */ 4426 static int irdma_init_iw_device(struct irdma_device *iwdev) 4427 { 4428 struct net_device *netdev = iwdev->netdev; 4429 4430 iwdev->ibdev.node_type = RDMA_NODE_RNIC; 4431 ether_addr_copy((u8 *)&iwdev->ibdev.node_guid, netdev->dev_addr); 4432 iwdev->ibdev.ops.iw_add_ref = irdma_qp_add_ref; 4433 iwdev->ibdev.ops.iw_rem_ref = irdma_qp_rem_ref; 4434 iwdev->ibdev.ops.iw_get_qp = irdma_get_qp; 4435 iwdev->ibdev.ops.iw_connect = irdma_connect; 4436 iwdev->ibdev.ops.iw_accept = irdma_accept; 4437 iwdev->ibdev.ops.iw_reject = irdma_reject; 4438 iwdev->ibdev.ops.iw_create_listen = irdma_create_listen; 4439 iwdev->ibdev.ops.iw_destroy_listen = irdma_destroy_listen; 4440 memcpy(iwdev->ibdev.iw_ifname, netdev->name, 4441 sizeof(iwdev->ibdev.iw_ifname)); 4442 ib_set_device_ops(&iwdev->ibdev, &irdma_iw_dev_ops); 4443 4444 return 0; 4445 } 4446 4447 /** 4448 * irdma_init_rdma_device - initialization of rdma device 4449 * @iwdev: irdma device 4450 */ 4451 static int irdma_init_rdma_device(struct irdma_device *iwdev) 4452 { 4453 struct pci_dev *pcidev = iwdev->rf->pcidev; 4454 int ret; 4455 4456 if (iwdev->roce_mode) { 4457 irdma_init_roce_device(iwdev); 4458 } else { 4459 ret = irdma_init_iw_device(iwdev); 4460 if (ret) 4461 return ret; 4462 } 4463 iwdev->ibdev.phys_port_cnt = 1; 4464 iwdev->ibdev.num_comp_vectors = iwdev->rf->ceqs_count; 4465 iwdev->ibdev.dev.parent = &pcidev->dev; 4466 ib_set_device_ops(&iwdev->ibdev, &irdma_dev_ops); 4467 4468 return 0; 4469 } 4470 4471 /** 4472 * irdma_port_ibevent - indicate port event 4473 * @iwdev: irdma device 4474 */ 4475 void irdma_port_ibevent(struct irdma_device *iwdev) 4476 { 4477 struct ib_event event; 4478 4479 event.device = &iwdev->ibdev; 4480 event.element.port_num = 1; 4481 event.event = 4482 iwdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR; 4483 ib_dispatch_event(&event); 4484 } 4485 4486 /** 4487 * irdma_ib_unregister_device - unregister rdma device from IB 4488 * core 4489 * @iwdev: irdma device 4490 */ 4491 void irdma_ib_unregister_device(struct irdma_device *iwdev) 4492 { 4493 iwdev->iw_status = 0; 4494 irdma_port_ibevent(iwdev); 4495 ib_unregister_device(&iwdev->ibdev); 4496 } 4497 4498 /** 4499 * irdma_ib_register_device - register irdma device to IB core 4500 * @iwdev: irdma device 4501 */ 4502 int irdma_ib_register_device(struct irdma_device *iwdev) 4503 { 4504 int ret; 4505 4506 ret = irdma_init_rdma_device(iwdev); 4507 if (ret) 4508 return ret; 4509 4510 ret = ib_device_set_netdev(&iwdev->ibdev, iwdev->netdev, 1); 4511 if (ret) 4512 goto error; 4513 dma_set_max_seg_size(iwdev->rf->hw.device, UINT_MAX); 4514 ret = ib_register_device(&iwdev->ibdev, "irdma%d", iwdev->rf->hw.device); 4515 if (ret) 4516 goto error; 4517 4518 iwdev->iw_status = 1; 4519 irdma_port_ibevent(iwdev); 4520 4521 return 0; 4522 4523 error: 4524 if (ret) 4525 ibdev_dbg(&iwdev->ibdev, "VERBS: Register RDMA device fail\n"); 4526 4527 return ret; 4528 } 4529 4530 /** 4531 * irdma_ib_dealloc_device - deallocate ib device resources 4532 * @ibdev: ib device 4533 * 4534 * callback from ibdev dealloc_driver to deallocate resources 4535 * under the irdma device 4536 */ 4537 void irdma_ib_dealloc_device(struct ib_device *ibdev) 4538 { 4539 struct irdma_device *iwdev = to_iwdev(ibdev); 4540 4541 irdma_rt_deinit_hw(iwdev); 4542 irdma_ctrl_deinit_hw(iwdev->rf); 4543 kfree(iwdev->rf); 4544 } 4545
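Two of the helpers above derive link-layer identifiers from other addresses: irdma_mcast_mac builds the Ethernet multicast MAC for an IPv4 or IPv6 group address, and irdma_mac_to_guid expands the netdev MAC into an EUI-64 style node GUID. The short standalone userspace sketch below illustrates those derivations outside the driver, assuming the conventional RFC 1112/RFC 2464 multicast mappings; the helper names (ipv4_mcast_to_mac, ipv6_mcast_to_mac, mac_to_guid) and the sample addresses are illustrative only and do not exist in the driver, and the driver's own multicast routine works on host-order u32 words (see irdma_copy_ip_ntohl in its callers) rather than the shift/byte-array forms used here for portability.

/* Standalone illustration -- not part of verbs.c.  Build with: cc -o mcast_demo mcast_demo.c */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* RFC 1112 mapping: 01:00:5e followed by the low 23 bits of the group address. */
static void ipv4_mcast_to_mac(uint32_t grp, uint8_t mac[6])
{
	mac[0] = 0x01;
	mac[1] = 0x00;
	mac[2] = 0x5e;
	mac[3] = (grp >> 16) & 0x7f;
	mac[4] = (grp >> 8) & 0xff;
	mac[5] = grp & 0xff;
}

/* RFC 2464 mapping: 33:33 followed by the low 32 bits of the group address. */
static void ipv6_mcast_to_mac(const uint8_t grp[16], uint8_t mac[6])
{
	mac[0] = 0x33;
	mac[1] = 0x33;
	memcpy(&mac[2], &grp[12], 4);
}

/* EUI-64 style GUID from a MAC, mirroring irdma_mac_to_guid: flip the
 * universal/local bit of the first octet and insert ff:fe in the middle.
 */
static void mac_to_guid(const uint8_t mac[6], uint8_t guid[8])
{
	guid[0] = mac[0] ^ 2;
	guid[1] = mac[1];
	guid[2] = mac[2];
	guid[3] = 0xff;
	guid[4] = 0xfe;
	guid[5] = mac[3];
	guid[6] = mac[4];
	guid[7] = mac[5];
}

static void print_mac(const char *tag, const uint8_t mac[6])
{
	printf("%-10s %02x:%02x:%02x:%02x:%02x:%02x\n", tag,
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
}

int main(void)
{
	uint8_t mac[6], guid[8];
	const uint8_t nic_mac[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };	/* made-up MAC */
	const uint8_t ff02_1[16] = { 0xff, 0x02, [15] = 0x01 };			/* ff02::1 */
	int i;

	ipv4_mcast_to_mac(0xef010203u, mac);	/* 239.1.2.3 -> 01:00:5e:01:02:03 */
	print_mac("239.1.2.3", mac);

	ipv6_mcast_to_mac(ff02_1, mac);		/* ff02::1   -> 33:33:00:00:00:01 */
	print_mac("ff02::1", mac);

	mac_to_guid(nic_mac, guid);
	printf("%-10s ", "node GUID");
	for (i = 0; i < 8; i++)
		printf("%02x%s", guid[i], i == 7 ? "\n" : i % 2 ? ":" : "");
	return 0;
}

Running the sketch prints 01:00:5e:01:02:03 for 239.1.2.3 and 33:33:00:00:00:01 for ff02::1, the same destination MACs a packet capture would show for those groups, and 0011:22ff:fe33:4455 for the made-up MAC, showing the ff:fe insertion and U/L-bit flip that irdma_init_roce_device relies on when it sets the node GUID.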