/*
 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>

#include "usnic_abi.h"
#include "usnic_ib.h"
#include "usnic_common_util.h"
#include "usnic_ib_qp_grp.h"
#include "usnic_fwd.h"
#include "usnic_log.h"
#include "usnic_uiom.h"
#include "usnic_transport.h"
#include "usnic_ib_verbs.h"

#define USNIC_DEFAULT_TRANSPORT USNIC_TRANSPORT_ROCE_CUSTOM

static void usnic_ib_fw_string_to_u64(char *fw_ver_str, u64 *fw_ver)
{
	/* fw_ver_str has no alignment guarantee; copy rather than cast */
	memcpy(fw_ver, fw_ver_str, sizeof(*fw_ver));
}

static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp,
					struct ib_udata *udata)
{
	struct usnic_ib_dev *us_ibdev;
	struct usnic_ib_create_qp_resp resp;
	struct pci_dev *pdev;
	struct vnic_dev_bar *bar;
	struct usnic_vnic_res_chunk *chunk;
	struct usnic_ib_qp_grp_flow *default_flow;
	int i, err;

	memset(&resp, 0, sizeof(resp));

	us_ibdev = qp_grp->vf->pf;
	pdev = usnic_vnic_get_pdev(qp_grp->vf->vnic);
	if (!pdev) {
		usnic_err("Failed to get pdev of qp_grp %d\n",
			  qp_grp->grp_id);
		return -EFAULT;
	}

	bar = usnic_vnic_get_bar(qp_grp->vf->vnic, 0);
	if (!bar) {
		usnic_err("Failed to get bar0 of qp_grp %d vf %s\n",
			  qp_grp->grp_id, pci_name(pdev));
		return -EFAULT;
	}

	resp.vfid = usnic_vnic_get_index(qp_grp->vf->vnic);
	resp.bar_bus_addr = bar->bus_addr;
	resp.bar_len = bar->len;

	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
	if (IS_ERR(chunk)) {
		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
			  usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
			  qp_grp->grp_id,
			  PTR_ERR(chunk));
		return PTR_ERR(chunk);
	}

	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_RQ);
	resp.rq_cnt = chunk->cnt;
	for (i = 0; i < chunk->cnt; i++)
		resp.rq_idx[i] = chunk->res[i]->vnic_idx;

	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_WQ);
	if (IS_ERR(chunk)) {
		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
			  usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_WQ),
			  qp_grp->grp_id,
			  PTR_ERR(chunk));
		return PTR_ERR(chunk);
	}

	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_WQ);
	resp.wq_cnt = chunk->cnt;
	for (i = 0; i < chunk->cnt; i++)
		resp.wq_idx[i] = chunk->res[i]->vnic_idx;

	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_CQ);
	if (IS_ERR(chunk)) {
		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
			  usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_CQ),
			  qp_grp->grp_id,
			  PTR_ERR(chunk));
		return PTR_ERR(chunk);
	}

	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_CQ);
	resp.cq_cnt = chunk->cnt;
	for (i = 0; i < chunk->cnt; i++)
		resp.cq_idx[i] = chunk->res[i]->vnic_idx;

	default_flow = list_first_entry(&qp_grp->flows_lst,
					struct usnic_ib_qp_grp_flow, link);
	resp.transport = default_flow->trans_type;

	err = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (err) {
		usnic_err("Failed to copy udata for %s\n",
			  us_ibdev->ib_dev.name);
		return err;
	}

	return 0;
}

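/*
 * Find a VF with room for res_spec and create a qp_grp on it.  When VF
 * sharing is enabled, VFs already registered with this PD are preferred;
 * otherwise an unused VF is claimed from the device's vf_dev_list.
 * Caller must hold us_ibdev->usdev_lock.
 */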
static struct usnic_ib_qp_grp*
find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
				struct usnic_ib_pd *pd,
				struct usnic_transport_spec *trans_spec,
				struct usnic_vnic_res_spec *res_spec)
{
	struct usnic_ib_vf *vf;
	struct usnic_vnic *vnic;
	struct usnic_ib_qp_grp *qp_grp;
	struct device *dev, **dev_list;
	int i;

	BUG_ON(!mutex_is_locked(&us_ibdev->usdev_lock));

	if (list_empty(&us_ibdev->vf_dev_list)) {
		usnic_info("No vfs to allocate\n");
		return NULL;
	}

	if (usnic_ib_share_vf) {
		/* Try to find resources on a used vf which is in pd */
		dev_list = usnic_uiom_get_dev_list(pd->umem_pd);
		for (i = 0; dev_list[i]; i++) {
			dev = dev_list[i];
			vf = pci_get_drvdata(to_pci_dev(dev));
			spin_lock(&vf->lock);
			vnic = vf->vnic;
			if (!usnic_vnic_check_room(vnic, res_spec)) {
				usnic_dbg("Found used vnic %s from %s\n",
						us_ibdev->ib_dev.name,
						pci_name(usnic_vnic_get_pdev(
									vnic)));
				qp_grp = usnic_ib_qp_grp_create(us_ibdev->ufdev,
								vf, pd,
								res_spec,
								trans_spec);

				spin_unlock(&vf->lock);
				/* Free the list here too, or it leaks */
				usnic_uiom_free_dev_list(dev_list);
				goto qp_grp_check;
			}
			spin_unlock(&vf->lock);

		}
		usnic_uiom_free_dev_list(dev_list);
	}

	/* Try to find resources on an unused vf */
	list_for_each_entry(vf, &us_ibdev->vf_dev_list, link) {
		spin_lock(&vf->lock);
		vnic = vf->vnic;
		if (vf->qp_grp_ref_cnt == 0 &&
				usnic_vnic_check_room(vnic, res_spec) == 0) {
			qp_grp = usnic_ib_qp_grp_create(us_ibdev->ufdev, vf,
							pd, res_spec,
							trans_spec);

			spin_unlock(&vf->lock);
			goto qp_grp_check;
		}
		spin_unlock(&vf->lock);
	}

	usnic_info("No free qp grp found on %s\n", us_ibdev->ib_dev.name);
	return ERR_PTR(-ENOMEM);

qp_grp_check:
	if (IS_ERR_OR_NULL(qp_grp)) {
		usnic_err("Failed to allocate qp_grp\n");
		return ERR_PTR(qp_grp ? PTR_ERR(qp_grp) : -ENOMEM);
	}
	return qp_grp;
}

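/*
 * Release a qp_grp's VIC resources under its VF's lock.  The group is
 * expected to be in IB_QPS_RESET by the time it gets here.
 */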
static void qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
{
	struct usnic_ib_vf *vf = qp_grp->vf;

	WARN_ON(qp_grp->state != IB_QPS_RESET);

	spin_lock(&vf->lock);
	usnic_ib_qp_grp_destroy(qp_grp);
	spin_unlock(&vf->lock);
}

static void eth_speed_to_ib_speed(int speed, u8 *active_speed,
				  u8 *active_width)
{
	if (speed <= 10000) {
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_FDR10;
	} else if (speed <= 20000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_DDR;
	} else if (speed <= 30000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_QDR;
	} else if (speed <= 40000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_FDR10;
	} else {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_EDR;
	}
}

static int create_qp_validate_user_data(struct usnic_ib_create_qp_cmd cmd)
{
	if (cmd.spec.trans_type <= USNIC_TRANSPORT_UNKNOWN ||
			cmd.spec.trans_type >= USNIC_TRANSPORT_MAX)
		return -EINVAL;

	return 0;
}

/* Start of ib callback functions */

enum rdma_link_layer usnic_ib_port_link_layer(struct ib_device *device,
					      u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

int usnic_ib_query_device(struct ib_device *ibdev,
			  struct ib_device_attr *props,
			  struct ib_udata *uhw)
{
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
	union ib_gid gid;
	struct ethtool_drvinfo info;
	int qp_per_vf;

	usnic_dbg("\n");
	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	mutex_lock(&us_ibdev->usdev_lock);
	us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info);
	memset(props, 0, sizeof(*props));
	usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr,
			&gid.raw[0]);
	memcpy(&props->sys_image_guid, &gid.global.interface_id,
		sizeof(gid.global.interface_id));
	usnic_ib_fw_string_to_u64(&info.fw_version[0], &props->fw_ver);
	props->max_mr_size = USNIC_UIOM_MAX_MR_SIZE;
	props->page_size_cap = USNIC_UIOM_PAGE_SIZE;
	props->vendor_id = PCI_VENDOR_ID_CISCO;
	props->vendor_part_id = PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC;
	props->hw_ver = us_ibdev->pdev->subsystem_device;
	qp_per_vf = max(us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_WQ],
			us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_RQ]);
	props->max_qp = qp_per_vf *
		kref_read(&us_ibdev->vf_cnt);
	props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
	props->max_cq = us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ] *
		kref_read(&us_ibdev->vf_cnt);
	props->max_pd = USNIC_UIOM_MAX_PD_CNT;
	props->max_mr = USNIC_UIOM_MAX_MR_CNT;
	props->local_ca_ack_delay = 0;
	props->max_pkeys = 0;
	props->atomic_cap = IB_ATOMIC_NONE;
	props->masked_atomic_cap = props->atomic_cap;
	props->max_qp_rd_atom = 0;
	props->max_qp_init_rd_atom = 0;
	props->max_res_rd_atom = 0;
	props->max_srq = 0;
	props->max_srq_wr = 0;
	props->max_srq_sge = 0;
	props->max_fast_reg_page_list_len = 0;
	props->max_mcast_grp = 0;
	props->max_mcast_qp_attach = 0;
	props->max_total_mcast_qp_attach = 0;
	props->max_map_per_fmr = 0;
	/* Owned by Userspace
	 * max_qp_wr, max_sge, max_sge_rd, max_cqe */
	mutex_unlock(&us_ibdev->usdev_lock);

	return 0;
}

int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
				struct ib_port_attr *props)
{
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
	struct ethtool_link_ksettings cmd;

	usnic_dbg("\n");

	mutex_lock(&us_ibdev->usdev_lock);
	__ethtool_get_link_ksettings(us_ibdev->netdev, &cmd);
	/* props being zeroed by the caller, avoid zeroing it here */

	props->lid = 0;
	props->lmc = 1;
	props->sm_lid = 0;
	props->sm_sl = 0;

	if (!us_ibdev->ufdev->link_up) {
		props->state = IB_PORT_DOWN;
		props->phys_state = 3;
	} else if (!us_ibdev->ufdev->inaddr) {
		props->state = IB_PORT_INIT;
		props->phys_state = 4;
	} else {
		props->state = IB_PORT_ACTIVE;
		props->phys_state = 5;
	}

	props->port_cap_flags = 0;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->bad_pkey_cntr = 0;
	props->qkey_viol_cntr = 0;
	eth_speed_to_ib_speed(cmd.base.speed, &props->active_speed,
			      &props->active_width);
	props->max_mtu = IB_MTU_4096;
	props->active_mtu = iboe_get_mtu(us_ibdev->ufdev->mtu);
	/* Userspace will adjust for hdrs */
	props->max_msg_sz = us_ibdev->ufdev->mtu;
	props->max_vl_num = 1;
	mutex_unlock(&us_ibdev->usdev_lock);

	return 0;
}

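/*
 * Only UD QPs exist on usNIC, and the queue sizing attributes are owned
 * by userspace, so there is little to report beyond the current state.
 */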
int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
				int qp_attr_mask,
				struct ib_qp_init_attr *qp_init_attr)
{
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_vf *vf;
	int err;

	usnic_dbg("\n");

	memset(qp_attr, 0, sizeof(*qp_attr));
	memset(qp_init_attr, 0, sizeof(*qp_init_attr));

	qp_grp = to_uqp_grp(qp);
	vf = qp_grp->vf;
	mutex_lock(&vf->pf->usdev_lock);
	usnic_dbg("\n");
	qp_attr->qp_state = qp_grp->state;
	qp_attr->cur_qp_state = qp_grp->state;

	switch (qp_grp->ibqp.qp_type) {
	case IB_QPT_UD:
		qp_attr->qkey = 0;
		break;
	default:
		usnic_err("Unexpected qp_type %d\n", qp_grp->ibqp.qp_type);
		err = -EINVAL;
		goto err_out;
	}

	mutex_unlock(&vf->pf->usdev_lock);
	return 0;

err_out:
	mutex_unlock(&vf->pf->usdev_lock);
	return err;
}

int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
				union ib_gid *gid)
{
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
	usnic_dbg("\n");

	if (index > 1)
		return -EINVAL;

	mutex_lock(&us_ibdev->usdev_lock);
	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
	usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr,
			&gid->raw[0]);
	mutex_unlock(&us_ibdev->usdev_lock);

	return 0;
}

int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
				u16 *pkey)
{
	if (index > 1)
		return -EINVAL;

	*pkey = 0xffff;
	return 0;
}

struct ib_pd *usnic_ib_alloc_pd(struct ib_device *ibdev,
				struct ib_ucontext *context,
				struct ib_udata *udata)
{
	struct usnic_ib_pd *pd;
	void *umem_pd;

	usnic_dbg("\n");

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	umem_pd = pd->umem_pd = usnic_uiom_alloc_pd();
	if (IS_ERR_OR_NULL(umem_pd)) {
		kfree(pd);
		return ERR_PTR(umem_pd ? PTR_ERR(umem_pd) : -ENOMEM);
	}

	usnic_info("domain 0x%p allocated for context 0x%p and device %s\n",
			pd, context, ibdev->name);
	return &pd->ibpd;
}

int usnic_ib_dealloc_pd(struct ib_pd *pd)
{
	usnic_info("freeing domain 0x%p\n", pd);

	usnic_uiom_dealloc_pd((to_upd(pd))->umem_pd);
	kfree(pd);
	return 0;
}

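/*
 * Create a UD QP backed by VIC resources.  The minimum resource spec is
 * chosen from the requested transport type, bumped to two CQs when the
 * send and receive CQs differ, and the allocated resources are described
 * back to userspace via usnic_ib_fill_create_qp_resp().
 */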
struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
					struct ib_qp_init_attr *init_attr,
					struct ib_udata *udata)
{
	int err;
	struct usnic_ib_dev *us_ibdev;
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_ucontext *ucontext;
	int cq_cnt;
	struct usnic_vnic_res_spec res_spec;
	struct usnic_ib_create_qp_cmd cmd;
	struct usnic_transport_spec trans_spec;

	usnic_dbg("\n");

	ucontext = to_uucontext(pd->uobject->context);
	us_ibdev = to_usdev(pd->device);

	if (init_attr->create_flags)
		return ERR_PTR(-EINVAL);

	err = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
	if (err) {
		usnic_err("%s: cannot copy udata for create_qp\n",
			  us_ibdev->ib_dev.name);
		return ERR_PTR(-EINVAL);
	}

	err = create_qp_validate_user_data(cmd);
	if (err) {
		usnic_err("%s: Failed to validate user data\n",
			  us_ibdev->ib_dev.name);
		return ERR_PTR(-EINVAL);
	}

	if (init_attr->qp_type != IB_QPT_UD) {
		usnic_err("%s asked to make a non-UD QP: %d\n",
			  us_ibdev->ib_dev.name, init_attr->qp_type);
		return ERR_PTR(-EINVAL);
	}

	trans_spec = cmd.spec;
	mutex_lock(&us_ibdev->usdev_lock);
	cq_cnt = (init_attr->send_cq == init_attr->recv_cq) ? 1 : 2;
	res_spec = min_transport_spec[trans_spec.trans_type];
	usnic_vnic_res_spec_update(&res_spec, USNIC_VNIC_RES_TYPE_CQ, cq_cnt);
	qp_grp = find_free_vf_and_create_qp_grp(us_ibdev, to_upd(pd),
						&trans_spec,
						&res_spec);
	if (IS_ERR_OR_NULL(qp_grp)) {
		err = qp_grp ? PTR_ERR(qp_grp) : -ENOMEM;
		goto out_release_mutex;
	}

	err = usnic_ib_fill_create_qp_resp(qp_grp, udata);
	if (err) {
		err = -EBUSY;
		goto out_release_qp_grp;
	}

	qp_grp->ctx = ucontext;
	list_add_tail(&qp_grp->link, &ucontext->qp_grp_list);
	usnic_ib_log_vf(qp_grp->vf);
	mutex_unlock(&us_ibdev->usdev_lock);
	return &qp_grp->ibqp;

out_release_qp_grp:
	qp_grp_destroy(qp_grp);
out_release_mutex:
	mutex_unlock(&us_ibdev->usdev_lock);
	return ERR_PTR(err);
}

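/*
 * Force the QP through IB_QPS_RESET so its flows and VIC resources are
 * quiesced before the group is torn down.
 */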
int usnic_ib_destroy_qp(struct ib_qp *qp)
{
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_vf *vf;

	usnic_dbg("\n");

	qp_grp = to_uqp_grp(qp);
	vf = qp_grp->vf;
	mutex_lock(&vf->pf->usdev_lock);
	if (usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RESET, NULL)) {
		usnic_err("Failed to move qp grp %u to reset\n",
			  qp_grp->grp_id);
	}

	list_del(&qp_grp->link);
	qp_grp_destroy(qp_grp);
	mutex_unlock(&vf->pf->usdev_lock);

	return 0;
}

int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
				int attr_mask, struct ib_udata *udata)
{
	struct usnic_ib_qp_grp *qp_grp;
	int status;
	usnic_dbg("\n");

	qp_grp = to_uqp_grp(ibqp);

	mutex_lock(&qp_grp->vf->pf->usdev_lock);
	if ((attr_mask & IB_QP_PORT) && attr->port_num != 1) {
		/* usnic devices only have one port */
		status = -EINVAL;
		goto out_unlock;
	}
	if (attr_mask & IB_QP_STATE) {
		status = usnic_ib_qp_grp_modify(qp_grp, attr->qp_state, NULL);
	} else {
		usnic_err("Unhandled request, attr_mask=0x%x\n", attr_mask);
		status = -EINVAL;
	}

out_unlock:
	mutex_unlock(&qp_grp->vf->pf->usdev_lock);
	return status;
}

struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev,
				 const struct ib_cq_init_attr *attr,
				 struct ib_ucontext *context,
				 struct ib_udata *udata)
{
	struct ib_cq *cq;

	usnic_dbg("\n");
	if (attr->flags)
		return ERR_PTR(-EINVAL);

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	return cq;
}

int usnic_ib_destroy_cq(struct ib_cq *cq)
{
	usnic_dbg("\n");
	kfree(cq);
	return 0;
}

struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
					u64 virt_addr, int access_flags,
					struct ib_udata *udata)
{
	struct usnic_ib_mr *mr;
	int err;

	usnic_dbg("start 0x%llx va 0x%llx length 0x%llx\n", start,
			virt_addr, length);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->umem = usnic_uiom_reg_get(to_upd(pd)->umem_pd, start, length,
					access_flags, 0);
	if (IS_ERR_OR_NULL(mr->umem)) {
		err = mr->umem ? PTR_ERR(mr->umem) : -EFAULT;
		goto err_free;
	}

	mr->ibmr.lkey = mr->ibmr.rkey = 0;
	return &mr->ibmr;

err_free:
	kfree(mr);
	return ERR_PTR(err);
}

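/*
 * usnic_uiom pinned the user pages at registration; the ucontext's
 * closing flag is passed down so the release path knows whether the
 * owning process's address space is being torn down.
 */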
int usnic_ib_dereg_mr(struct ib_mr *ibmr)
{
	struct usnic_ib_mr *mr = to_umr(ibmr);

	usnic_dbg("va 0x%lx length 0x%zx\n", mr->umem->va, mr->umem->length);

	usnic_uiom_reg_release(mr->umem, ibmr->pd->uobject->context->closing);
	kfree(mr);
	return 0;
}

struct ib_ucontext *usnic_ib_alloc_ucontext(struct ib_device *ibdev,
							struct ib_udata *udata)
{
	struct usnic_ib_ucontext *context;
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
	usnic_dbg("\n");

	context = kmalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&context->qp_grp_list);
	mutex_lock(&us_ibdev->usdev_lock);
	list_add_tail(&context->link, &us_ibdev->ctx_list);
	mutex_unlock(&us_ibdev->usdev_lock);

	return &context->ibucontext;
}

int usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct usnic_ib_ucontext *context = to_uucontext(ibcontext);
	struct usnic_ib_dev *us_ibdev = to_usdev(ibcontext->device);
	usnic_dbg("\n");

	mutex_lock(&us_ibdev->usdev_lock);
	BUG_ON(!list_empty(&context->qp_grp_list));
	list_del(&context->link);
	mutex_unlock(&us_ibdev->usdev_lock);
	kfree(context);
	return 0;
}

int usnic_ib_mmap(struct ib_ucontext *context,
			struct vm_area_struct *vma)
{
	struct usnic_ib_ucontext *uctx = to_ucontext(context);
	struct usnic_ib_dev *us_ibdev;
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_vf *vf;
	struct vnic_dev_bar *bar;
	dma_addr_t bus_addr;
	unsigned int len;
	unsigned int vfid;

	usnic_dbg("\n");

	us_ibdev = to_usdev(context->device);
	vma->vm_flags |= VM_IO;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vfid = vma->vm_pgoff;
	usnic_dbg("Page Offset %lu PAGE_SHIFT %u VFID %u\n",
			vma->vm_pgoff, PAGE_SHIFT, vfid);

	mutex_lock(&us_ibdev->usdev_lock);
	list_for_each_entry(qp_grp, &uctx->qp_grp_list, link) {
		vf = qp_grp->vf;
		if (usnic_vnic_get_index(vf->vnic) == vfid) {
			bar = usnic_vnic_get_bar(vf->vnic, 0);
			if ((vma->vm_end - vma->vm_start) != bar->len) {
				usnic_err("Bar0 Len %lu - Request map %lu\n",
						bar->len,
						vma->vm_end - vma->vm_start);
				mutex_unlock(&us_ibdev->usdev_lock);
				return -EINVAL;
			}
			bus_addr = bar->bus_addr;
			len = bar->len;
			usnic_dbg("bus: %pa vaddr: %p size: %ld\n",
					&bus_addr, bar->vaddr, bar->len);
			mutex_unlock(&us_ibdev->usdev_lock);

			return remap_pfn_range(vma,
						vma->vm_start,
						bus_addr >> PAGE_SHIFT,
						len, vma->vm_page_prot);
		}
	}

	mutex_unlock(&us_ibdev->usdev_lock);
	usnic_err("No VF %u found\n", vfid);
	return -EINVAL;
}

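/*
 * The remaining verbs are required entries in the ib_device callback
 * table but have no kernel-side implementation: usNIC's data path
 * (posting work requests, polling and arming CQs) is driven entirely
 * from userspace, so these handlers only return errors.
 */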
/* In ib callbacks section - Start of stub funcs */
struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd,
				 struct rdma_ah_attr *ah_attr,
				 struct ib_udata *udata)
{
	usnic_dbg("\n");
	return ERR_PTR(-EPERM);
}

int usnic_ib_destroy_ah(struct ib_ah *ah)
{
	usnic_dbg("\n");
	return -EINVAL;
}

int usnic_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
				struct ib_send_wr **bad_wr)
{
	usnic_dbg("\n");
	return -EINVAL;
}

int usnic_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
				struct ib_recv_wr **bad_wr)
{
	usnic_dbg("\n");
	return -EINVAL;
}

int usnic_ib_poll_cq(struct ib_cq *ibcq, int num_entries,
				struct ib_wc *wc)
{
	usnic_dbg("\n");
	return -EINVAL;
}

int usnic_ib_req_notify_cq(struct ib_cq *cq,
					enum ib_cq_notify_flags flags)
{
	usnic_dbg("\n");
	return -EINVAL;
}

struct ib_mr *usnic_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	usnic_dbg("\n");
	return ERR_PTR(-ENOMEM);
}

/* In ib callbacks section - End of stub funcs */
/* End of ib callbacks section */