/*
 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>

#include "usnic_abi.h"
#include "usnic_ib.h"
#include "usnic_common_util.h"
#include "usnic_ib_qp_grp.h"
#include "usnic_fwd.h"
#include "usnic_log.h"
#include "usnic_uiom.h"
#include "usnic_transport.h"
#include "usnic_ib_verbs.h"

#define USNIC_DEFAULT_TRANSPORT USNIC_TRANSPORT_ROCE_CUSTOM

static void usnic_ib_fw_string_to_u64(char *fw_ver_str, u64 *fw_ver)
{
	/*
	 * Copy the first 8 bytes of the version string; memcpy avoids
	 * the unaligned-access and strict-aliasing problems of casting
	 * the char buffer to a u64 pointer.
	 */
	memcpy(fw_ver, fw_ver_str, sizeof(*fw_ver));
}

static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp,
					struct ib_udata *udata)
{
	struct usnic_ib_dev *us_ibdev;
	struct usnic_ib_create_qp_resp resp;
	struct pci_dev *pdev;
	struct vnic_dev_bar *bar;
	struct usnic_vnic_res_chunk *chunk;
	struct usnic_ib_qp_grp_flow *default_flow;
	int i, err;

	memset(&resp, 0, sizeof(resp));

	us_ibdev = qp_grp->vf->pf;
	pdev = usnic_vnic_get_pdev(qp_grp->vf->vnic);
	if (!pdev) {
		usnic_err("Failed to get pdev of qp_grp %d\n",
				qp_grp->grp_id);
		return -EFAULT;
	}

	bar = usnic_vnic_get_bar(qp_grp->vf->vnic, 0);
	if (!bar) {
		usnic_err("Failed to get bar0 of qp_grp %d vf %s",
				qp_grp->grp_id, pci_name(pdev));
		return -EFAULT;
	}

	resp.vfid = usnic_vnic_get_index(qp_grp->vf->vnic);
	resp.bar_bus_addr = bar->bus_addr;
	resp.bar_len = bar->len;

	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
	if (IS_ERR(chunk)) {
		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
			qp_grp->grp_id,
			PTR_ERR(chunk));
		return PTR_ERR(chunk);
	}

	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_RQ);
	resp.rq_cnt = chunk->cnt;
	for (i = 0; i < chunk->cnt; i++)
		resp.rq_idx[i] = chunk->res[i]->vnic_idx;

	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_WQ);
	if (IS_ERR(chunk)) {
		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_WQ),
			qp_grp->grp_id,
			PTR_ERR(chunk));
		return PTR_ERR(chunk);
	}

	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_WQ);
	resp.wq_cnt = chunk->cnt;
	for (i = 0; i < chunk->cnt; i++)
		resp.wq_idx[i] = chunk->res[i]->vnic_idx;

	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_CQ);
	if (IS_ERR(chunk)) {
		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_CQ),
			qp_grp->grp_id,
			PTR_ERR(chunk));
		return PTR_ERR(chunk);
	}

	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_CQ);
	resp.cq_cnt = chunk->cnt;
	for (i = 0; i < chunk->cnt; i++)
		resp.cq_idx[i] = chunk->res[i]->vnic_idx;

	default_flow = list_first_entry(&qp_grp->flows_lst,
					struct usnic_ib_qp_grp_flow, link);
	resp.transport = default_flow->trans_type;

	err = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (err) {
		usnic_err("Failed to copy udata for %s",
			  us_ibdev->ib_dev.name);
		return err;
	}

	return 0;
}
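
/*
 * VF selection for a new QP group: when usnic_ib_share_vf is set, first
 * try to pack the group onto a VF already used by this PD; otherwise
 * fall back to a completely unused VF. The caller must hold
 * us_ibdev->usdev_lock, which the BUG_ON below enforces.
 */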
static struct usnic_ib_qp_grp*
find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
				struct usnic_ib_pd *pd,
				struct usnic_transport_spec *trans_spec,
				struct usnic_vnic_res_spec *res_spec)
{
	struct usnic_ib_vf *vf;
	struct usnic_vnic *vnic;
	struct usnic_ib_qp_grp *qp_grp;
	struct device *dev, **dev_list;
	int i;

	BUG_ON(!mutex_is_locked(&us_ibdev->usdev_lock));

	if (list_empty(&us_ibdev->vf_dev_list)) {
		usnic_info("No vfs to allocate\n");
		return NULL;
	}

	if (usnic_ib_share_vf) {
		/* Try to find resources on a used vf which is in pd */
		dev_list = usnic_uiom_get_dev_list(pd->umem_pd);
		if (IS_ERR(dev_list))
			return ERR_CAST(dev_list);
		for (i = 0; dev_list[i]; i++) {
			dev = dev_list[i];
			vf = pci_get_drvdata(to_pci_dev(dev));
			spin_lock(&vf->lock);
			vnic = vf->vnic;
			if (!usnic_vnic_check_room(vnic, res_spec)) {
				usnic_dbg("Found used vnic %s from %s\n",
						us_ibdev->ib_dev.name,
						pci_name(usnic_vnic_get_pdev(
									vnic)));
				qp_grp = usnic_ib_qp_grp_create(us_ibdev->ufdev,
								vf, pd,
								res_spec,
								trans_spec);

				spin_unlock(&vf->lock);
				/* Free the list before leaving the loop,
				 * otherwise it would be leaked here.
				 */
				usnic_uiom_free_dev_list(dev_list);
				goto qp_grp_check;
			}
			spin_unlock(&vf->lock);
		}
		usnic_uiom_free_dev_list(dev_list);
	}

	/* Try to find resources on an unused vf */
	list_for_each_entry(vf, &us_ibdev->vf_dev_list, link) {
		spin_lock(&vf->lock);
		vnic = vf->vnic;
		if (vf->qp_grp_ref_cnt == 0 &&
		    usnic_vnic_check_room(vnic, res_spec) == 0) {
			qp_grp = usnic_ib_qp_grp_create(us_ibdev->ufdev, vf,
							pd, res_spec,
							trans_spec);

			spin_unlock(&vf->lock);
			goto qp_grp_check;
		}
		spin_unlock(&vf->lock);
	}

	usnic_info("No free qp grp found on %s\n", us_ibdev->ib_dev.name);
	return ERR_PTR(-ENOMEM);

qp_grp_check:
	if (IS_ERR_OR_NULL(qp_grp)) {
		usnic_err("Failed to allocate qp_grp\n");
		return ERR_PTR(qp_grp ? PTR_ERR(qp_grp) : -ENOMEM);
	}
	return qp_grp;
}
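
/*
 * Final teardown of a qp_grp under the owning VF's lock. The group is
 * expected to be back in IB_QPS_RESET by now; the WARN_ON flags callers
 * that skipped that transition.
 */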
static void qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
{
	struct usnic_ib_vf *vf = qp_grp->vf;

	WARN_ON(qp_grp->state != IB_QPS_RESET);

	spin_lock(&vf->lock);
	usnic_ib_qp_grp_destroy(qp_grp);
	spin_unlock(&vf->lock);
}

static int create_qp_validate_user_data(struct usnic_ib_create_qp_cmd cmd)
{
	if (cmd.spec.trans_type <= USNIC_TRANSPORT_UNKNOWN ||
			cmd.spec.trans_type >= USNIC_TRANSPORT_MAX)
		return -EINVAL;

	return 0;
}

/* Start of ib callback functions */

enum rdma_link_layer usnic_ib_port_link_layer(struct ib_device *device,
					      u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

int usnic_ib_query_device(struct ib_device *ibdev,
			  struct ib_device_attr *props,
			  struct ib_udata *uhw)
{
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
	union ib_gid gid;
	struct ethtool_drvinfo info;
	int qp_per_vf;

	usnic_dbg("\n");
	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	mutex_lock(&us_ibdev->usdev_lock);
	us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info);
	memset(props, 0, sizeof(*props));
	usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr,
			&gid.raw[0]);
	memcpy(&props->sys_image_guid, &gid.global.interface_id,
		sizeof(gid.global.interface_id));
	usnic_ib_fw_string_to_u64(&info.fw_version[0], &props->fw_ver);
	props->max_mr_size = USNIC_UIOM_MAX_MR_SIZE;
	props->page_size_cap = USNIC_UIOM_PAGE_SIZE;
	props->vendor_id = PCI_VENDOR_ID_CISCO;
	props->vendor_part_id = PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC;
	props->hw_ver = us_ibdev->pdev->subsystem_device;
	qp_per_vf = max(us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_WQ],
			us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_RQ]);
	props->max_qp = qp_per_vf *
		kref_read(&us_ibdev->vf_cnt);
	props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
	props->max_cq = us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ] *
		kref_read(&us_ibdev->vf_cnt);
	props->max_pd = USNIC_UIOM_MAX_PD_CNT;
	props->max_mr = USNIC_UIOM_MAX_MR_CNT;
	props->local_ca_ack_delay = 0;
	props->max_pkeys = 0;
	props->atomic_cap = IB_ATOMIC_NONE;
	props->masked_atomic_cap = props->atomic_cap;
	props->max_qp_rd_atom = 0;
	props->max_qp_init_rd_atom = 0;
	props->max_res_rd_atom = 0;
	props->max_srq = 0;
	props->max_srq_wr = 0;
	props->max_srq_sge = 0;
	props->max_fast_reg_page_list_len = 0;
	props->max_mcast_grp = 0;
	props->max_mcast_qp_attach = 0;
	props->max_total_mcast_qp_attach = 0;
	props->max_map_per_fmr = 0;
	/* Owned by Userspace
	 * max_qp_wr, max_sge, max_sge_rd, max_cqe */
	mutex_unlock(&us_ibdev->usdev_lock);

	return 0;
}

int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
				struct ib_port_attr *props)
{
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);

	usnic_dbg("\n");

	mutex_lock(&us_ibdev->usdev_lock);
	if (ib_get_eth_speed(ibdev, port, &props->active_speed,
			     &props->active_width)) {
		mutex_unlock(&us_ibdev->usdev_lock);
		return -EINVAL;
	}

	/* props being zeroed by the caller, avoid zeroing it here */

	props->lid = 0;
	props->lmc = 1;
	props->sm_lid = 0;
	props->sm_sl = 0;

	if (!us_ibdev->ufdev->link_up) {
		props->state = IB_PORT_DOWN;
		props->phys_state = 3;	/* IB phys state 3: Disabled */
	} else if (!us_ibdev->ufdev->inaddr) {
		props->state = IB_PORT_INIT;
		props->phys_state = 4;	/* 4: PortConfigurationTraining */
	} else {
		props->state = IB_PORT_ACTIVE;
		props->phys_state = 5;	/* 5: LinkUp */
	}

	props->port_cap_flags = 0;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->bad_pkey_cntr = 0;
	props->qkey_viol_cntr = 0;
	props->max_mtu = IB_MTU_4096;
	props->active_mtu = iboe_get_mtu(us_ibdev->ufdev->mtu);
	/* Userspace will adjust for hdrs */
	props->max_msg_sz = us_ibdev->ufdev->mtu;
	props->max_vl_num = 1;
	mutex_unlock(&us_ibdev->usdev_lock);

	return 0;
}
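
/*
 * Only UD QPs exist on usnic, so query_qp has little to report beyond
 * the cached qp_grp state; queue attributes are owned by userspace, as
 * noted in usnic_ib_query_device() above.
 */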
int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
				int qp_attr_mask,
				struct ib_qp_init_attr *qp_init_attr)
{
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_vf *vf;
	int err;

	usnic_dbg("\n");

	memset(qp_attr, 0, sizeof(*qp_attr));
	memset(qp_init_attr, 0, sizeof(*qp_init_attr));

	qp_grp = to_uqp_grp(qp);
	vf = qp_grp->vf;
	mutex_lock(&vf->pf->usdev_lock);
	usnic_dbg("\n");
	qp_attr->qp_state = qp_grp->state;
	qp_attr->cur_qp_state = qp_grp->state;

	switch (qp_grp->ibqp.qp_type) {
	case IB_QPT_UD:
		qp_attr->qkey = 0;
		break;
	default:
		usnic_err("Unexpected qp_type %d\n", qp_grp->ibqp.qp_type);
		err = -EINVAL;
		goto err_out;
	}

	mutex_unlock(&vf->pf->usdev_lock);
	return 0;

err_out:
	mutex_unlock(&vf->pf->usdev_lock);
	return err;
}

int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
				union ib_gid *gid)
{
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);

	usnic_dbg("\n");

	/* Only one gid entry is exposed (gid_tbl_len == 1) */
	if (index > 0)
		return -EINVAL;

	mutex_lock(&us_ibdev->usdev_lock);
	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
	usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr,
			&gid->raw[0]);
	mutex_unlock(&us_ibdev->usdev_lock);

	return 0;
}

struct net_device *usnic_get_netdev(struct ib_device *device, u8 port_num)
{
	struct usnic_ib_dev *us_ibdev = to_usdev(device);

	if (us_ibdev->netdev)
		dev_hold(us_ibdev->netdev);

	return us_ibdev->netdev;
}

int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
				u16 *pkey)
{
	/* Only one pkey entry is exposed (pkey_tbl_len == 1) */
	if (index > 0)
		return -EINVAL;

	*pkey = 0xffff;
	return 0;
}

struct ib_pd *usnic_ib_alloc_pd(struct ib_device *ibdev,
				struct ib_ucontext *context,
				struct ib_udata *udata)
{
	struct usnic_ib_pd *pd;
	void *umem_pd;

	usnic_dbg("\n");

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	umem_pd = pd->umem_pd = usnic_uiom_alloc_pd();
	if (IS_ERR_OR_NULL(umem_pd)) {
		kfree(pd);
		return ERR_PTR(umem_pd ? PTR_ERR(umem_pd) : -ENOMEM);
	}

	usnic_info("domain 0x%p allocated for context 0x%p and device %s\n",
			pd, context, ibdev->name);
	return &pd->ibpd;
}
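
/*
 * The umem_pd handle released below is the usnic_uiom domain allocated
 * in usnic_ib_alloc_pd(); it backs every memory registration made on
 * this PD.
 */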
int usnic_ib_dealloc_pd(struct ib_pd *pd)
{
	usnic_info("freeing domain 0x%p\n", pd);

	usnic_uiom_dealloc_pd((to_upd(pd))->umem_pd);
	kfree(pd);
	return 0;
}

struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
					struct ib_qp_init_attr *init_attr,
					struct ib_udata *udata)
{
	int err;
	struct usnic_ib_dev *us_ibdev;
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_ucontext *ucontext;
	int cq_cnt;
	struct usnic_vnic_res_spec res_spec;
	struct usnic_ib_create_qp_cmd cmd;
	struct usnic_transport_spec trans_spec;

	usnic_dbg("\n");

	ucontext = to_uucontext(pd->uobject->context);
	us_ibdev = to_usdev(pd->device);

	if (init_attr->create_flags)
		return ERR_PTR(-EINVAL);

	err = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
	if (err) {
		usnic_err("%s: cannot copy udata for create_qp\n",
			  us_ibdev->ib_dev.name);
		return ERR_PTR(-EINVAL);
	}

	err = create_qp_validate_user_data(cmd);
	if (err) {
		usnic_err("%s: Failed to validate user data\n",
			  us_ibdev->ib_dev.name);
		return ERR_PTR(-EINVAL);
	}

	if (init_attr->qp_type != IB_QPT_UD) {
		usnic_err("%s asked to make a non-UD QP: %d\n",
			  us_ibdev->ib_dev.name, init_attr->qp_type);
		return ERR_PTR(-EINVAL);
	}

	trans_spec = cmd.spec;
	mutex_lock(&us_ibdev->usdev_lock);
	cq_cnt = (init_attr->send_cq == init_attr->recv_cq) ? 1 : 2;
	res_spec = min_transport_spec[trans_spec.trans_type];
	usnic_vnic_res_spec_update(&res_spec, USNIC_VNIC_RES_TYPE_CQ, cq_cnt);
	qp_grp = find_free_vf_and_create_qp_grp(us_ibdev, to_upd(pd),
						&trans_spec,
						&res_spec);
	if (IS_ERR_OR_NULL(qp_grp)) {
		err = qp_grp ? PTR_ERR(qp_grp) : -ENOMEM;
		goto out_release_mutex;
	}

	err = usnic_ib_fill_create_qp_resp(qp_grp, udata);
	if (err) {
		err = -EBUSY;
		goto out_release_qp_grp;
	}

	qp_grp->ctx = ucontext;
	list_add_tail(&qp_grp->link, &ucontext->qp_grp_list);
	usnic_ib_log_vf(qp_grp->vf);
	mutex_unlock(&us_ibdev->usdev_lock);
	return &qp_grp->ibqp;

out_release_qp_grp:
	qp_grp_destroy(qp_grp);
out_release_mutex:
	mutex_unlock(&us_ibdev->usdev_lock);
	return ERR_PTR(err);
}
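
/*
 * usnic_ib_destroy_qp() moves the group back to IB_QPS_RESET before
 * tearing it down, so the vNIC resources are quiesced by the time
 * qp_grp_destroy() returns them to the VF.
 */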
int usnic_ib_destroy_qp(struct ib_qp *qp)
{
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_vf *vf;

	usnic_dbg("\n");

	qp_grp = to_uqp_grp(qp);
	vf = qp_grp->vf;
	mutex_lock(&vf->pf->usdev_lock);
	if (usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RESET, NULL)) {
		usnic_err("Failed to move qp grp %u to reset\n",
				qp_grp->grp_id);
	}

	list_del(&qp_grp->link);
	qp_grp_destroy(qp_grp);
	mutex_unlock(&vf->pf->usdev_lock);

	return 0;
}

int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
				int attr_mask, struct ib_udata *udata)
{
	struct usnic_ib_qp_grp *qp_grp;
	int status;

	usnic_dbg("\n");

	qp_grp = to_uqp_grp(ibqp);

	mutex_lock(&qp_grp->vf->pf->usdev_lock);
	if ((attr_mask & IB_QP_PORT) && attr->port_num != 1) {
		/* usnic devices only have one port */
		status = -EINVAL;
		goto out_unlock;
	}
	if (attr_mask & IB_QP_STATE) {
		status = usnic_ib_qp_grp_modify(qp_grp, attr->qp_state, NULL);
	} else {
		usnic_err("Unhandled request, attr_mask=0x%x\n", attr_mask);
		status = -EINVAL;
	}

out_unlock:
	mutex_unlock(&qp_grp->vf->pf->usdev_lock);
	return status;
}

struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev,
				 const struct ib_cq_init_attr *attr,
				 struct ib_ucontext *context,
				 struct ib_udata *udata)
{
	struct ib_cq *cq;

	usnic_dbg("\n");
	if (attr->flags)
		return ERR_PTR(-EINVAL);

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);	/* allocation failure, not busy */

	return cq;
}

int usnic_ib_destroy_cq(struct ib_cq *cq)
{
	usnic_dbg("\n");
	kfree(cq);
	return 0;
}

struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
					u64 virt_addr, int access_flags,
					struct ib_udata *udata)
{
	struct usnic_ib_mr *mr;
	int err;

	usnic_dbg("start 0x%llx va 0x%llx length 0x%llx\n", start,
			virt_addr, length);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->umem = usnic_uiom_reg_get(to_upd(pd)->umem_pd, start, length,
					access_flags, 0);
	if (IS_ERR_OR_NULL(mr->umem)) {
		err = mr->umem ? PTR_ERR(mr->umem) : -EFAULT;
		goto err_free;
	}

	mr->ibmr.lkey = mr->ibmr.rkey = 0;
	return &mr->ibmr;

err_free:
	kfree(mr);
	return ERR_PTR(err);
}
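
/*
 * usnic does not implement protection keys: reg_mr above reports an
 * lkey/rkey of 0, and registration exists only to pin the pages and
 * map them for device access through usnic_uiom.
 */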
int usnic_ib_dereg_mr(struct ib_mr *ibmr)
{
	struct usnic_ib_mr *mr = to_umr(ibmr);

	usnic_dbg("va 0x%lx length 0x%zx\n", mr->umem->va, mr->umem->length);

	usnic_uiom_reg_release(mr->umem, ibmr->pd->uobject->context->closing);
	kfree(mr);
	return 0;
}

struct ib_ucontext *usnic_ib_alloc_ucontext(struct ib_device *ibdev,
							struct ib_udata *udata)
{
	struct usnic_ib_ucontext *context;
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);

	usnic_dbg("\n");

	context = kmalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&context->qp_grp_list);
	mutex_lock(&us_ibdev->usdev_lock);
	list_add_tail(&context->link, &us_ibdev->ctx_list);
	mutex_unlock(&us_ibdev->usdev_lock);

	return &context->ibucontext;
}

int usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct usnic_ib_ucontext *context = to_uucontext(ibcontext);
	struct usnic_ib_dev *us_ibdev = to_usdev(ibcontext->device);

	usnic_dbg("\n");

	mutex_lock(&us_ibdev->usdev_lock);
	BUG_ON(!list_empty(&context->qp_grp_list));
	list_del(&context->link);
	mutex_unlock(&us_ibdev->usdev_lock);
	kfree(context);
	return 0;
}

int usnic_ib_mmap(struct ib_ucontext *context,
			struct vm_area_struct *vma)
{
	struct usnic_ib_ucontext *uctx = to_ucontext(context);
	struct usnic_ib_dev *us_ibdev;
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_vf *vf;
	struct vnic_dev_bar *bar;
	dma_addr_t bus_addr;
	unsigned int len;
	unsigned int vfid;

	usnic_dbg("\n");

	us_ibdev = to_usdev(context->device);
	vma->vm_flags |= VM_IO;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	/* The mmap offset encodes the index of the VF whose bar0 to map */
	vfid = vma->vm_pgoff;
	usnic_dbg("Page Offset %lu PAGE_SHIFT %u VFID %u\n",
			vma->vm_pgoff, PAGE_SHIFT, vfid);

	mutex_lock(&us_ibdev->usdev_lock);
	list_for_each_entry(qp_grp, &uctx->qp_grp_list, link) {
		vf = qp_grp->vf;
		if (usnic_vnic_get_index(vf->vnic) == vfid) {
			bar = usnic_vnic_get_bar(vf->vnic, 0);
			if ((vma->vm_end - vma->vm_start) != bar->len) {
				usnic_err("Bar0 Len %lu - Request map %lu\n",
						bar->len,
						vma->vm_end - vma->vm_start);
				mutex_unlock(&us_ibdev->usdev_lock);
				return -EINVAL;
			}
			bus_addr = bar->bus_addr;
			len = bar->len;
			usnic_dbg("bus: %pa vaddr: %p size: %ld\n",
					&bus_addr, bar->vaddr, bar->len);
			mutex_unlock(&us_ibdev->usdev_lock);

			return remap_pfn_range(vma,
						vma->vm_start,
						bus_addr >> PAGE_SHIFT,
						len, vma->vm_page_prot);
		}
	}

	mutex_unlock(&us_ibdev->usdev_lock);
	usnic_err("No VF %u found\n", vfid);
	return -EINVAL;
}

/* In ib callbacks section - Start of stub funcs */
struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd,
				 struct rdma_ah_attr *ah_attr,
				 struct ib_udata *udata)
{
	usnic_dbg("\n");
	return ERR_PTR(-EPERM);
}

int usnic_ib_destroy_ah(struct ib_ah *ah)
{
	usnic_dbg("\n");
	return -EINVAL;
}

int usnic_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		       struct ib_send_wr **bad_wr)
{
	usnic_dbg("\n");
	return -EINVAL;
}

int usnic_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		       struct ib_recv_wr **bad_wr)
{
	usnic_dbg("\n");
	return -EINVAL;
}
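
/*
 * The remaining data-path verbs are stubs as well: usnic queues are
 * mapped into and driven entirely from userspace, so the kernel never
 * posts work requests or polls completions itself.
 */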
int usnic_ib_poll_cq(struct ib_cq *ibcq, int num_entries,
				struct ib_wc *wc)
{
	usnic_dbg("\n");
	return -EINVAL;
}

int usnic_ib_req_notify_cq(struct ib_cq *cq,
					enum ib_cq_notify_flags flags)
{
	usnic_dbg("\n");
	return -EINVAL;
}

struct ib_mr *usnic_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	usnic_dbg("\n");
	return ERR_PTR(-ENOMEM);
}

/* In ib callbacks section - End of stub funcs */
/* End of ib callbacks section */