/*
 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
31 * 32 * Author: Upinder Malhi <umalhi@cisco.com> 33 * Author: Anant Deepak <anadeepa@cisco.com> 34 * Author: Cesare Cantu' <cantuc@cisco.com> 35 * Author: Jeff Squyres <jsquyres@cisco.com> 36 * Author: Kiran Thirumalai <kithirum@cisco.com> 37 * Author: Xuyang Wang <xuywang@cisco.com> 38 * Author: Reese Faucette <rfaucett@cisco.com> 39 * 40 */ 41 42 #include <linux/module.h> 43 #include <linux/inetdevice.h> 44 #include <linux/init.h> 45 #include <linux/slab.h> 46 #include <linux/errno.h> 47 #include <linux/pci.h> 48 #include <linux/netdevice.h> 49 50 #include <rdma/ib_user_verbs.h> 51 #include <rdma/ib_addr.h> 52 53 #include "usnic_abi.h" 54 #include "usnic_common_util.h" 55 #include "usnic_ib.h" 56 #include "usnic_ib_qp_grp.h" 57 #include "usnic_log.h" 58 #include "usnic_fwd.h" 59 #include "usnic_debugfs.h" 60 #include "usnic_ib_verbs.h" 61 #include "usnic_transport.h" 62 #include "usnic_uiom.h" 63 #include "usnic_ib_sysfs.h" 64 65 unsigned int usnic_log_lvl = USNIC_LOG_LVL_ERR; 66 unsigned int usnic_ib_share_vf = 1; 67 68 static const char usnic_version[] = 69 DRV_NAME ": Cisco VIC (USNIC) Verbs Driver v" 70 DRV_VERSION " (" DRV_RELDATE ")\n"; 71 72 static DEFINE_MUTEX(usnic_ib_ibdev_list_lock); 73 static LIST_HEAD(usnic_ib_ibdev_list); 74 75 /* Callback dump funcs */ 76 static int usnic_ib_dump_vf_hdr(void *obj, char *buf, int buf_sz) 77 { 78 struct usnic_ib_vf *vf = obj; 79 return scnprintf(buf, buf_sz, "PF: %s ", dev_name(&vf->pf->ib_dev.dev)); 80 } 81 /* End callback dump funcs */ 82 83 static void usnic_ib_dump_vf(struct usnic_ib_vf *vf, char *buf, int buf_sz) 84 { 85 usnic_vnic_dump(vf->vnic, buf, buf_sz, vf, 86 usnic_ib_dump_vf_hdr, 87 usnic_ib_qp_grp_dump_hdr, usnic_ib_qp_grp_dump_rows); 88 } 89 90 void usnic_ib_log_vf(struct usnic_ib_vf *vf) 91 { 92 char buf[1000]; 93 usnic_ib_dump_vf(vf, buf, sizeof(buf)); 94 usnic_dbg("%s\n", buf); 95 } 96 97 /* Start of netdev section */ 98 static void usnic_ib_qp_grp_modify_active_to_err(struct usnic_ib_dev 
*us_ibdev) 99 { 100 struct usnic_ib_ucontext *ctx; 101 struct usnic_ib_qp_grp *qp_grp; 102 enum ib_qp_state cur_state; 103 int status; 104 105 BUG_ON(!mutex_is_locked(&us_ibdev->usdev_lock)); 106 107 list_for_each_entry(ctx, &us_ibdev->ctx_list, link) { 108 list_for_each_entry(qp_grp, &ctx->qp_grp_list, link) { 109 cur_state = qp_grp->state; 110 if (cur_state == IB_QPS_INIT || 111 cur_state == IB_QPS_RTR || 112 cur_state == IB_QPS_RTS) { 113 status = usnic_ib_qp_grp_modify(qp_grp, 114 IB_QPS_ERR, 115 NULL); 116 if (status) { 117 usnic_err("Failed to transistion qp grp %u from %s to %s\n", 118 qp_grp->grp_id, 119 usnic_ib_qp_grp_state_to_string 120 (cur_state), 121 usnic_ib_qp_grp_state_to_string 122 (IB_QPS_ERR)); 123 } 124 } 125 } 126 } 127 } 128 129 static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev, 130 unsigned long event) 131 { 132 struct net_device *netdev; 133 struct ib_event ib_event; 134 135 memset(&ib_event, 0, sizeof(ib_event)); 136 137 mutex_lock(&us_ibdev->usdev_lock); 138 netdev = us_ibdev->netdev; 139 switch (event) { 140 case NETDEV_REBOOT: 141 usnic_info("PF Reset on %s\n", dev_name(&us_ibdev->ib_dev.dev)); 142 usnic_ib_qp_grp_modify_active_to_err(us_ibdev); 143 ib_event.event = IB_EVENT_PORT_ERR; 144 ib_event.device = &us_ibdev->ib_dev; 145 ib_event.element.port_num = 1; 146 ib_dispatch_event(&ib_event); 147 break; 148 case NETDEV_UP: 149 case NETDEV_DOWN: 150 case NETDEV_CHANGE: 151 if (!us_ibdev->ufdev->link_up && 152 netif_carrier_ok(netdev)) { 153 usnic_fwd_carrier_up(us_ibdev->ufdev); 154 usnic_info("Link UP on %s\n", 155 dev_name(&us_ibdev->ib_dev.dev)); 156 ib_event.event = IB_EVENT_PORT_ACTIVE; 157 ib_event.device = &us_ibdev->ib_dev; 158 ib_event.element.port_num = 1; 159 ib_dispatch_event(&ib_event); 160 } else if (us_ibdev->ufdev->link_up && 161 !netif_carrier_ok(netdev)) { 162 usnic_fwd_carrier_down(us_ibdev->ufdev); 163 usnic_info("Link DOWN on %s\n", 164 dev_name(&us_ibdev->ib_dev.dev)); 165 
usnic_ib_qp_grp_modify_active_to_err(us_ibdev); 166 ib_event.event = IB_EVENT_PORT_ERR; 167 ib_event.device = &us_ibdev->ib_dev; 168 ib_event.element.port_num = 1; 169 ib_dispatch_event(&ib_event); 170 } else { 171 usnic_dbg("Ignoring %s on %s\n", 172 netdev_cmd_to_name(event), 173 dev_name(&us_ibdev->ib_dev.dev)); 174 } 175 break; 176 case NETDEV_CHANGEADDR: 177 if (!memcmp(us_ibdev->ufdev->mac, netdev->dev_addr, 178 sizeof(us_ibdev->ufdev->mac))) { 179 usnic_dbg("Ignoring addr change on %s\n", 180 dev_name(&us_ibdev->ib_dev.dev)); 181 } else { 182 usnic_info(" %s old mac: %pM new mac: %pM\n", 183 dev_name(&us_ibdev->ib_dev.dev), 184 us_ibdev->ufdev->mac, 185 netdev->dev_addr); 186 usnic_fwd_set_mac(us_ibdev->ufdev, netdev->dev_addr); 187 usnic_ib_qp_grp_modify_active_to_err(us_ibdev); 188 ib_event.event = IB_EVENT_GID_CHANGE; 189 ib_event.device = &us_ibdev->ib_dev; 190 ib_event.element.port_num = 1; 191 ib_dispatch_event(&ib_event); 192 } 193 194 break; 195 case NETDEV_CHANGEMTU: 196 if (us_ibdev->ufdev->mtu != netdev->mtu) { 197 usnic_info("MTU Change on %s old: %u new: %u\n", 198 dev_name(&us_ibdev->ib_dev.dev), 199 us_ibdev->ufdev->mtu, netdev->mtu); 200 usnic_fwd_set_mtu(us_ibdev->ufdev, netdev->mtu); 201 usnic_ib_qp_grp_modify_active_to_err(us_ibdev); 202 } else { 203 usnic_dbg("Ignoring MTU change on %s\n", 204 dev_name(&us_ibdev->ib_dev.dev)); 205 } 206 break; 207 default: 208 usnic_dbg("Ignoring event %s on %s", 209 netdev_cmd_to_name(event), 210 dev_name(&us_ibdev->ib_dev.dev)); 211 } 212 mutex_unlock(&us_ibdev->usdev_lock); 213 } 214 215 static int usnic_ib_netdevice_event(struct notifier_block *notifier, 216 unsigned long event, void *ptr) 217 { 218 struct usnic_ib_dev *us_ibdev; 219 220 struct net_device *netdev = netdev_notifier_info_to_dev(ptr); 221 222 mutex_lock(&usnic_ib_ibdev_list_lock); 223 list_for_each_entry(us_ibdev, &usnic_ib_ibdev_list, ib_dev_link) { 224 if (us_ibdev->netdev == netdev) { 225 usnic_ib_handle_usdev_event(us_ibdev, event); 
226 break; 227 } 228 } 229 mutex_unlock(&usnic_ib_ibdev_list_lock); 230 231 return NOTIFY_DONE; 232 } 233 234 static struct notifier_block usnic_ib_netdevice_notifier = { 235 .notifier_call = usnic_ib_netdevice_event 236 }; 237 /* End of netdev section */ 238 239 /* Start of inet section */ 240 static int usnic_ib_handle_inet_event(struct usnic_ib_dev *us_ibdev, 241 unsigned long event, void *ptr) 242 { 243 struct in_ifaddr *ifa = ptr; 244 struct ib_event ib_event; 245 246 mutex_lock(&us_ibdev->usdev_lock); 247 248 switch (event) { 249 case NETDEV_DOWN: 250 usnic_info("%s via ip notifiers", 251 netdev_cmd_to_name(event)); 252 usnic_fwd_del_ipaddr(us_ibdev->ufdev); 253 usnic_ib_qp_grp_modify_active_to_err(us_ibdev); 254 ib_event.event = IB_EVENT_GID_CHANGE; 255 ib_event.device = &us_ibdev->ib_dev; 256 ib_event.element.port_num = 1; 257 ib_dispatch_event(&ib_event); 258 break; 259 case NETDEV_UP: 260 usnic_fwd_add_ipaddr(us_ibdev->ufdev, ifa->ifa_address); 261 usnic_info("%s via ip notifiers: ip %pI4", 262 netdev_cmd_to_name(event), 263 &us_ibdev->ufdev->inaddr); 264 ib_event.event = IB_EVENT_GID_CHANGE; 265 ib_event.device = &us_ibdev->ib_dev; 266 ib_event.element.port_num = 1; 267 ib_dispatch_event(&ib_event); 268 break; 269 default: 270 usnic_info("Ignoring event %s on %s", 271 netdev_cmd_to_name(event), 272 dev_name(&us_ibdev->ib_dev.dev)); 273 } 274 mutex_unlock(&us_ibdev->usdev_lock); 275 276 return NOTIFY_DONE; 277 } 278 279 static int usnic_ib_inetaddr_event(struct notifier_block *notifier, 280 unsigned long event, void *ptr) 281 { 282 struct usnic_ib_dev *us_ibdev; 283 struct in_ifaddr *ifa = ptr; 284 struct net_device *netdev = ifa->ifa_dev->dev; 285 286 mutex_lock(&usnic_ib_ibdev_list_lock); 287 list_for_each_entry(us_ibdev, &usnic_ib_ibdev_list, ib_dev_link) { 288 if (us_ibdev->netdev == netdev) { 289 usnic_ib_handle_inet_event(us_ibdev, event, ptr); 290 break; 291 } 292 } 293 mutex_unlock(&usnic_ib_ibdev_list_lock); 294 295 return NOTIFY_DONE; 296 } 297 
static struct notifier_block usnic_ib_inetaddr_notifier = { 298 .notifier_call = usnic_ib_inetaddr_event 299 }; 300 /* End of inet section*/ 301 302 static int usnic_port_immutable(struct ib_device *ibdev, u8 port_num, 303 struct ib_port_immutable *immutable) 304 { 305 struct ib_port_attr attr; 306 int err; 307 308 immutable->core_cap_flags = RDMA_CORE_PORT_USNIC; 309 310 err = ib_query_port(ibdev, port_num, &attr); 311 if (err) 312 return err; 313 314 immutable->pkey_tbl_len = attr.pkey_tbl_len; 315 immutable->gid_tbl_len = attr.gid_tbl_len; 316 317 return 0; 318 } 319 320 static void usnic_get_dev_fw_str(struct ib_device *device, char *str) 321 { 322 struct usnic_ib_dev *us_ibdev = 323 container_of(device, struct usnic_ib_dev, ib_dev); 324 struct ethtool_drvinfo info; 325 326 mutex_lock(&us_ibdev->usdev_lock); 327 us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info); 328 mutex_unlock(&us_ibdev->usdev_lock); 329 330 snprintf(str, IB_FW_VERSION_NAME_MAX, "%s", info.fw_version); 331 } 332 333 static const struct ib_device_ops usnic_dev_ops = { 334 .alloc_pd = usnic_ib_alloc_pd, 335 .alloc_ucontext = usnic_ib_alloc_ucontext, 336 .create_ah = usnic_ib_create_ah, 337 .create_cq = usnic_ib_create_cq, 338 .create_qp = usnic_ib_create_qp, 339 .dealloc_pd = usnic_ib_dealloc_pd, 340 .dealloc_ucontext = usnic_ib_dealloc_ucontext, 341 .dereg_mr = usnic_ib_dereg_mr, 342 .destroy_ah = usnic_ib_destroy_ah, 343 .destroy_cq = usnic_ib_destroy_cq, 344 .destroy_qp = usnic_ib_destroy_qp, 345 .get_dev_fw_str = usnic_get_dev_fw_str, 346 .get_dma_mr = usnic_ib_get_dma_mr, 347 .get_link_layer = usnic_ib_port_link_layer, 348 .get_netdev = usnic_get_netdev, 349 .get_port_immutable = usnic_port_immutable, 350 .mmap = usnic_ib_mmap, 351 .modify_qp = usnic_ib_modify_qp, 352 .poll_cq = usnic_ib_poll_cq, 353 .post_recv = usnic_ib_post_recv, 354 .post_send = usnic_ib_post_send, 355 .query_device = usnic_ib_query_device, 356 .query_gid = usnic_ib_query_gid, 357 .query_pkey = 
usnic_ib_query_pkey, 358 .query_port = usnic_ib_query_port, 359 .query_qp = usnic_ib_query_qp, 360 .reg_user_mr = usnic_ib_reg_mr, 361 .req_notify_cq = usnic_ib_req_notify_cq, 362 }; 363 364 /* Start of PF discovery section */ 365 static void *usnic_ib_device_add(struct pci_dev *dev) 366 { 367 struct usnic_ib_dev *us_ibdev; 368 union ib_gid gid; 369 struct in_device *ind; 370 struct net_device *netdev; 371 372 usnic_dbg("\n"); 373 netdev = pci_get_drvdata(dev); 374 375 us_ibdev = (struct usnic_ib_dev *)ib_alloc_device(sizeof(*us_ibdev)); 376 if (!us_ibdev) { 377 usnic_err("Device %s context alloc failed\n", 378 netdev_name(pci_get_drvdata(dev))); 379 return ERR_PTR(-EFAULT); 380 } 381 382 us_ibdev->ufdev = usnic_fwd_dev_alloc(dev); 383 if (!us_ibdev->ufdev) { 384 usnic_err("Failed to alloc ufdev for %s\n", pci_name(dev)); 385 goto err_dealloc; 386 } 387 388 mutex_init(&us_ibdev->usdev_lock); 389 INIT_LIST_HEAD(&us_ibdev->vf_dev_list); 390 INIT_LIST_HEAD(&us_ibdev->ctx_list); 391 392 us_ibdev->pdev = dev; 393 us_ibdev->netdev = pci_get_drvdata(dev); 394 us_ibdev->ib_dev.owner = THIS_MODULE; 395 us_ibdev->ib_dev.node_type = RDMA_NODE_USNIC_UDP; 396 us_ibdev->ib_dev.phys_port_cnt = USNIC_IB_PORT_CNT; 397 us_ibdev->ib_dev.num_comp_vectors = USNIC_IB_NUM_COMP_VECTORS; 398 us_ibdev->ib_dev.dev.parent = &dev->dev; 399 us_ibdev->ib_dev.uverbs_abi_ver = USNIC_UVERBS_ABI_VERSION; 400 401 us_ibdev->ib_dev.uverbs_cmd_mask = 402 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | 403 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) | 404 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) | 405 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) | 406 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) | 407 (1ull << IB_USER_VERBS_CMD_REG_MR) | 408 (1ull << IB_USER_VERBS_CMD_DEREG_MR) | 409 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | 410 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) | 411 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) | 412 (1ull << IB_USER_VERBS_CMD_CREATE_QP) | 413 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | 414 (1ull << 
IB_USER_VERBS_CMD_QUERY_QP) | 415 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | 416 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) | 417 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) | 418 (1ull << IB_USER_VERBS_CMD_OPEN_QP); 419 420 ib_set_device_ops(&us_ibdev->ib_dev, &usnic_dev_ops); 421 422 us_ibdev->ib_dev.driver_id = RDMA_DRIVER_USNIC; 423 rdma_set_device_sysfs_group(&us_ibdev->ib_dev, &usnic_attr_group); 424 425 if (ib_register_device(&us_ibdev->ib_dev, "usnic_%d", NULL)) 426 goto err_fwd_dealloc; 427 428 usnic_fwd_set_mtu(us_ibdev->ufdev, us_ibdev->netdev->mtu); 429 usnic_fwd_set_mac(us_ibdev->ufdev, us_ibdev->netdev->dev_addr); 430 if (netif_carrier_ok(us_ibdev->netdev)) 431 usnic_fwd_carrier_up(us_ibdev->ufdev); 432 433 ind = in_dev_get(netdev); 434 if (ind->ifa_list) 435 usnic_fwd_add_ipaddr(us_ibdev->ufdev, 436 ind->ifa_list->ifa_address); 437 in_dev_put(ind); 438 439 usnic_mac_ip_to_gid(us_ibdev->netdev->perm_addr, 440 us_ibdev->ufdev->inaddr, &gid.raw[0]); 441 memcpy(&us_ibdev->ib_dev.node_guid, &gid.global.interface_id, 442 sizeof(gid.global.interface_id)); 443 kref_init(&us_ibdev->vf_cnt); 444 445 usnic_info("Added ibdev: %s netdev: %s with mac %pM Link: %u MTU: %u\n", 446 dev_name(&us_ibdev->ib_dev.dev), 447 netdev_name(us_ibdev->netdev), us_ibdev->ufdev->mac, 448 us_ibdev->ufdev->link_up, us_ibdev->ufdev->mtu); 449 return us_ibdev; 450 451 err_fwd_dealloc: 452 usnic_fwd_dev_free(us_ibdev->ufdev); 453 err_dealloc: 454 usnic_err("failed -- deallocing device\n"); 455 ib_dealloc_device(&us_ibdev->ib_dev); 456 return NULL; 457 } 458 459 static void usnic_ib_device_remove(struct usnic_ib_dev *us_ibdev) 460 { 461 usnic_info("Unregistering %s\n", dev_name(&us_ibdev->ib_dev.dev)); 462 usnic_ib_sysfs_unregister_usdev(us_ibdev); 463 usnic_fwd_dev_free(us_ibdev->ufdev); 464 ib_unregister_device(&us_ibdev->ib_dev); 465 ib_dealloc_device(&us_ibdev->ib_dev); 466 } 467 468 static void usnic_ib_undiscover_pf(struct kref *kref) 469 { 470 struct usnic_ib_dev *us_ibdev, *tmp; 471 
struct pci_dev *dev; 472 bool found = false; 473 474 dev = container_of(kref, struct usnic_ib_dev, vf_cnt)->pdev; 475 mutex_lock(&usnic_ib_ibdev_list_lock); 476 list_for_each_entry_safe(us_ibdev, tmp, 477 &usnic_ib_ibdev_list, ib_dev_link) { 478 if (us_ibdev->pdev == dev) { 479 list_del(&us_ibdev->ib_dev_link); 480 usnic_ib_device_remove(us_ibdev); 481 found = true; 482 break; 483 } 484 } 485 486 WARN(!found, "Failed to remove PF %s\n", pci_name(dev)); 487 488 mutex_unlock(&usnic_ib_ibdev_list_lock); 489 } 490 491 static struct usnic_ib_dev *usnic_ib_discover_pf(struct usnic_vnic *vnic) 492 { 493 struct usnic_ib_dev *us_ibdev; 494 struct pci_dev *parent_pci, *vf_pci; 495 int err; 496 497 vf_pci = usnic_vnic_get_pdev(vnic); 498 parent_pci = pci_physfn(vf_pci); 499 500 BUG_ON(!parent_pci); 501 502 mutex_lock(&usnic_ib_ibdev_list_lock); 503 list_for_each_entry(us_ibdev, &usnic_ib_ibdev_list, ib_dev_link) { 504 if (us_ibdev->pdev == parent_pci) { 505 kref_get(&us_ibdev->vf_cnt); 506 goto out; 507 } 508 } 509 510 us_ibdev = usnic_ib_device_add(parent_pci); 511 if (IS_ERR_OR_NULL(us_ibdev)) { 512 us_ibdev = us_ibdev ? 
us_ibdev : ERR_PTR(-EFAULT); 513 goto out; 514 } 515 516 err = usnic_ib_sysfs_register_usdev(us_ibdev); 517 if (err) { 518 usnic_ib_device_remove(us_ibdev); 519 us_ibdev = ERR_PTR(err); 520 goto out; 521 } 522 523 list_add(&us_ibdev->ib_dev_link, &usnic_ib_ibdev_list); 524 out: 525 mutex_unlock(&usnic_ib_ibdev_list_lock); 526 return us_ibdev; 527 } 528 /* End of PF discovery section */ 529 530 /* Start of PCI section */ 531 532 static const struct pci_device_id usnic_ib_pci_ids[] = { 533 {PCI_DEVICE(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC)}, 534 {0,} 535 }; 536 537 static int usnic_ib_pci_probe(struct pci_dev *pdev, 538 const struct pci_device_id *id) 539 { 540 int err; 541 struct usnic_ib_dev *pf; 542 struct usnic_ib_vf *vf; 543 enum usnic_vnic_res_type res_type; 544 545 vf = kzalloc(sizeof(*vf), GFP_KERNEL); 546 if (!vf) 547 return -ENOMEM; 548 549 err = pci_enable_device(pdev); 550 if (err) { 551 usnic_err("Failed to enable %s with err %d\n", 552 pci_name(pdev), err); 553 goto out_clean_vf; 554 } 555 556 err = pci_request_regions(pdev, DRV_NAME); 557 if (err) { 558 usnic_err("Failed to request region for %s with err %d\n", 559 pci_name(pdev), err); 560 goto out_disable_device; 561 } 562 563 pci_set_master(pdev); 564 pci_set_drvdata(pdev, vf); 565 566 vf->vnic = usnic_vnic_alloc(pdev); 567 if (IS_ERR_OR_NULL(vf->vnic)) { 568 err = vf->vnic ? PTR_ERR(vf->vnic) : -ENOMEM; 569 usnic_err("Failed to alloc vnic for %s with err %d\n", 570 pci_name(pdev), err); 571 goto out_release_regions; 572 } 573 574 pf = usnic_ib_discover_pf(vf->vnic); 575 if (IS_ERR_OR_NULL(pf)) { 576 usnic_err("Failed to discover pf of vnic %s with err%ld\n", 577 pci_name(pdev), PTR_ERR(pf)); 578 err = pf ? 
PTR_ERR(pf) : -EFAULT; 579 goto out_clean_vnic; 580 } 581 582 vf->pf = pf; 583 spin_lock_init(&vf->lock); 584 mutex_lock(&pf->usdev_lock); 585 list_add_tail(&vf->link, &pf->vf_dev_list); 586 /* 587 * Save max settings (will be same for each VF, easier to re-write than 588 * to say "if (!set) { set_values(); set=1; } 589 */ 590 for (res_type = USNIC_VNIC_RES_TYPE_EOL+1; 591 res_type < USNIC_VNIC_RES_TYPE_MAX; 592 res_type++) { 593 pf->vf_res_cnt[res_type] = usnic_vnic_res_cnt(vf->vnic, 594 res_type); 595 } 596 597 mutex_unlock(&pf->usdev_lock); 598 599 usnic_info("Registering usnic VF %s into PF %s\n", pci_name(pdev), 600 dev_name(&pf->ib_dev.dev)); 601 usnic_ib_log_vf(vf); 602 return 0; 603 604 out_clean_vnic: 605 usnic_vnic_free(vf->vnic); 606 out_release_regions: 607 pci_set_drvdata(pdev, NULL); 608 pci_clear_master(pdev); 609 pci_release_regions(pdev); 610 out_disable_device: 611 pci_disable_device(pdev); 612 out_clean_vf: 613 kfree(vf); 614 return err; 615 } 616 617 static void usnic_ib_pci_remove(struct pci_dev *pdev) 618 { 619 struct usnic_ib_vf *vf = pci_get_drvdata(pdev); 620 struct usnic_ib_dev *pf = vf->pf; 621 622 mutex_lock(&pf->usdev_lock); 623 list_del(&vf->link); 624 mutex_unlock(&pf->usdev_lock); 625 626 kref_put(&pf->vf_cnt, usnic_ib_undiscover_pf); 627 usnic_vnic_free(vf->vnic); 628 pci_set_drvdata(pdev, NULL); 629 pci_clear_master(pdev); 630 pci_release_regions(pdev); 631 pci_disable_device(pdev); 632 kfree(vf); 633 634 usnic_info("Removed VF %s\n", pci_name(pdev)); 635 } 636 637 /* PCI driver entry points */ 638 static struct pci_driver usnic_ib_pci_driver = { 639 .name = DRV_NAME, 640 .id_table = usnic_ib_pci_ids, 641 .probe = usnic_ib_pci_probe, 642 .remove = usnic_ib_pci_remove, 643 }; 644 /* End of PCI section */ 645 646 /* Start of module section */ 647 static int __init usnic_ib_init(void) 648 { 649 int err; 650 651 printk_once(KERN_INFO "%s", usnic_version); 652 653 err = usnic_uiom_init(DRV_NAME); 654 if (err) { 655 usnic_err("Unable to 
initialize umem with err %d\n", err); 656 return err; 657 } 658 659 err = pci_register_driver(&usnic_ib_pci_driver); 660 if (err) { 661 usnic_err("Unable to register with PCI\n"); 662 goto out_umem_fini; 663 } 664 665 err = register_netdevice_notifier(&usnic_ib_netdevice_notifier); 666 if (err) { 667 usnic_err("Failed to register netdev notifier\n"); 668 goto out_pci_unreg; 669 } 670 671 err = register_inetaddr_notifier(&usnic_ib_inetaddr_notifier); 672 if (err) { 673 usnic_err("Failed to register inet addr notifier\n"); 674 goto out_unreg_netdev_notifier; 675 } 676 677 err = usnic_transport_init(); 678 if (err) { 679 usnic_err("Failed to initialize transport\n"); 680 goto out_unreg_inetaddr_notifier; 681 } 682 683 usnic_debugfs_init(); 684 685 return 0; 686 687 out_unreg_inetaddr_notifier: 688 unregister_inetaddr_notifier(&usnic_ib_inetaddr_notifier); 689 out_unreg_netdev_notifier: 690 unregister_netdevice_notifier(&usnic_ib_netdevice_notifier); 691 out_pci_unreg: 692 pci_unregister_driver(&usnic_ib_pci_driver); 693 out_umem_fini: 694 usnic_uiom_fini(); 695 696 return err; 697 } 698 699 static void __exit usnic_ib_destroy(void) 700 { 701 usnic_dbg("\n"); 702 usnic_debugfs_exit(); 703 usnic_transport_fini(); 704 unregister_inetaddr_notifier(&usnic_ib_inetaddr_notifier); 705 unregister_netdevice_notifier(&usnic_ib_netdevice_notifier); 706 pci_unregister_driver(&usnic_ib_pci_driver); 707 usnic_uiom_fini(); 708 } 709 710 MODULE_DESCRIPTION("Cisco VIC (usNIC) Verbs Driver"); 711 MODULE_AUTHOR("Upinder Malhi <umalhi@cisco.com>"); 712 MODULE_LICENSE("Dual BSD/GPL"); 713 module_param(usnic_log_lvl, uint, S_IRUGO | S_IWUSR); 714 module_param(usnic_ib_share_vf, uint, S_IRUGO | S_IWUSR); 715 MODULE_PARM_DESC(usnic_log_lvl, " Off=0, Err=1, Info=2, Debug=3"); 716 MODULE_PARM_DESC(usnic_ib_share_vf, "Off=0, On=1 VF sharing amongst QPs"); 717 MODULE_DEVICE_TABLE(pci, usnic_ib_pci_ids); 718 719 module_init(usnic_ib_init); 720 module_exit(usnic_ib_destroy); 721 /* End of module 
section */ 722