/*
 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Author: Upinder Malhi <umalhi@cisco.com>
 * Author: Anant Deepak <anadeepa@cisco.com>
 * Author: Cesare Cantu' <cantuc@cisco.com>
 * Author: Jeff Squyres <jsquyres@cisco.com>
 * Author: Kiran Thirumalai <kithirum@cisco.com>
 * Author: Xuyang Wang <xuywang@cisco.com>
 * Author: Reese Faucette <rfaucett@cisco.com>
 *
 */

#include <linux/module.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/netdevice.h>

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>

#include "usnic_abi.h"
#include "usnic_common_util.h"
#include "usnic_ib.h"
#include "usnic_ib_qp_grp.h"
#include "usnic_log.h"
#include "usnic_fwd.h"
#include "usnic_debugfs.h"
#include "usnic_ib_verbs.h"
#include "usnic_transport.h"
#include "usnic_uiom.h"
#include "usnic_ib_sysfs.h"

unsigned int usnic_log_lvl = USNIC_LOG_LVL_ERR;
unsigned int usnic_ib_share_vf = 1;

static const char usnic_version[] =
	DRV_NAME ": Cisco VIC (USNIC) Verbs Driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

static DEFINE_MUTEX(usnic_ib_ibdev_list_lock);
static LIST_HEAD(usnic_ib_ibdev_list);

/* Callback dump funcs */
static int usnic_ib_dump_vf_hdr(void *obj, char *buf, int buf_sz)
{
	struct usnic_ib_vf *vf = obj;
	return scnprintf(buf, buf_sz, "PF: %s ", vf->pf->ib_dev.name);
}
/* End callback dump funcs */

static void usnic_ib_dump_vf(struct usnic_ib_vf *vf, char *buf, int buf_sz)
{
	usnic_vnic_dump(vf->vnic, buf, buf_sz, vf,
			usnic_ib_dump_vf_hdr,
			usnic_ib_qp_grp_dump_hdr, usnic_ib_qp_grp_dump_rows);
}

void usnic_ib_log_vf(struct usnic_ib_vf *vf)
{
	char buf[1000];
	usnic_ib_dump_vf(vf, buf, sizeof(buf));
	usnic_dbg("%s\n", buf);
}

/* Start of netdev section */
static inline const char *usnic_ib_netdev_event_to_string(unsigned long event)
{
	const char *event2str[] = {"NETDEV_NONE", "NETDEV_UP", "NETDEV_DOWN",
		"NETDEV_REBOOT", "NETDEV_CHANGE",
		"NETDEV_REGISTER", "NETDEV_UNREGISTER", "NETDEV_CHANGEMTU",
		"NETDEV_CHANGEADDR", "NETDEV_GOING_DOWN", "NETDEV_FEAT_CHANGE",
		"NETDEV_BONDING_FAILOVER", "NETDEV_PRE_UP",
		"NETDEV_PRE_TYPE_CHANGE", "NETDEV_POST_TYPE_CHANGE",
		"NETDEV_POST_INT", "NETDEV_UNREGISTER_FINAL", "NETDEV_RELEASE",
		"NETDEV_NOTIFY_PEERS", "NETDEV_JOIN"
	};

	if (event >= ARRAY_SIZE(event2str))
		return "UNKNOWN_NETDEV_EVENT";
	else
		return event2str[event];
}

static void usnic_ib_qp_grp_modify_active_to_err(struct usnic_ib_dev *us_ibdev)
{
	struct usnic_ib_ucontext *ctx;
	struct usnic_ib_qp_grp *qp_grp;
	enum ib_qp_state cur_state;
	int status;

	BUG_ON(!mutex_is_locked(&us_ibdev->usdev_lock));

	list_for_each_entry(ctx, &us_ibdev->ctx_list, link) {
		list_for_each_entry(qp_grp, &ctx->qp_grp_list, link) {
			cur_state = qp_grp->state;
			if (cur_state == IB_QPS_INIT ||
				cur_state == IB_QPS_RTR ||
				cur_state == IB_QPS_RTS) {
				status = usnic_ib_qp_grp_modify(qp_grp,
								IB_QPS_ERR,
								NULL);
				if (status) {
					usnic_err("Failed to transition qp grp %u from %s to %s\n",
						qp_grp->grp_id,
						usnic_ib_qp_grp_state_to_string
						(cur_state),
						usnic_ib_qp_grp_state_to_string
						(IB_QPS_ERR));
				}
			}
		}
	}
}

static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev,
					unsigned long event)
{
	struct net_device *netdev;
	struct ib_event ib_event;

	memset(&ib_event, 0, sizeof(ib_event));

	mutex_lock(&us_ibdev->usdev_lock);
	netdev = us_ibdev->netdev;
	switch (event) {
	case NETDEV_REBOOT:
		usnic_info("PF Reset on %s\n", us_ibdev->ib_dev.name);
		usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
		ib_event.event = IB_EVENT_PORT_ERR;
		ib_event.device = &us_ibdev->ib_dev;
		ib_event.element.port_num = 1;
		ib_dispatch_event(&ib_event);
		break;
	case NETDEV_UP:
	case NETDEV_DOWN:
	case NETDEV_CHANGE:
		if (!us_ibdev->ufdev->link_up &&
				netif_carrier_ok(netdev)) {
			usnic_fwd_carrier_up(us_ibdev->ufdev);
			usnic_info("Link UP on %s\n", us_ibdev->ib_dev.name);
			ib_event.event = IB_EVENT_PORT_ACTIVE;
			ib_event.device = &us_ibdev->ib_dev;
			ib_event.element.port_num = 1;
			ib_dispatch_event(&ib_event);
		} else if (us_ibdev->ufdev->link_up &&
				!netif_carrier_ok(netdev)) {
			usnic_fwd_carrier_down(us_ibdev->ufdev);
			usnic_info("Link DOWN on %s\n", us_ibdev->ib_dev.name);
			usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
			ib_event.event = IB_EVENT_PORT_ERR;
			ib_event.device = &us_ibdev->ib_dev;
			ib_event.element.port_num = 1;
			ib_dispatch_event(&ib_event);
		} else {
			usnic_dbg("Ignoring %s on %s\n",
					usnic_ib_netdev_event_to_string(event),
					us_ibdev->ib_dev.name);
		}
		break;
	case NETDEV_CHANGEADDR:
		if (!memcmp(us_ibdev->ufdev->mac, netdev->dev_addr,
				sizeof(us_ibdev->ufdev->mac))) {
			usnic_dbg("Ignoring addr change on %s\n",
					us_ibdev->ib_dev.name);
		} else {
			usnic_info(" %s old mac: %pM new mac: %pM\n",
					us_ibdev->ib_dev.name,
					us_ibdev->ufdev->mac,
					netdev->dev_addr);
			usnic_fwd_set_mac(us_ibdev->ufdev, netdev->dev_addr);
			usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
			ib_event.event = IB_EVENT_GID_CHANGE;
			ib_event.device = &us_ibdev->ib_dev;
			ib_event.element.port_num = 1;
			ib_dispatch_event(&ib_event);
		}

		break;
	case NETDEV_CHANGEMTU:
		if (us_ibdev->ufdev->mtu != netdev->mtu) {
			usnic_info("MTU Change on %s old: %u new: %u\n",
					us_ibdev->ib_dev.name,
					us_ibdev->ufdev->mtu, netdev->mtu);
			usnic_fwd_set_mtu(us_ibdev->ufdev, netdev->mtu);
			usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
		} else {
			usnic_dbg("Ignoring MTU change on %s\n",
					us_ibdev->ib_dev.name);
		}
		break;
	default:
		usnic_dbg("Ignoring event %s on %s",
				usnic_ib_netdev_event_to_string(event),
				us_ibdev->ib_dev.name);
	}
	mutex_unlock(&us_ibdev->usdev_lock);
}

static int usnic_ib_netdevice_event(struct notifier_block *notifier,
					unsigned long event, void *ptr)
{
	struct usnic_ib_dev *us_ibdev;

	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	mutex_lock(&usnic_ib_ibdev_list_lock);
	list_for_each_entry(us_ibdev, &usnic_ib_ibdev_list, ib_dev_link) {
		if (us_ibdev->netdev == netdev) {
			usnic_ib_handle_usdev_event(us_ibdev, event);
			break;
		}
	}
	mutex_unlock(&usnic_ib_ibdev_list_lock);

	return NOTIFY_DONE;
}

static struct notifier_block usnic_ib_netdevice_notifier = {
	.notifier_call = usnic_ib_netdevice_event
};
/* End of netdev section */

/* Start of inet section */
static int usnic_ib_handle_inet_event(struct usnic_ib_dev *us_ibdev,
					unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = ptr;
	struct ib_event ib_event;

	mutex_lock(&us_ibdev->usdev_lock);

	switch (event) {
	case NETDEV_DOWN:
		usnic_info("%s via ip notifiers",
				usnic_ib_netdev_event_to_string(event));
		usnic_fwd_del_ipaddr(us_ibdev->ufdev);
		usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
		ib_event.event = IB_EVENT_GID_CHANGE;
		ib_event.device = &us_ibdev->ib_dev;
		ib_event.element.port_num = 1;
		ib_dispatch_event(&ib_event);
		break;
	case NETDEV_UP:
		usnic_fwd_add_ipaddr(us_ibdev->ufdev, ifa->ifa_address);
		usnic_info("%s via ip notifiers: ip %pI4",
				usnic_ib_netdev_event_to_string(event),
				&us_ibdev->ufdev->inaddr);
		ib_event.event = IB_EVENT_GID_CHANGE;
		ib_event.device = &us_ibdev->ib_dev;
		ib_event.element.port_num = 1;
		ib_dispatch_event(&ib_event);
		break;
	default:
		usnic_info("Ignoring event %s on %s",
				usnic_ib_netdev_event_to_string(event),
				us_ibdev->ib_dev.name);
	}
	mutex_unlock(&us_ibdev->usdev_lock);

	return NOTIFY_DONE;
}

static int usnic_ib_inetaddr_event(struct notifier_block *notifier,
					unsigned long event, void *ptr)
{
	struct usnic_ib_dev *us_ibdev;
	struct in_ifaddr *ifa = ptr;
	struct net_device *netdev = ifa->ifa_dev->dev;

	mutex_lock(&usnic_ib_ibdev_list_lock);
	list_for_each_entry(us_ibdev, &usnic_ib_ibdev_list, ib_dev_link) {
		if (us_ibdev->netdev == netdev) {
			usnic_ib_handle_inet_event(us_ibdev, event, ptr);
			break;
		}
	}
	mutex_unlock(&usnic_ib_ibdev_list_lock);

	return NOTIFY_DONE;
}
static struct notifier_block usnic_ib_inetaddr_notifier = {
	.notifier_call = usnic_ib_inetaddr_event
};
/* End of inet section */

static int usnic_port_immutable(struct ib_device *ibdev, u8 port_num,
				struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = usnic_ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;

	return 0;
}

/* Start of PF discovery section */
static void *usnic_ib_device_add(struct pci_dev *dev)
{
	struct usnic_ib_dev *us_ibdev;
	union ib_gid gid;
	struct in_ifaddr *in;
	struct net_device *netdev;

	usnic_dbg("\n");
	netdev = pci_get_drvdata(dev);

	us_ibdev = (struct usnic_ib_dev *)ib_alloc_device(sizeof(*us_ibdev));
	if (IS_ERR_OR_NULL(us_ibdev)) {
		usnic_err("Device %s context alloc failed\n",
				netdev_name(pci_get_drvdata(dev)));
		return ERR_PTR(us_ibdev ? PTR_ERR(us_ibdev) : -EFAULT);
	}

	us_ibdev->ufdev = usnic_fwd_dev_alloc(dev);
	if (IS_ERR_OR_NULL(us_ibdev->ufdev)) {
		usnic_err("Failed to alloc ufdev for %s with err %ld\n",
				pci_name(dev), PTR_ERR(us_ibdev->ufdev));
		goto err_dealloc;
	}

	mutex_init(&us_ibdev->usdev_lock);
	INIT_LIST_HEAD(&us_ibdev->vf_dev_list);
	INIT_LIST_HEAD(&us_ibdev->ctx_list);

	us_ibdev->pdev = dev;
	us_ibdev->netdev = pci_get_drvdata(dev);
	us_ibdev->ib_dev.owner = THIS_MODULE;
	us_ibdev->ib_dev.node_type = RDMA_NODE_USNIC_UDP;
	us_ibdev->ib_dev.phys_port_cnt = USNIC_IB_PORT_CNT;
	us_ibdev->ib_dev.num_comp_vectors = USNIC_IB_NUM_COMP_VECTORS;
	us_ibdev->ib_dev.dma_device = &dev->dev;
	us_ibdev->ib_dev.uverbs_abi_ver = USNIC_UVERBS_ABI_VERSION;
	strlcpy(us_ibdev->ib_dev.name, "usnic_%d", IB_DEVICE_NAME_MAX);

	us_ibdev->ib_dev.uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_OPEN_QP);

	us_ibdev->ib_dev.query_device = usnic_ib_query_device;
	us_ibdev->ib_dev.query_port = usnic_ib_query_port;
	us_ibdev->ib_dev.query_pkey = usnic_ib_query_pkey;
	us_ibdev->ib_dev.query_gid = usnic_ib_query_gid;
	us_ibdev->ib_dev.get_link_layer = usnic_ib_port_link_layer;
	us_ibdev->ib_dev.alloc_pd = usnic_ib_alloc_pd;
	us_ibdev->ib_dev.dealloc_pd = usnic_ib_dealloc_pd;
	us_ibdev->ib_dev.create_qp = usnic_ib_create_qp;
	us_ibdev->ib_dev.modify_qp = usnic_ib_modify_qp;
	us_ibdev->ib_dev.query_qp = usnic_ib_query_qp;
	us_ibdev->ib_dev.destroy_qp = usnic_ib_destroy_qp;
	us_ibdev->ib_dev.create_cq = usnic_ib_create_cq;
	us_ibdev->ib_dev.destroy_cq = usnic_ib_destroy_cq;
	us_ibdev->ib_dev.reg_user_mr = usnic_ib_reg_mr;
	us_ibdev->ib_dev.dereg_mr = usnic_ib_dereg_mr;
	us_ibdev->ib_dev.alloc_ucontext = usnic_ib_alloc_ucontext;
	us_ibdev->ib_dev.dealloc_ucontext = usnic_ib_dealloc_ucontext;
	us_ibdev->ib_dev.mmap = usnic_ib_mmap;
	us_ibdev->ib_dev.create_ah = usnic_ib_create_ah;
	us_ibdev->ib_dev.destroy_ah = usnic_ib_destroy_ah;
	us_ibdev->ib_dev.post_send = usnic_ib_post_send;
	us_ibdev->ib_dev.post_recv = usnic_ib_post_recv;
	us_ibdev->ib_dev.poll_cq = usnic_ib_poll_cq;
	us_ibdev->ib_dev.req_notify_cq = usnic_ib_req_notify_cq;
	us_ibdev->ib_dev.get_dma_mr = usnic_ib_get_dma_mr;
	us_ibdev->ib_dev.get_port_immutable = usnic_port_immutable;

	if (ib_register_device(&us_ibdev->ib_dev, NULL))
		goto err_fwd_dealloc;

	usnic_fwd_set_mtu(us_ibdev->ufdev, us_ibdev->netdev->mtu);
	usnic_fwd_set_mac(us_ibdev->ufdev, us_ibdev->netdev->dev_addr);
	if (netif_carrier_ok(us_ibdev->netdev))
		usnic_fwd_carrier_up(us_ibdev->ufdev);

	in = ((struct in_device *)(netdev->ip_ptr))->ifa_list;
	if (in != NULL)
		usnic_fwd_add_ipaddr(us_ibdev->ufdev, in->ifa_address);

	usnic_mac_ip_to_gid(us_ibdev->netdev->perm_addr,
				us_ibdev->ufdev->inaddr, &gid.raw[0]);
	memcpy(&us_ibdev->ib_dev.node_guid, &gid.global.interface_id,
		sizeof(gid.global.interface_id));
	kref_init(&us_ibdev->vf_cnt);

	usnic_info("Added ibdev: %s netdev: %s with mac %pM Link: %u MTU: %u\n",
			us_ibdev->ib_dev.name, netdev_name(us_ibdev->netdev),
			us_ibdev->ufdev->mac, us_ibdev->ufdev->link_up,
			us_ibdev->ufdev->mtu);
	return us_ibdev;

err_fwd_dealloc:
	usnic_fwd_dev_free(us_ibdev->ufdev);
err_dealloc:
	usnic_err("failed -- deallocing device\n");
	ib_dealloc_device(&us_ibdev->ib_dev);
	return NULL;
}

static void usnic_ib_device_remove(struct usnic_ib_dev *us_ibdev)
{
	usnic_info("Unregistering %s\n", us_ibdev->ib_dev.name);
	usnic_ib_sysfs_unregister_usdev(us_ibdev);
	usnic_fwd_dev_free(us_ibdev->ufdev);
	ib_unregister_device(&us_ibdev->ib_dev);
	ib_dealloc_device(&us_ibdev->ib_dev);
}

static void usnic_ib_undiscover_pf(struct kref *kref)
{
	struct usnic_ib_dev *us_ibdev, *tmp;
	struct pci_dev *dev;
	bool found = false;

	dev = container_of(kref, struct usnic_ib_dev, vf_cnt)->pdev;
	mutex_lock(&usnic_ib_ibdev_list_lock);
	list_for_each_entry_safe(us_ibdev, tmp,
				&usnic_ib_ibdev_list, ib_dev_link) {
		if (us_ibdev->pdev == dev) {
			list_del(&us_ibdev->ib_dev_link);
			usnic_ib_device_remove(us_ibdev);
			found = true;
			break;
		}
	}

	WARN(!found, "Failed to remove PF %s\n", pci_name(dev));

	mutex_unlock(&usnic_ib_ibdev_list_lock);
}

static struct usnic_ib_dev *usnic_ib_discover_pf(struct usnic_vnic *vnic)
{
	struct usnic_ib_dev *us_ibdev;
	struct pci_dev *parent_pci, *vf_pci;
	int err;

	vf_pci = usnic_vnic_get_pdev(vnic);
	parent_pci = pci_physfn(vf_pci);

	BUG_ON(!parent_pci);

	mutex_lock(&usnic_ib_ibdev_list_lock);
	list_for_each_entry(us_ibdev, &usnic_ib_ibdev_list, ib_dev_link) {
		if (us_ibdev->pdev == parent_pci) {
			kref_get(&us_ibdev->vf_cnt);
			goto out;
		}
	}

	us_ibdev = usnic_ib_device_add(parent_pci);
	if (IS_ERR_OR_NULL(us_ibdev)) {
		us_ibdev = us_ibdev ? us_ibdev : ERR_PTR(-EFAULT);
		goto out;
	}

	err = usnic_ib_sysfs_register_usdev(us_ibdev);
	if (err) {
		usnic_ib_device_remove(us_ibdev);
		us_ibdev = ERR_PTR(err);
		goto out;
	}

	list_add(&us_ibdev->ib_dev_link, &usnic_ib_ibdev_list);
out:
	mutex_unlock(&usnic_ib_ibdev_list_lock);
	return us_ibdev;
}
/* End of PF discovery section */

/* Start of PCI section */

static const struct pci_device_id usnic_ib_pci_ids[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC)},
	{0,}
};

static int usnic_ib_pci_probe(struct pci_dev *pdev,
				const struct pci_device_id *id)
{
	int err;
	struct usnic_ib_dev *pf;
	struct usnic_ib_vf *vf;
	enum usnic_vnic_res_type res_type;

	vf = kzalloc(sizeof(*vf), GFP_KERNEL);
	if (!vf)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		usnic_err("Failed to enable %s with err %d\n",
				pci_name(pdev), err);
		goto out_clean_vf;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		usnic_err("Failed to request region for %s with err %d\n",
				pci_name(pdev), err);
		goto out_disable_device;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, vf);

	vf->vnic = usnic_vnic_alloc(pdev);
	if (IS_ERR_OR_NULL(vf->vnic)) {
		err = vf->vnic ? PTR_ERR(vf->vnic) : -ENOMEM;
		usnic_err("Failed to alloc vnic for %s with err %d\n",
				pci_name(pdev), err);
		goto out_release_regions;
	}

	pf = usnic_ib_discover_pf(vf->vnic);
	if (IS_ERR_OR_NULL(pf)) {
		usnic_err("Failed to discover pf of vnic %s with err %ld\n",
				pci_name(pdev), PTR_ERR(pf));
		err = pf ? PTR_ERR(pf) : -EFAULT;
		goto out_clean_vnic;
	}

	vf->pf = pf;
	spin_lock_init(&vf->lock);
	mutex_lock(&pf->usdev_lock);
	list_add_tail(&vf->link, &pf->vf_dev_list);
	/*
	 * Save max settings (will be same for each VF, easier to re-write than
	 * to say "if (!set) { set_values(); set=1; }"
	 */
	for (res_type = USNIC_VNIC_RES_TYPE_EOL+1;
			res_type < USNIC_VNIC_RES_TYPE_MAX;
			res_type++) {
		pf->vf_res_cnt[res_type] = usnic_vnic_res_cnt(vf->vnic,
								res_type);
	}

	mutex_unlock(&pf->usdev_lock);

	usnic_info("Registering usnic VF %s into PF %s\n", pci_name(pdev),
			pf->ib_dev.name);
	usnic_ib_log_vf(vf);
	return 0;

out_clean_vnic:
	usnic_vnic_free(vf->vnic);
out_release_regions:
	pci_set_drvdata(pdev, NULL);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
out_disable_device:
	pci_disable_device(pdev);
out_clean_vf:
	kfree(vf);
	return err;
}

static void usnic_ib_pci_remove(struct pci_dev *pdev)
{
	struct usnic_ib_vf *vf = pci_get_drvdata(pdev);
	struct usnic_ib_dev *pf = vf->pf;

	mutex_lock(&pf->usdev_lock);
	list_del(&vf->link);
	mutex_unlock(&pf->usdev_lock);

	kref_put(&pf->vf_cnt, usnic_ib_undiscover_pf);
	usnic_vnic_free(vf->vnic);
	pci_set_drvdata(pdev, NULL);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	kfree(vf);

	usnic_info("Removed VF %s\n", pci_name(pdev));
}

/* PCI driver entry points */
static struct pci_driver usnic_ib_pci_driver = {
	.name = DRV_NAME,
	.id_table = usnic_ib_pci_ids,
	.probe = usnic_ib_pci_probe,
	.remove = usnic_ib_pci_remove,
};
/* End of PCI section */

/* Start of module section */
static int __init usnic_ib_init(void)
{
	int err;

	printk_once(KERN_INFO "%s", usnic_version);

	err = usnic_uiom_init(DRV_NAME);
	if (err) {
		usnic_err("Unable to initialize umem with err %d\n", err);
		return err;
	}

	err = pci_register_driver(&usnic_ib_pci_driver);
	if (err) {
		usnic_err("Unable to register with PCI\n");
		goto out_umem_fini;
	}

	err = register_netdevice_notifier(&usnic_ib_netdevice_notifier);
	if (err) {
		usnic_err("Failed to register netdev notifier\n");
		goto out_pci_unreg;
	}

	err = register_inetaddr_notifier(&usnic_ib_inetaddr_notifier);
	if (err) {
		usnic_err("Failed to register inet addr notifier\n");
		goto out_unreg_netdev_notifier;
	}

	err = usnic_transport_init();
	if (err) {
		usnic_err("Failed to initialize transport\n");
		goto out_unreg_inetaddr_notifier;
	}

	usnic_debugfs_init();

	return 0;

out_unreg_inetaddr_notifier:
	unregister_inetaddr_notifier(&usnic_ib_inetaddr_notifier);
out_unreg_netdev_notifier:
	unregister_netdevice_notifier(&usnic_ib_netdevice_notifier);
out_pci_unreg:
	pci_unregister_driver(&usnic_ib_pci_driver);
out_umem_fini:
	usnic_uiom_fini();

	return err;
}

static void __exit usnic_ib_destroy(void)
{
	usnic_dbg("\n");
	usnic_debugfs_exit();
	usnic_transport_fini();
	unregister_inetaddr_notifier(&usnic_ib_inetaddr_notifier);
	unregister_netdevice_notifier(&usnic_ib_netdevice_notifier);
	pci_unregister_driver(&usnic_ib_pci_driver);
	usnic_uiom_fini();
}

MODULE_DESCRIPTION("Cisco VIC (usNIC) Verbs Driver");
MODULE_AUTHOR("Upinder Malhi <umalhi@cisco.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
module_param(usnic_log_lvl, uint, S_IRUGO | S_IWUSR);
module_param(usnic_ib_share_vf, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(usnic_log_lvl, " Off=0, Err=1, Info=2, Debug=3");
MODULE_PARM_DESC(usnic_ib_share_vf, "Off=0, On=1 VF sharing amongst QPs");
MODULE_DEVICE_TABLE(pci, usnic_ib_pci_ids);

module_init(usnic_ib_init);
module_exit(usnic_ib_destroy);
/* End of module section */