/*
 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Author: Upinder Malhi <umalhi@cisco.com>
 * Author: Anant Deepak <anadeepa@cisco.com>
 * Author: Cesare Cantu' <cantuc@cisco.com>
 * Author: Jeff Squyres <jsquyres@cisco.com>
 * Author: Kiran Thirumalai <kithirum@cisco.com>
 * Author: Xuyang Wang <xuywang@cisco.com>
 * Author: Reese Faucette <rfaucett@cisco.com>
 *
 */

#include <linux/module.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/netdevice.h>

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>

#include "usnic_abi.h"
#include "usnic_common_util.h"
#include "usnic_ib.h"
#include "usnic_ib_qp_grp.h"
#include "usnic_log.h"
#include "usnic_fwd.h"
#include "usnic_debugfs.h"
#include "usnic_ib_verbs.h"
#include "usnic_transport.h"
#include "usnic_uiom.h"
#include "usnic_ib_sysfs.h"

unsigned int usnic_log_lvl = USNIC_LOG_LVL_ERR;
unsigned int usnic_ib_share_vf = 1;

static const char usnic_version[] =
	DRV_NAME ": Cisco VIC (USNIC) Verbs Driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

static DEFINE_MUTEX(usnic_ib_ibdev_list_lock);
static LIST_HEAD(usnic_ib_ibdev_list);

/* Callback dump funcs */
static int usnic_ib_dump_vf_hdr(void *obj, char *buf, int buf_sz)
{
	struct usnic_ib_vf *vf = obj;
	return scnprintf(buf, buf_sz, "PF: %s ", vf->pf->ib_dev.name);
}
/* End callback dump funcs */

static void usnic_ib_dump_vf(struct usnic_ib_vf *vf, char *buf, int buf_sz)
{
	usnic_vnic_dump(vf->vnic, buf, buf_sz, vf,
			usnic_ib_dump_vf_hdr,
			usnic_ib_qp_grp_dump_hdr, usnic_ib_qp_grp_dump_rows);
}

void usnic_ib_log_vf(struct usnic_ib_vf *vf)
{
	char buf[1000];
	usnic_ib_dump_vf(vf, buf, sizeof(buf));
	usnic_dbg("%s\n", buf);
}

/* Start of netdev section */
static inline const char *usnic_ib_netdev_event_to_string(unsigned long event)
{
	const char *event2str[] = {"NETDEV_NONE", "NETDEV_UP", "NETDEV_DOWN",
		"NETDEV_REBOOT", "NETDEV_CHANGE",
		"NETDEV_REGISTER",
"NETDEV_UNREGISTER", "NETDEV_CHANGEMTU", 103 "NETDEV_CHANGEADDR", "NETDEV_GOING_DOWN", "NETDEV_FEAT_CHANGE", 104 "NETDEV_BONDING_FAILOVER", "NETDEV_PRE_UP", 105 "NETDEV_PRE_TYPE_CHANGE", "NETDEV_POST_TYPE_CHANGE", 106 "NETDEV_POST_INT", "NETDEV_UNREGISTER_FINAL", "NETDEV_RELEASE", 107 "NETDEV_NOTIFY_PEERS", "NETDEV_JOIN" 108 }; 109 110 if (event >= ARRAY_SIZE(event2str)) 111 return "UNKNOWN_NETDEV_EVENT"; 112 else 113 return event2str[event]; 114 } 115 116 static void usnic_ib_qp_grp_modify_active_to_err(struct usnic_ib_dev *us_ibdev) 117 { 118 struct usnic_ib_ucontext *ctx; 119 struct usnic_ib_qp_grp *qp_grp; 120 enum ib_qp_state cur_state; 121 int status; 122 123 BUG_ON(!mutex_is_locked(&us_ibdev->usdev_lock)); 124 125 list_for_each_entry(ctx, &us_ibdev->ctx_list, link) { 126 list_for_each_entry(qp_grp, &ctx->qp_grp_list, link) { 127 cur_state = qp_grp->state; 128 if (cur_state == IB_QPS_INIT || 129 cur_state == IB_QPS_RTR || 130 cur_state == IB_QPS_RTS) { 131 status = usnic_ib_qp_grp_modify(qp_grp, 132 IB_QPS_ERR, 133 NULL); 134 if (status) { 135 usnic_err("Failed to transistion qp grp %u from %s to %s\n", 136 qp_grp->grp_id, 137 usnic_ib_qp_grp_state_to_string 138 (cur_state), 139 usnic_ib_qp_grp_state_to_string 140 (IB_QPS_ERR)); 141 } 142 } 143 } 144 } 145 } 146 147 static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev, 148 unsigned long event) 149 { 150 struct net_device *netdev; 151 struct ib_event ib_event; 152 153 memset(&ib_event, 0, sizeof(ib_event)); 154 155 mutex_lock(&us_ibdev->usdev_lock); 156 netdev = us_ibdev->netdev; 157 switch (event) { 158 case NETDEV_REBOOT: 159 usnic_info("PF Reset on %s\n", us_ibdev->ib_dev.name); 160 usnic_ib_qp_grp_modify_active_to_err(us_ibdev); 161 ib_event.event = IB_EVENT_PORT_ERR; 162 ib_event.device = &us_ibdev->ib_dev; 163 ib_event.element.port_num = 1; 164 ib_dispatch_event(&ib_event); 165 break; 166 case NETDEV_UP: 167 case NETDEV_DOWN: 168 case NETDEV_CHANGE: 169 if (!us_ibdev->ufdev->link_up && 170 netif_carrier_ok(netdev)) { 171 usnic_fwd_carrier_up(us_ibdev->ufdev); 172 usnic_info("Link UP on %s\n", us_ibdev->ib_dev.name); 173 ib_event.event = IB_EVENT_PORT_ACTIVE; 174 ib_event.device = &us_ibdev->ib_dev; 175 ib_event.element.port_num = 1; 176 ib_dispatch_event(&ib_event); 177 } else if (us_ibdev->ufdev->link_up && 178 !netif_carrier_ok(netdev)) { 179 usnic_fwd_carrier_down(us_ibdev->ufdev); 180 usnic_info("Link DOWN on %s\n", us_ibdev->ib_dev.name); 181 usnic_ib_qp_grp_modify_active_to_err(us_ibdev); 182 ib_event.event = IB_EVENT_PORT_ERR; 183 ib_event.device = &us_ibdev->ib_dev; 184 ib_event.element.port_num = 1; 185 ib_dispatch_event(&ib_event); 186 } else { 187 usnic_dbg("Ignoring %s on %s\n", 188 usnic_ib_netdev_event_to_string(event), 189 us_ibdev->ib_dev.name); 190 } 191 break; 192 case NETDEV_CHANGEADDR: 193 if (!memcmp(us_ibdev->ufdev->mac, netdev->dev_addr, 194 sizeof(us_ibdev->ufdev->mac))) { 195 usnic_dbg("Ignoring addr change on %s\n", 196 us_ibdev->ib_dev.name); 197 } else { 198 usnic_info(" %s old mac: %pM new mac: %pM\n", 199 us_ibdev->ib_dev.name, 200 us_ibdev->ufdev->mac, 201 netdev->dev_addr); 202 usnic_fwd_set_mac(us_ibdev->ufdev, netdev->dev_addr); 203 usnic_ib_qp_grp_modify_active_to_err(us_ibdev); 204 ib_event.event = IB_EVENT_GID_CHANGE; 205 ib_event.device = &us_ibdev->ib_dev; 206 ib_event.element.port_num = 1; 207 ib_dispatch_event(&ib_event); 208 } 209 210 break; 211 case NETDEV_CHANGEMTU: 212 if (us_ibdev->ufdev->mtu != netdev->mtu) { 213 usnic_info("MTU Change on %s old: %u new: %u\n", 214 
static int usnic_ib_netdevice_event(struct notifier_block *notifier,
					unsigned long event, void *ptr)
{
	struct usnic_ib_dev *us_ibdev;
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	mutex_lock(&usnic_ib_ibdev_list_lock);
	list_for_each_entry(us_ibdev, &usnic_ib_ibdev_list, ib_dev_link) {
		if (us_ibdev->netdev == netdev) {
			usnic_ib_handle_usdev_event(us_ibdev, event);
			break;
		}
	}
	mutex_unlock(&usnic_ib_ibdev_list_lock);

	return NOTIFY_DONE;
}

static struct notifier_block usnic_ib_netdevice_notifier = {
	.notifier_call = usnic_ib_netdevice_event
};
/* End of netdev section */

/* Start of inet section */
static int usnic_ib_handle_inet_event(struct usnic_ib_dev *us_ibdev,
					unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = ptr;
	struct ib_event ib_event;

	mutex_lock(&us_ibdev->usdev_lock);

	switch (event) {
	case NETDEV_DOWN:
		usnic_info("%s via ip notifiers",
				usnic_ib_netdev_event_to_string(event));
		usnic_fwd_del_ipaddr(us_ibdev->ufdev);
		usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
		ib_event.event = IB_EVENT_GID_CHANGE;
		ib_event.device = &us_ibdev->ib_dev;
		ib_event.element.port_num = 1;
		ib_dispatch_event(&ib_event);
		break;
	case NETDEV_UP:
		usnic_fwd_add_ipaddr(us_ibdev->ufdev, ifa->ifa_address);
		usnic_info("%s via ip notifiers: ip %pI4",
				usnic_ib_netdev_event_to_string(event),
				&us_ibdev->ufdev->inaddr);
		ib_event.event = IB_EVENT_GID_CHANGE;
		ib_event.device = &us_ibdev->ib_dev;
		ib_event.element.port_num = 1;
		ib_dispatch_event(&ib_event);
		break;
	default:
		usnic_info("Ignoring event %s on %s",
				usnic_ib_netdev_event_to_string(event),
				us_ibdev->ib_dev.name);
	}
	mutex_unlock(&us_ibdev->usdev_lock);

	return NOTIFY_DONE;
}

static int usnic_ib_inetaddr_event(struct notifier_block *notifier,
					unsigned long event, void *ptr)
{
	struct usnic_ib_dev *us_ibdev;
	struct in_ifaddr *ifa = ptr;
	struct net_device *netdev = ifa->ifa_dev->dev;

	mutex_lock(&usnic_ib_ibdev_list_lock);
	list_for_each_entry(us_ibdev, &usnic_ib_ibdev_list, ib_dev_link) {
		if (us_ibdev->netdev == netdev) {
			usnic_ib_handle_inet_event(us_ibdev, event, ptr);
			break;
		}
	}
	mutex_unlock(&usnic_ib_ibdev_list_lock);

	return NOTIFY_DONE;
}

static struct notifier_block usnic_ib_inetaddr_notifier = {
	.notifier_call = usnic_ib_inetaddr_event
};
/* End of inet section */

static int usnic_port_immutable(struct ib_device *ibdev, u8 port_num,
				struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	immutable->core_cap_flags = RDMA_CORE_PORT_USNIC;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;

	return 0;
}
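
/*
 * Report the device firmware version; it is read from the underlying
 * netdev via its ethtool get_drvinfo op.
 */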
static void usnic_get_dev_fw_str(struct ib_device *device,
				 char *str, size_t str_len)
{
	struct usnic_ib_dev *us_ibdev =
		container_of(device, struct usnic_ib_dev, ib_dev);
	struct ethtool_drvinfo info;

	mutex_lock(&us_ibdev->usdev_lock);
	us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info);
	mutex_unlock(&us_ibdev->usdev_lock);

	snprintf(str, str_len, "%s", info.fw_version);
}

/* Start of PF discovery section */
static void *usnic_ib_device_add(struct pci_dev *dev)
{
	struct usnic_ib_dev *us_ibdev;
	union ib_gid gid;
	struct in_ifaddr *in;
	struct net_device *netdev;

	usnic_dbg("\n");
	netdev = pci_get_drvdata(dev);

	us_ibdev = (struct usnic_ib_dev *)ib_alloc_device(sizeof(*us_ibdev));
	if (!us_ibdev) {
		usnic_err("Device %s context alloc failed\n",
				netdev_name(pci_get_drvdata(dev)));
		return ERR_PTR(-EFAULT);
	}

	us_ibdev->ufdev = usnic_fwd_dev_alloc(dev);
	if (!us_ibdev->ufdev) {
		usnic_err("Failed to alloc ufdev for %s\n", pci_name(dev));
		goto err_dealloc;
	}

	mutex_init(&us_ibdev->usdev_lock);
	INIT_LIST_HEAD(&us_ibdev->vf_dev_list);
	INIT_LIST_HEAD(&us_ibdev->ctx_list);

	us_ibdev->pdev = dev;
	us_ibdev->netdev = pci_get_drvdata(dev);
	us_ibdev->ib_dev.owner = THIS_MODULE;
	us_ibdev->ib_dev.node_type = RDMA_NODE_USNIC_UDP;
	us_ibdev->ib_dev.phys_port_cnt = USNIC_IB_PORT_CNT;
	us_ibdev->ib_dev.num_comp_vectors = USNIC_IB_NUM_COMP_VECTORS;
	us_ibdev->ib_dev.dev.parent = &dev->dev;
	us_ibdev->ib_dev.uverbs_abi_ver = USNIC_UVERBS_ABI_VERSION;
	strlcpy(us_ibdev->ib_dev.name, "usnic_%d", IB_DEVICE_NAME_MAX);

	us_ibdev->ib_dev.uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_OPEN_QP);
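
	/* Wire the usNIC implementations into the IB verbs entry points. */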
	us_ibdev->ib_dev.query_device = usnic_ib_query_device;
	us_ibdev->ib_dev.query_port = usnic_ib_query_port;
	us_ibdev->ib_dev.query_pkey = usnic_ib_query_pkey;
	us_ibdev->ib_dev.query_gid = usnic_ib_query_gid;
	us_ibdev->ib_dev.get_link_layer = usnic_ib_port_link_layer;
	us_ibdev->ib_dev.alloc_pd = usnic_ib_alloc_pd;
	us_ibdev->ib_dev.dealloc_pd = usnic_ib_dealloc_pd;
	us_ibdev->ib_dev.create_qp = usnic_ib_create_qp;
	us_ibdev->ib_dev.modify_qp = usnic_ib_modify_qp;
	us_ibdev->ib_dev.query_qp = usnic_ib_query_qp;
	us_ibdev->ib_dev.destroy_qp = usnic_ib_destroy_qp;
	us_ibdev->ib_dev.create_cq = usnic_ib_create_cq;
	us_ibdev->ib_dev.destroy_cq = usnic_ib_destroy_cq;
	us_ibdev->ib_dev.reg_user_mr = usnic_ib_reg_mr;
	us_ibdev->ib_dev.dereg_mr = usnic_ib_dereg_mr;
	us_ibdev->ib_dev.alloc_ucontext = usnic_ib_alloc_ucontext;
	us_ibdev->ib_dev.dealloc_ucontext = usnic_ib_dealloc_ucontext;
	us_ibdev->ib_dev.mmap = usnic_ib_mmap;
	us_ibdev->ib_dev.create_ah = usnic_ib_create_ah;
	us_ibdev->ib_dev.destroy_ah = usnic_ib_destroy_ah;
	us_ibdev->ib_dev.post_send = usnic_ib_post_send;
	us_ibdev->ib_dev.post_recv = usnic_ib_post_recv;
	us_ibdev->ib_dev.poll_cq = usnic_ib_poll_cq;
	us_ibdev->ib_dev.req_notify_cq = usnic_ib_req_notify_cq;
	us_ibdev->ib_dev.get_dma_mr = usnic_ib_get_dma_mr;
	us_ibdev->ib_dev.get_port_immutable = usnic_port_immutable;
	us_ibdev->ib_dev.get_dev_fw_str = usnic_get_dev_fw_str;

	if (ib_register_device(&us_ibdev->ib_dev, NULL))
		goto err_fwd_dealloc;

	usnic_fwd_set_mtu(us_ibdev->ufdev, us_ibdev->netdev->mtu);
	usnic_fwd_set_mac(us_ibdev->ufdev, us_ibdev->netdev->dev_addr);
	if (netif_carrier_ok(us_ibdev->netdev))
		usnic_fwd_carrier_up(us_ibdev->ufdev);

	in = ((struct in_device *)(netdev->ip_ptr))->ifa_list;
	if (in != NULL)
		usnic_fwd_add_ipaddr(us_ibdev->ufdev, in->ifa_address);

	usnic_mac_ip_to_gid(us_ibdev->netdev->perm_addr,
				us_ibdev->ufdev->inaddr, &gid.raw[0]);
	memcpy(&us_ibdev->ib_dev.node_guid, &gid.global.interface_id,
		sizeof(gid.global.interface_id));
	kref_init(&us_ibdev->vf_cnt);

	usnic_info("Added ibdev: %s netdev: %s with mac %pM Link: %u MTU: %u\n",
			us_ibdev->ib_dev.name, netdev_name(us_ibdev->netdev),
			us_ibdev->ufdev->mac, us_ibdev->ufdev->link_up,
			us_ibdev->ufdev->mtu);
	return us_ibdev;

err_fwd_dealloc:
	usnic_fwd_dev_free(us_ibdev->ufdev);
err_dealloc:
	usnic_err("failed -- deallocing device\n");
	ib_dealloc_device(&us_ibdev->ib_dev);
	return NULL;
}

static void usnic_ib_device_remove(struct usnic_ib_dev *us_ibdev)
{
	usnic_info("Unregistering %s\n", us_ibdev->ib_dev.name);
	usnic_ib_sysfs_unregister_usdev(us_ibdev);
	usnic_fwd_dev_free(us_ibdev->ufdev);
	ib_unregister_device(&us_ibdev->ib_dev);
	ib_dealloc_device(&us_ibdev->ib_dev);
}

static void usnic_ib_undiscover_pf(struct kref *kref)
{
	struct usnic_ib_dev *us_ibdev, *tmp;
	struct pci_dev *dev;
	bool found = false;

	dev = container_of(kref, struct usnic_ib_dev, vf_cnt)->pdev;
	mutex_lock(&usnic_ib_ibdev_list_lock);
	list_for_each_entry_safe(us_ibdev, tmp,
				&usnic_ib_ibdev_list, ib_dev_link) {
		if (us_ibdev->pdev == dev) {
			list_del(&us_ibdev->ib_dev_link);
			usnic_ib_device_remove(us_ibdev);
			found = true;
			break;
		}
	}

	WARN(!found, "Failed to remove PF %s\n", pci_name(dev));

	mutex_unlock(&usnic_ib_ibdev_list_lock);
}
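
/*
 * Find the PF ibdev that owns this VF's vnic, registering a new IB device
 * for the PF on first use.  The vf_cnt kref counts the VFs bound to the PF.
 */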
static struct usnic_ib_dev *usnic_ib_discover_pf(struct usnic_vnic *vnic)
{
	struct usnic_ib_dev *us_ibdev;
	struct pci_dev *parent_pci, *vf_pci;
	int err;

	vf_pci = usnic_vnic_get_pdev(vnic);
	parent_pci = pci_physfn(vf_pci);

	BUG_ON(!parent_pci);

	mutex_lock(&usnic_ib_ibdev_list_lock);
	list_for_each_entry(us_ibdev, &usnic_ib_ibdev_list, ib_dev_link) {
		if (us_ibdev->pdev == parent_pci) {
			kref_get(&us_ibdev->vf_cnt);
			goto out;
		}
	}

	us_ibdev = usnic_ib_device_add(parent_pci);
	if (IS_ERR_OR_NULL(us_ibdev)) {
		us_ibdev = us_ibdev ? us_ibdev : ERR_PTR(-EFAULT);
		goto out;
	}

	err = usnic_ib_sysfs_register_usdev(us_ibdev);
	if (err) {
		usnic_ib_device_remove(us_ibdev);
		us_ibdev = ERR_PTR(err);
		goto out;
	}

	list_add(&us_ibdev->ib_dev_link, &usnic_ib_ibdev_list);
out:
	mutex_unlock(&usnic_ib_ibdev_list_lock);
	return us_ibdev;
}
/* End of PF discovery section */

/* Start of PCI section */

static const struct pci_device_id usnic_ib_pci_ids[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC)},
	{0,}
};

static int usnic_ib_pci_probe(struct pci_dev *pdev,
				const struct pci_device_id *id)
{
	int err;
	struct usnic_ib_dev *pf;
	struct usnic_ib_vf *vf;
	enum usnic_vnic_res_type res_type;

	vf = kzalloc(sizeof(*vf), GFP_KERNEL);
	if (!vf)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		usnic_err("Failed to enable %s with err %d\n",
				pci_name(pdev), err);
		goto out_clean_vf;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		usnic_err("Failed to request region for %s with err %d\n",
				pci_name(pdev), err);
		goto out_disable_device;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, vf);

	vf->vnic = usnic_vnic_alloc(pdev);
	if (IS_ERR_OR_NULL(vf->vnic)) {
		err = vf->vnic ? PTR_ERR(vf->vnic) : -ENOMEM;
		usnic_err("Failed to alloc vnic for %s with err %d\n",
				pci_name(pdev), err);
		goto out_release_regions;
	}

	pf = usnic_ib_discover_pf(vf->vnic);
	if (IS_ERR_OR_NULL(pf)) {
		usnic_err("Failed to discover pf of vnic %s with err %ld\n",
				pci_name(pdev), PTR_ERR(pf));
		err = pf ? PTR_ERR(pf) : -EFAULT;
		goto out_clean_vnic;
	}

	vf->pf = pf;
	spin_lock_init(&vf->lock);
	mutex_lock(&pf->usdev_lock);
	list_add_tail(&vf->link, &pf->vf_dev_list);
	/*
	 * Save max settings (will be same for each VF, easier to re-write
	 * than to say "if (!set) { set_values(); set=1; }").
	 */
	for (res_type = USNIC_VNIC_RES_TYPE_EOL + 1;
			res_type < USNIC_VNIC_RES_TYPE_MAX;
			res_type++) {
		pf->vf_res_cnt[res_type] = usnic_vnic_res_cnt(vf->vnic,
								res_type);
	}

	mutex_unlock(&pf->usdev_lock);

	usnic_info("Registering usnic VF %s into PF %s\n", pci_name(pdev),
			pf->ib_dev.name);
	usnic_ib_log_vf(vf);
	return 0;

out_clean_vnic:
	usnic_vnic_free(vf->vnic);
out_release_regions:
	pci_set_drvdata(pdev, NULL);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
out_disable_device:
	pci_disable_device(pdev);
out_clean_vf:
	kfree(vf);
	return err;
}

static void usnic_ib_pci_remove(struct pci_dev *pdev)
{
	struct usnic_ib_vf *vf = pci_get_drvdata(pdev);
	struct usnic_ib_dev *pf = vf->pf;

	mutex_lock(&pf->usdev_lock);
	list_del(&vf->link);
	mutex_unlock(&pf->usdev_lock);

	kref_put(&pf->vf_cnt, usnic_ib_undiscover_pf);
	usnic_vnic_free(vf->vnic);
	pci_set_drvdata(pdev, NULL);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	kfree(vf);

	usnic_info("Removed VF %s\n", pci_name(pdev));
}

/* PCI driver entry points */
static struct pci_driver usnic_ib_pci_driver = {
	.name = DRV_NAME,
	.id_table = usnic_ib_pci_ids,
	.probe = usnic_ib_pci_probe,
	.remove = usnic_ib_pci_remove,
};
/* End of PCI section */

/* Start of module section */
static int __init usnic_ib_init(void)
{
	int err;

	printk_once(KERN_INFO "%s", usnic_version);

	err = usnic_uiom_init(DRV_NAME);
	if (err) {
		usnic_err("Unable to initialize umem with err %d\n", err);
		return err;
	}

	err = pci_register_driver(&usnic_ib_pci_driver);
	if (err) {
		usnic_err("Unable to register with PCI\n");
		goto out_umem_fini;
	}

	err = register_netdevice_notifier(&usnic_ib_netdevice_notifier);
	if (err) {
		usnic_err("Failed to register netdev notifier\n");
		goto out_pci_unreg;
	}

	err = register_inetaddr_notifier(&usnic_ib_inetaddr_notifier);
	if (err) {
		usnic_err("Failed to register inet addr notifier\n");
		goto out_unreg_netdev_notifier;
	}

	err = usnic_transport_init();
	if (err) {
		usnic_err("Failed to initialize transport\n");
		goto out_unreg_inetaddr_notifier;
	}

	usnic_debugfs_init();

	return 0;

out_unreg_inetaddr_notifier:
	unregister_inetaddr_notifier(&usnic_ib_inetaddr_notifier);
out_unreg_netdev_notifier:
	unregister_netdevice_notifier(&usnic_ib_netdevice_notifier);
out_pci_unreg:
	pci_unregister_driver(&usnic_ib_pci_driver);
out_umem_fini:
	usnic_uiom_fini();

	return err;
}

static void __exit usnic_ib_destroy(void)
{
	usnic_dbg("\n");
	usnic_debugfs_exit();
	usnic_transport_fini();
	unregister_inetaddr_notifier(&usnic_ib_inetaddr_notifier);
	unregister_netdevice_notifier(&usnic_ib_netdevice_notifier);
	pci_unregister_driver(&usnic_ib_pci_driver);
	usnic_uiom_fini();
}

MODULE_DESCRIPTION("Cisco VIC (usNIC) Verbs Driver");
MODULE_AUTHOR("Upinder Malhi <umalhi@cisco.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
module_param(usnic_log_lvl, uint, S_IRUGO | S_IWUSR);
module_param(usnic_ib_share_vf, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(usnic_log_lvl, "Off=0, Err=1, Info=2, Debug=3");
MODULE_PARM_DESC(usnic_ib_share_vf, "Off=0, On=1 VF sharing amongst QPs");
MODULE_DEVICE_TABLE(pci, usnic_ib_pci_ids);

module_init(usnic_ib_init);
module_exit(usnic_ib_destroy);
/* End of module section */