/*
 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Author: Upinder Malhi <umalhi@cisco.com>
 * Author: Anant Deepak <anadeepa@cisco.com>
 * Author: Cesare Cantu' <cantuc@cisco.com>
 * Author: Jeff Squyres <jsquyres@cisco.com>
 * Author: Kiran Thirumalai <kithirum@cisco.com>
 * Author: Xuyang Wang <xuywang@cisco.com>
 * Author: Reese Faucette <rfaucett@cisco.com>
 *
 */

#include <linux/module.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/netdevice.h>

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>

#include "usnic_abi.h"
#include "usnic_common_util.h"
#include "usnic_ib.h"
#include "usnic_ib_qp_grp.h"
#include "usnic_log.h"
#include "usnic_fwd.h"
#include "usnic_debugfs.h"
#include "usnic_ib_verbs.h"
#include "usnic_transport.h"
#include "usnic_uiom.h"
#include "usnic_ib_sysfs.h"

unsigned int usnic_log_lvl = USNIC_LOG_LVL_ERR;
unsigned int usnic_ib_share_vf = 1;

static const char usnic_version[] =
	DRV_NAME ": Cisco VIC (USNIC) Verbs Driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

static DEFINE_MUTEX(usnic_ib_ibdev_list_lock);
static LIST_HEAD(usnic_ib_ibdev_list);

/* Callback dump funcs */
static int usnic_ib_dump_vf_hdr(void *obj, char *buf, int buf_sz)
{
	struct usnic_ib_vf *vf = obj;
	return scnprintf(buf, buf_sz, "PF: %s ", dev_name(&vf->pf->ib_dev.dev));
}
/* End callback dump funcs */

static void usnic_ib_dump_vf(struct usnic_ib_vf *vf, char *buf, int buf_sz)
{
	usnic_vnic_dump(vf->vnic, buf, buf_sz, vf,
			usnic_ib_dump_vf_hdr,
			usnic_ib_qp_grp_dump_hdr, usnic_ib_qp_grp_dump_rows);
}

void usnic_ib_log_vf(struct usnic_ib_vf *vf)
{
	char buf[1000];
	usnic_ib_dump_vf(vf, buf, sizeof(buf));
	usnic_dbg("%s\n", buf);
}

/* Start of netdev section */
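/*
 * Force every INIT/RTR/RTS QP group owned by this device into the ERR
 * state, e.g. after a link drop, MAC/MTU change or PF reset.  Caller
 * must hold us_ibdev->usdev_lock.
 */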
static void usnic_ib_qp_grp_modify_active_to_err(struct usnic_ib_dev *us_ibdev)
{
	struct usnic_ib_ucontext *ctx;
	struct usnic_ib_qp_grp *qp_grp;
	enum ib_qp_state cur_state;
	int status;

	BUG_ON(!mutex_is_locked(&us_ibdev->usdev_lock));

	list_for_each_entry(ctx, &us_ibdev->ctx_list, link) {
		list_for_each_entry(qp_grp, &ctx->qp_grp_list, link) {
			cur_state = qp_grp->state;
			if (cur_state == IB_QPS_INIT ||
				cur_state == IB_QPS_RTR ||
				cur_state == IB_QPS_RTS) {
				status = usnic_ib_qp_grp_modify(qp_grp,
								IB_QPS_ERR,
								NULL);
				if (status) {
					usnic_err("Failed to transition qp grp %u from %s to %s\n",
						qp_grp->grp_id,
						usnic_ib_qp_grp_state_to_string
						(cur_state),
						usnic_ib_qp_grp_state_to_string
						(IB_QPS_ERR));
				}
			}
		}
	}
}

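/*
 * Handle a netdevice notifier event for the netdev backing this usNIC
 * device: propagate carrier, MAC and MTU changes to the forwarding
 * device and dispatch the matching IB async events.
 */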
static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev,
					unsigned long event)
{
	struct net_device *netdev;
	struct ib_event ib_event;

	memset(&ib_event, 0, sizeof(ib_event));

	mutex_lock(&us_ibdev->usdev_lock);
	netdev = us_ibdev->netdev;
	switch (event) {
	case NETDEV_REBOOT:
		usnic_info("PF Reset on %s\n", dev_name(&us_ibdev->ib_dev.dev));
		usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
		ib_event.event = IB_EVENT_PORT_ERR;
		ib_event.device = &us_ibdev->ib_dev;
		ib_event.element.port_num = 1;
		ib_dispatch_event(&ib_event);
		break;
	case NETDEV_UP:
	case NETDEV_DOWN:
	case NETDEV_CHANGE:
		if (!us_ibdev->ufdev->link_up &&
				netif_carrier_ok(netdev)) {
			usnic_fwd_carrier_up(us_ibdev->ufdev);
			usnic_info("Link UP on %s\n",
				   dev_name(&us_ibdev->ib_dev.dev));
			ib_event.event = IB_EVENT_PORT_ACTIVE;
			ib_event.device = &us_ibdev->ib_dev;
			ib_event.element.port_num = 1;
			ib_dispatch_event(&ib_event);
		} else if (us_ibdev->ufdev->link_up &&
				!netif_carrier_ok(netdev)) {
			usnic_fwd_carrier_down(us_ibdev->ufdev);
			usnic_info("Link DOWN on %s\n",
				   dev_name(&us_ibdev->ib_dev.dev));
			usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
			ib_event.event = IB_EVENT_PORT_ERR;
			ib_event.device = &us_ibdev->ib_dev;
			ib_event.element.port_num = 1;
			ib_dispatch_event(&ib_event);
		} else {
			usnic_dbg("Ignoring %s on %s\n",
					netdev_cmd_to_name(event),
					dev_name(&us_ibdev->ib_dev.dev));
		}
		break;
	case NETDEV_CHANGEADDR:
		if (!memcmp(us_ibdev->ufdev->mac, netdev->dev_addr,
				sizeof(us_ibdev->ufdev->mac))) {
			usnic_dbg("Ignoring addr change on %s\n",
				  dev_name(&us_ibdev->ib_dev.dev));
		} else {
			usnic_info(" %s old mac: %pM new mac: %pM\n",
					dev_name(&us_ibdev->ib_dev.dev),
					us_ibdev->ufdev->mac,
					netdev->dev_addr);
			usnic_fwd_set_mac(us_ibdev->ufdev, netdev->dev_addr);
			usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
			ib_event.event = IB_EVENT_GID_CHANGE;
			ib_event.device = &us_ibdev->ib_dev;
			ib_event.element.port_num = 1;
			ib_dispatch_event(&ib_event);
		}

		break;
	case NETDEV_CHANGEMTU:
		if (us_ibdev->ufdev->mtu != netdev->mtu) {
			usnic_info("MTU Change on %s old: %u new: %u\n",
					dev_name(&us_ibdev->ib_dev.dev),
					us_ibdev->ufdev->mtu, netdev->mtu);
			usnic_fwd_set_mtu(us_ibdev->ufdev, netdev->mtu);
			usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
		} else {
			usnic_dbg("Ignoring MTU change on %s\n",
				  dev_name(&us_ibdev->ib_dev.dev));
		}
		break;
	default:
		usnic_dbg("Ignoring event %s on %s",
				netdev_cmd_to_name(event),
				dev_name(&us_ibdev->ib_dev.dev));
	}
	mutex_unlock(&us_ibdev->usdev_lock);
}

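/*
 * Netdevice notifier callback: find the usNIC device whose netdev
 * generated the event and hand the event to it.
 */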
static int usnic_ib_netdevice_event(struct notifier_block *notifier,
					unsigned long event, void *ptr)
{
	struct usnic_ib_dev *us_ibdev;

	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	mutex_lock(&usnic_ib_ibdev_list_lock);
	list_for_each_entry(us_ibdev, &usnic_ib_ibdev_list, ib_dev_link) {
		if (us_ibdev->netdev == netdev) {
			usnic_ib_handle_usdev_event(us_ibdev, event);
			break;
		}
	}
	mutex_unlock(&usnic_ib_ibdev_list_lock);

	return NOTIFY_DONE;
}

static struct notifier_block usnic_ib_netdevice_notifier = {
	.notifier_call = usnic_ib_netdevice_event
};
/* End of netdev section */

/* Start of inet section */
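/*
 * Handle an IPv4 address change on the backing netdev: update the
 * forwarding device's address, move active QP groups to ERR where
 * needed, and report the change as an IB_EVENT_GID_CHANGE.
 */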
static int usnic_ib_handle_inet_event(struct usnic_ib_dev *us_ibdev,
					unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = ptr;
	struct ib_event ib_event;

	mutex_lock(&us_ibdev->usdev_lock);

	switch (event) {
	case NETDEV_DOWN:
		usnic_info("%s via ip notifiers",
				netdev_cmd_to_name(event));
		usnic_fwd_del_ipaddr(us_ibdev->ufdev);
		usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
		ib_event.event = IB_EVENT_GID_CHANGE;
		ib_event.device = &us_ibdev->ib_dev;
		ib_event.element.port_num = 1;
		ib_dispatch_event(&ib_event);
		break;
	case NETDEV_UP:
		usnic_fwd_add_ipaddr(us_ibdev->ufdev, ifa->ifa_address);
		usnic_info("%s via ip notifiers: ip %pI4",
				netdev_cmd_to_name(event),
				&us_ibdev->ufdev->inaddr);
		ib_event.event = IB_EVENT_GID_CHANGE;
		ib_event.device = &us_ibdev->ib_dev;
		ib_event.element.port_num = 1;
		ib_dispatch_event(&ib_event);
		break;
	default:
		usnic_info("Ignoring event %s on %s",
				netdev_cmd_to_name(event),
				dev_name(&us_ibdev->ib_dev.dev));
	}
	mutex_unlock(&us_ibdev->usdev_lock);

	return NOTIFY_DONE;
}

static int usnic_ib_inetaddr_event(struct notifier_block *notifier,
					unsigned long event, void *ptr)
{
	struct usnic_ib_dev *us_ibdev;
	struct in_ifaddr *ifa = ptr;
	struct net_device *netdev = ifa->ifa_dev->dev;

	mutex_lock(&usnic_ib_ibdev_list_lock);
	list_for_each_entry(us_ibdev, &usnic_ib_ibdev_list, ib_dev_link) {
		if (us_ibdev->netdev == netdev) {
			usnic_ib_handle_inet_event(us_ibdev, event, ptr);
			break;
		}
	}
	mutex_unlock(&usnic_ib_ibdev_list_lock);

	return NOTIFY_DONE;
}
static struct notifier_block usnic_ib_inetaddr_notifier = {
	.notifier_call = usnic_ib_inetaddr_event
};
/* End of inet section */

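/*
 * Fill in the immutable attributes of the (single) usNIC port: the
 * usNIC core capability flags plus the pkey/GID table sizes reported
 * by query_port.
 */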
static int usnic_port_immutable(struct ib_device *ibdev, u8 port_num,
			        struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	immutable->core_cap_flags = RDMA_CORE_PORT_USNIC;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;

	return 0;
}

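/* Report the firmware version of the underlying netdev via ethtool. */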
static void usnic_get_dev_fw_str(struct ib_device *device, char *str)
{
	struct usnic_ib_dev *us_ibdev =
		container_of(device, struct usnic_ib_dev, ib_dev);
	struct ethtool_drvinfo info;

	mutex_lock(&us_ibdev->usdev_lock);
	us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info);
	mutex_unlock(&us_ibdev->usdev_lock);

	snprintf(str, IB_FW_VERSION_NAME_MAX, "%s", info.fw_version);
}

/* Start of PF discovery section */
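/*
 * Allocate and register one IB device for a PF, wiring up its verbs
 * callbacks and seeding the forwarding state (MTU, MAC, carrier and IP
 * address) from the PF's netdev.
 */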
static void *usnic_ib_device_add(struct pci_dev *dev)
{
	struct usnic_ib_dev *us_ibdev;
	union ib_gid gid;
	struct in_device *ind;
	struct net_device *netdev;

	usnic_dbg("\n");
	netdev = pci_get_drvdata(dev);

	us_ibdev = (struct usnic_ib_dev *)ib_alloc_device(sizeof(*us_ibdev));
	if (!us_ibdev) {
		usnic_err("Device %s context alloc failed\n",
				netdev_name(pci_get_drvdata(dev)));
		return ERR_PTR(-EFAULT);
	}

	us_ibdev->ufdev = usnic_fwd_dev_alloc(dev);
	if (!us_ibdev->ufdev) {
		usnic_err("Failed to alloc ufdev for %s\n", pci_name(dev));
		goto err_dealloc;
	}

	mutex_init(&us_ibdev->usdev_lock);
	INIT_LIST_HEAD(&us_ibdev->vf_dev_list);
	INIT_LIST_HEAD(&us_ibdev->ctx_list);

	us_ibdev->pdev = dev;
	us_ibdev->netdev = pci_get_drvdata(dev);
	us_ibdev->ib_dev.owner = THIS_MODULE;
	us_ibdev->ib_dev.node_type = RDMA_NODE_USNIC_UDP;
	us_ibdev->ib_dev.phys_port_cnt = USNIC_IB_PORT_CNT;
	us_ibdev->ib_dev.num_comp_vectors = USNIC_IB_NUM_COMP_VECTORS;
	us_ibdev->ib_dev.dev.parent = &dev->dev;
	us_ibdev->ib_dev.uverbs_abi_ver = USNIC_UVERBS_ABI_VERSION;

	us_ibdev->ib_dev.uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_OPEN_QP);

	us_ibdev->ib_dev.query_device = usnic_ib_query_device;
	us_ibdev->ib_dev.query_port = usnic_ib_query_port;
	us_ibdev->ib_dev.query_pkey = usnic_ib_query_pkey;
	us_ibdev->ib_dev.query_gid = usnic_ib_query_gid;
	us_ibdev->ib_dev.get_netdev = usnic_get_netdev;
	us_ibdev->ib_dev.get_link_layer = usnic_ib_port_link_layer;
	us_ibdev->ib_dev.alloc_pd = usnic_ib_alloc_pd;
	us_ibdev->ib_dev.dealloc_pd = usnic_ib_dealloc_pd;
	us_ibdev->ib_dev.create_qp = usnic_ib_create_qp;
	us_ibdev->ib_dev.modify_qp = usnic_ib_modify_qp;
	us_ibdev->ib_dev.query_qp = usnic_ib_query_qp;
	us_ibdev->ib_dev.destroy_qp = usnic_ib_destroy_qp;
	us_ibdev->ib_dev.create_cq = usnic_ib_create_cq;
	us_ibdev->ib_dev.destroy_cq = usnic_ib_destroy_cq;
	us_ibdev->ib_dev.reg_user_mr = usnic_ib_reg_mr;
	us_ibdev->ib_dev.dereg_mr = usnic_ib_dereg_mr;
	us_ibdev->ib_dev.alloc_ucontext = usnic_ib_alloc_ucontext;
	us_ibdev->ib_dev.dealloc_ucontext = usnic_ib_dealloc_ucontext;
	us_ibdev->ib_dev.mmap = usnic_ib_mmap;
	us_ibdev->ib_dev.create_ah = usnic_ib_create_ah;
	us_ibdev->ib_dev.destroy_ah = usnic_ib_destroy_ah;
	us_ibdev->ib_dev.post_send = usnic_ib_post_send;
	us_ibdev->ib_dev.post_recv = usnic_ib_post_recv;
	us_ibdev->ib_dev.poll_cq = usnic_ib_poll_cq;
	us_ibdev->ib_dev.req_notify_cq = usnic_ib_req_notify_cq;
	us_ibdev->ib_dev.get_dma_mr = usnic_ib_get_dma_mr;
	us_ibdev->ib_dev.get_port_immutable = usnic_port_immutable;
	us_ibdev->ib_dev.get_dev_fw_str     = usnic_get_dev_fw_str;


	us_ibdev->ib_dev.driver_id = RDMA_DRIVER_USNIC;
	rdma_set_device_sysfs_group(&us_ibdev->ib_dev, &usnic_attr_group);

	if (ib_register_device(&us_ibdev->ib_dev, "usnic_%d", NULL))
		goto err_fwd_dealloc;

	usnic_fwd_set_mtu(us_ibdev->ufdev, us_ibdev->netdev->mtu);
	usnic_fwd_set_mac(us_ibdev->ufdev, us_ibdev->netdev->dev_addr);
	if (netif_carrier_ok(us_ibdev->netdev))
		usnic_fwd_carrier_up(us_ibdev->ufdev);

	ind = in_dev_get(netdev);
	if (ind->ifa_list)
		usnic_fwd_add_ipaddr(us_ibdev->ufdev,
				     ind->ifa_list->ifa_address);
	in_dev_put(ind);

	usnic_mac_ip_to_gid(us_ibdev->netdev->perm_addr,
				us_ibdev->ufdev->inaddr, &gid.raw[0]);
	memcpy(&us_ibdev->ib_dev.node_guid, &gid.global.interface_id,
		sizeof(gid.global.interface_id));
	kref_init(&us_ibdev->vf_cnt);

	usnic_info("Added ibdev: %s netdev: %s with mac %pM Link: %u MTU: %u\n",
		   dev_name(&us_ibdev->ib_dev.dev),
		   netdev_name(us_ibdev->netdev), us_ibdev->ufdev->mac,
		   us_ibdev->ufdev->link_up, us_ibdev->ufdev->mtu);
	return us_ibdev;

err_fwd_dealloc:
	usnic_fwd_dev_free(us_ibdev->ufdev);
err_dealloc:
	usnic_err("failed -- deallocating device\n");
	ib_dealloc_device(&us_ibdev->ib_dev);
	return NULL;
}

static void usnic_ib_device_remove(struct usnic_ib_dev *us_ibdev)
{
	usnic_info("Unregistering %s\n", dev_name(&us_ibdev->ib_dev.dev));
	usnic_ib_sysfs_unregister_usdev(us_ibdev);
	usnic_fwd_dev_free(us_ibdev->ufdev);
	ib_unregister_device(&us_ibdev->ib_dev);
	ib_dealloc_device(&us_ibdev->ib_dev);
}

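/*
 * kref release callback for vf_cnt: the last VF referencing this PF is
 * gone, so unlink the PF from the global list and tear its IB device
 * down.
 */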
static void usnic_ib_undiscover_pf(struct kref *kref)
{
	struct usnic_ib_dev *us_ibdev, *tmp;
	struct pci_dev *dev;
	bool found = false;

	dev = container_of(kref, struct usnic_ib_dev, vf_cnt)->pdev;
	mutex_lock(&usnic_ib_ibdev_list_lock);
	list_for_each_entry_safe(us_ibdev, tmp,
				&usnic_ib_ibdev_list, ib_dev_link) {
		if (us_ibdev->pdev == dev) {
			list_del(&us_ibdev->ib_dev_link);
			usnic_ib_device_remove(us_ibdev);
			found = true;
			break;
		}
	}

	WARN(!found, "Failed to remove PF %s\n", pci_name(dev));

	mutex_unlock(&usnic_ib_ibdev_list_lock);
}

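/*
 * Find the IB device of the PF that owns this VF's vnic, creating and
 * registering it on first use, and take a vf_cnt reference on it.
 */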
static struct usnic_ib_dev *usnic_ib_discover_pf(struct usnic_vnic *vnic)
{
	struct usnic_ib_dev *us_ibdev;
	struct pci_dev *parent_pci, *vf_pci;
	int err;

	vf_pci = usnic_vnic_get_pdev(vnic);
	parent_pci = pci_physfn(vf_pci);

	BUG_ON(!parent_pci);

	mutex_lock(&usnic_ib_ibdev_list_lock);
	list_for_each_entry(us_ibdev, &usnic_ib_ibdev_list, ib_dev_link) {
		if (us_ibdev->pdev == parent_pci) {
			kref_get(&us_ibdev->vf_cnt);
			goto out;
		}
	}

	us_ibdev = usnic_ib_device_add(parent_pci);
	if (IS_ERR_OR_NULL(us_ibdev)) {
		us_ibdev = us_ibdev ? us_ibdev : ERR_PTR(-EFAULT);
		goto out;
	}

	err = usnic_ib_sysfs_register_usdev(us_ibdev);
	if (err) {
		usnic_ib_device_remove(us_ibdev);
		us_ibdev = ERR_PTR(err);
		goto out;
	}

	list_add(&us_ibdev->ib_dev_link, &usnic_ib_ibdev_list);
out:
	mutex_unlock(&usnic_ib_ibdev_list_lock);
	return us_ibdev;
}
/* End of PF discovery section */

/* Start of PCI section */

static const struct pci_device_id usnic_ib_pci_ids[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC)},
	{0,}
};

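/*
 * Probe a usNIC VF: enable the PCI function, allocate its vnic, attach
 * it to the PF's IB device (discovering the PF if needed) and record
 * the per-resource-type limits on the PF.
 */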
static int usnic_ib_pci_probe(struct pci_dev *pdev,
				const struct pci_device_id *id)
{
	int err;
	struct usnic_ib_dev *pf;
	struct usnic_ib_vf *vf;
	enum usnic_vnic_res_type res_type;

	vf = kzalloc(sizeof(*vf), GFP_KERNEL);
	if (!vf)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		usnic_err("Failed to enable %s with err %d\n",
				pci_name(pdev), err);
		goto out_clean_vf;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		usnic_err("Failed to request region for %s with err %d\n",
				pci_name(pdev), err);
		goto out_disable_device;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, vf);

	vf->vnic = usnic_vnic_alloc(pdev);
	if (IS_ERR_OR_NULL(vf->vnic)) {
		err = vf->vnic ? PTR_ERR(vf->vnic) : -ENOMEM;
		usnic_err("Failed to alloc vnic for %s with err %d\n",
				pci_name(pdev), err);
		goto out_release_regions;
	}

	pf = usnic_ib_discover_pf(vf->vnic);
	if (IS_ERR_OR_NULL(pf)) {
		usnic_err("Failed to discover pf of vnic %s with err %ld\n",
				pci_name(pdev), PTR_ERR(pf));
		err = pf ? PTR_ERR(pf) : -EFAULT;
		goto out_clean_vnic;
	}

	vf->pf = pf;
	spin_lock_init(&vf->lock);
	mutex_lock(&pf->usdev_lock);
	list_add_tail(&vf->link, &pf->vf_dev_list);
	/*
	 * Save max settings (will be same for each VF, easier to re-write than
	 * to say "if (!set) { set_values(); set=1; }").
	 */
	for (res_type = USNIC_VNIC_RES_TYPE_EOL+1;
			res_type < USNIC_VNIC_RES_TYPE_MAX;
			res_type++) {
		pf->vf_res_cnt[res_type] = usnic_vnic_res_cnt(vf->vnic,
								res_type);
	}

	mutex_unlock(&pf->usdev_lock);

	usnic_info("Registering usnic VF %s into PF %s\n", pci_name(pdev),
		   dev_name(&pf->ib_dev.dev));
	usnic_ib_log_vf(vf);
	return 0;

out_clean_vnic:
	usnic_vnic_free(vf->vnic);
out_release_regions:
	pci_set_drvdata(pdev, NULL);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
out_disable_device:
	pci_disable_device(pdev);
out_clean_vf:
	kfree(vf);
	return err;
}

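/*
 * Remove a usNIC VF: detach it from its PF (possibly tearing the PF's
 * IB device down via the vf_cnt kref) and release its PCI resources.
 */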
static void usnic_ib_pci_remove(struct pci_dev *pdev)
{
	struct usnic_ib_vf *vf = pci_get_drvdata(pdev);
	struct usnic_ib_dev *pf = vf->pf;

	mutex_lock(&pf->usdev_lock);
	list_del(&vf->link);
	mutex_unlock(&pf->usdev_lock);

	kref_put(&pf->vf_cnt, usnic_ib_undiscover_pf);
	usnic_vnic_free(vf->vnic);
	pci_set_drvdata(pdev, NULL);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	kfree(vf);

	usnic_info("Removed VF %s\n", pci_name(pdev));
}

/* PCI driver entry points */
static struct pci_driver usnic_ib_pci_driver = {
	.name = DRV_NAME,
	.id_table = usnic_ib_pci_ids,
	.probe = usnic_ib_pci_probe,
	.remove = usnic_ib_pci_remove,
};
/* End of PCI section */

/* Start of module section */
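/*
 * Module init: bring up the uiom layer, register the PCI driver and
 * the netdev/inetaddr notifiers, then initialize the transport layer
 * and debugfs.
 */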
static int __init usnic_ib_init(void)
{
	int err;

	printk_once(KERN_INFO "%s", usnic_version);

	err = usnic_uiom_init(DRV_NAME);
	if (err) {
		usnic_err("Unable to initialize umem with err %d\n", err);
		return err;
	}

	err = pci_register_driver(&usnic_ib_pci_driver);
	if (err) {
		usnic_err("Unable to register with PCI\n");
		goto out_umem_fini;
	}

	err = register_netdevice_notifier(&usnic_ib_netdevice_notifier);
	if (err) {
		usnic_err("Failed to register netdev notifier\n");
		goto out_pci_unreg;
	}

	err = register_inetaddr_notifier(&usnic_ib_inetaddr_notifier);
	if (err) {
		usnic_err("Failed to register inet addr notifier\n");
		goto out_unreg_netdev_notifier;
	}

	err = usnic_transport_init();
	if (err) {
		usnic_err("Failed to initialize transport\n");
		goto out_unreg_inetaddr_notifier;
	}

	usnic_debugfs_init();

	return 0;

out_unreg_inetaddr_notifier:
	unregister_inetaddr_notifier(&usnic_ib_inetaddr_notifier);
out_unreg_netdev_notifier:
	unregister_netdevice_notifier(&usnic_ib_netdevice_notifier);
out_pci_unreg:
	pci_unregister_driver(&usnic_ib_pci_driver);
out_umem_fini:
	usnic_uiom_fini();

	return err;
}

static void __exit usnic_ib_destroy(void)
{
	usnic_dbg("\n");
	usnic_debugfs_exit();
	usnic_transport_fini();
	unregister_inetaddr_notifier(&usnic_ib_inetaddr_notifier);
	unregister_netdevice_notifier(&usnic_ib_netdevice_notifier);
	pci_unregister_driver(&usnic_ib_pci_driver);
	usnic_uiom_fini();
}

MODULE_DESCRIPTION("Cisco VIC (usNIC) Verbs Driver");
MODULE_AUTHOR("Upinder Malhi <umalhi@cisco.com>");
MODULE_LICENSE("Dual BSD/GPL");
module_param(usnic_log_lvl, uint, S_IRUGO | S_IWUSR);
module_param(usnic_ib_share_vf, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(usnic_log_lvl, " Off=0, Err=1, Info=2, Debug=3");
MODULE_PARM_DESC(usnic_ib_share_vf, "Off=0, On=1 VF sharing amongst QPs");
MODULE_DEVICE_TABLE(pci, usnic_ib_pci_ids);

module_init(usnic_ib_init);
module_exit(usnic_ib_destroy);
/* End of module section */