/*
 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Author: Upinder Malhi <umalhi@cisco.com>
 * Author: Anant Deepak <anadeepa@cisco.com>
 * Author: Cesare Cantu' <cantuc@cisco.com>
 * Author: Jeff Squyres <jsquyres@cisco.com>
 * Author: Kiran Thirumalai <kithirum@cisco.com>
 * Author: Xuyang Wang <xuywang@cisco.com>
 * Author: Reese Faucette <rfaucett@cisco.com>
 *
 */

#include <linux/module.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/netdevice.h>

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>

#include "usnic_abi.h"
#include "usnic_common_util.h"
#include "usnic_ib.h"
#include "usnic_ib_qp_grp.h"
#include "usnic_log.h"
#include "usnic_fwd.h"
#include "usnic_debugfs.h"
#include "usnic_ib_verbs.h"
#include "usnic_transport.h"
#include "usnic_uiom.h"
#include "usnic_ib_sysfs.h"

unsigned int usnic_log_lvl = USNIC_LOG_LVL_ERR;
unsigned int usnic_ib_share_vf = 1;

static const char usnic_version[] =
	DRV_NAME ": Cisco VIC (USNIC) Verbs Driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

static DEFINE_MUTEX(usnic_ib_ibdev_list_lock);
static LIST_HEAD(usnic_ib_ibdev_list);

/* Callback dump funcs */
static int usnic_ib_dump_vf_hdr(void *obj, char *buf, int buf_sz)
{
	struct usnic_ib_vf *vf = obj;
	return scnprintf(buf, buf_sz, "PF: %s ", vf->pf->ib_dev.name);
}
/* End callback dump funcs */

static void usnic_ib_dump_vf(struct usnic_ib_vf *vf, char *buf, int buf_sz)
{
	usnic_vnic_dump(vf->vnic, buf, buf_sz, vf,
			usnic_ib_dump_vf_hdr,
			usnic_ib_qp_grp_dump_hdr, usnic_ib_qp_grp_dump_rows);
}

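/*
 * Dump the state of a VF (its owning PF header plus its QP group rows)
 * into a local buffer and emit it at debug log level.
 */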
void usnic_ib_log_vf(struct usnic_ib_vf *vf)
{
	char buf[1000];
	usnic_ib_dump_vf(vf, buf, sizeof(buf));
	usnic_dbg("%s\n", buf);
}

/* Start of netdev section */
static inline const char *usnic_ib_netdev_event_to_string(unsigned long event)
{
	const char *event2str[] = {"NETDEV_NONE", "NETDEV_UP", "NETDEV_DOWN",
		"NETDEV_REBOOT", "NETDEV_CHANGE",
		"NETDEV_REGISTER", "NETDEV_UNREGISTER", "NETDEV_CHANGEMTU",
		"NETDEV_CHANGEADDR", "NETDEV_GOING_DOWN", "NETDEV_FEAT_CHANGE",
		"NETDEV_BONDING_FAILOVER", "NETDEV_PRE_UP",
		"NETDEV_PRE_TYPE_CHANGE", "NETDEV_POST_TYPE_CHANGE",
		"NETDEV_POST_INIT", "NETDEV_UNREGISTER_FINAL", "NETDEV_RELEASE",
		"NETDEV_NOTIFY_PEERS", "NETDEV_JOIN"
	};

	if (event >= ARRAY_SIZE(event2str))
		return "UNKNOWN_NETDEV_EVENT";
	else
		return event2str[event];
}

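/*
 * Walk every QP group in every user context of this device and force any
 * group still in INIT, RTR, or RTS into the ERR state. Used when the
 * underlying link, MAC, MTU, or IP address changes and existing QPs can
 * no longer be trusted. Caller must hold us_ibdev->usdev_lock.
 */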
static void usnic_ib_qp_grp_modify_active_to_err(struct usnic_ib_dev *us_ibdev)
{
	struct usnic_ib_ucontext *ctx;
	struct usnic_ib_qp_grp *qp_grp;
	enum ib_qp_state cur_state;
	int status;

	BUG_ON(!mutex_is_locked(&us_ibdev->usdev_lock));

	list_for_each_entry(ctx, &us_ibdev->ctx_list, link) {
		list_for_each_entry(qp_grp, &ctx->qp_grp_list, link) {
			cur_state = qp_grp->state;
			if (cur_state == IB_QPS_INIT ||
				cur_state == IB_QPS_RTR ||
				cur_state == IB_QPS_RTS) {
				status = usnic_ib_qp_grp_modify(qp_grp,
								IB_QPS_ERR,
								NULL);
				if (status) {
					usnic_err("Failed to transition qp grp %u from %s to %s\n",
						qp_grp->grp_id,
						usnic_ib_qp_grp_state_to_string
						(cur_state),
						usnic_ib_qp_grp_state_to_string
						(IB_QPS_ERR));
				}
			}
		}
	}
}

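/*
 * Translate a netdev notifier event on the underlying netdevice into the
 * corresponding IB event: PF reset and carrier loss move active QP groups
 * to ERR and dispatch IB_EVENT_PORT_ERR, carrier up dispatches
 * IB_EVENT_PORT_ACTIVE, and MAC/MTU changes update the forwarding device
 * (a MAC change also dispatches IB_EVENT_GID_CHANGE).
 */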
static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev,
					unsigned long event)
{
	struct net_device *netdev;
	struct ib_event ib_event;

	memset(&ib_event, 0, sizeof(ib_event));

	mutex_lock(&us_ibdev->usdev_lock);
	netdev = us_ibdev->netdev;
	switch (event) {
	case NETDEV_REBOOT:
		usnic_info("PF Reset on %s\n", us_ibdev->ib_dev.name);
		usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
		ib_event.event = IB_EVENT_PORT_ERR;
		ib_event.device = &us_ibdev->ib_dev;
		ib_event.element.port_num = 1;
		ib_dispatch_event(&ib_event);
		break;
	case NETDEV_UP:
	case NETDEV_DOWN:
	case NETDEV_CHANGE:
		if (!us_ibdev->ufdev->link_up &&
				netif_carrier_ok(netdev)) {
			usnic_fwd_carrier_up(us_ibdev->ufdev);
			usnic_info("Link UP on %s\n", us_ibdev->ib_dev.name);
			ib_event.event = IB_EVENT_PORT_ACTIVE;
			ib_event.device = &us_ibdev->ib_dev;
			ib_event.element.port_num = 1;
			ib_dispatch_event(&ib_event);
		} else if (us_ibdev->ufdev->link_up &&
				!netif_carrier_ok(netdev)) {
			usnic_fwd_carrier_down(us_ibdev->ufdev);
			usnic_info("Link DOWN on %s\n", us_ibdev->ib_dev.name);
			usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
			ib_event.event = IB_EVENT_PORT_ERR;
			ib_event.device = &us_ibdev->ib_dev;
			ib_event.element.port_num = 1;
			ib_dispatch_event(&ib_event);
		} else {
			usnic_dbg("Ignoring %s on %s\n",
					usnic_ib_netdev_event_to_string(event),
					us_ibdev->ib_dev.name);
		}
		break;
	case NETDEV_CHANGEADDR:
		if (!memcmp(us_ibdev->ufdev->mac, netdev->dev_addr,
				sizeof(us_ibdev->ufdev->mac))) {
			usnic_dbg("Ignoring addr change on %s\n",
					us_ibdev->ib_dev.name);
		} else {
			usnic_info(" %s old mac: %pM new mac: %pM\n",
					us_ibdev->ib_dev.name,
					us_ibdev->ufdev->mac,
					netdev->dev_addr);
			usnic_fwd_set_mac(us_ibdev->ufdev, netdev->dev_addr);
			usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
			ib_event.event = IB_EVENT_GID_CHANGE;
			ib_event.device = &us_ibdev->ib_dev;
			ib_event.element.port_num = 1;
			ib_dispatch_event(&ib_event);
		}

		break;
	case NETDEV_CHANGEMTU:
		if (us_ibdev->ufdev->mtu != netdev->mtu) {
			usnic_info("MTU Change on %s old: %u new: %u\n",
					us_ibdev->ib_dev.name,
					us_ibdev->ufdev->mtu, netdev->mtu);
			usnic_fwd_set_mtu(us_ibdev->ufdev, netdev->mtu);
			usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
		} else {
			usnic_dbg("Ignoring MTU change on %s\n",
					us_ibdev->ib_dev.name);
		}
		break;
	default:
		usnic_dbg("Ignoring event %s on %s\n",
				usnic_ib_netdev_event_to_string(event),
				us_ibdev->ib_dev.name);
	}
	mutex_unlock(&us_ibdev->usdev_lock);
}

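/*
 * Netdevice notifier callback: find the usnic IB device backed by the
 * netdev that generated the event and hand the event off to
 * usnic_ib_handle_usdev_event().
 */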
static int usnic_ib_netdevice_event(struct notifier_block *notifier,
					unsigned long event, void *ptr)
{
	struct usnic_ib_dev *us_ibdev;

	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	mutex_lock(&usnic_ib_ibdev_list_lock);
	list_for_each_entry(us_ibdev, &usnic_ib_ibdev_list, ib_dev_link) {
		if (us_ibdev->netdev == netdev) {
			usnic_ib_handle_usdev_event(us_ibdev, event);
			break;
		}
	}
	mutex_unlock(&usnic_ib_ibdev_list_lock);

	return NOTIFY_DONE;
}

static struct notifier_block usnic_ib_netdevice_notifier = {
	.notifier_call = usnic_ib_netdevice_event
};
/* End of netdev section */

/* Start of inet section */
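/*
 * Apply an inet address change to the forwarding device. Address removal
 * also moves active QP groups to ERR; both add and remove are reported to
 * the IB core as IB_EVENT_GID_CHANGE, since the GID is derived from the
 * MAC and IP address (see usnic_mac_ip_to_gid() below).
 */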
static int usnic_ib_handle_inet_event(struct usnic_ib_dev *us_ibdev,
					unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = ptr;
	struct ib_event ib_event;

	mutex_lock(&us_ibdev->usdev_lock);

	switch (event) {
	case NETDEV_DOWN:
		usnic_info("%s via ip notifiers\n",
				usnic_ib_netdev_event_to_string(event));
		usnic_fwd_del_ipaddr(us_ibdev->ufdev);
		usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
		ib_event.event = IB_EVENT_GID_CHANGE;
		ib_event.device = &us_ibdev->ib_dev;
		ib_event.element.port_num = 1;
		ib_dispatch_event(&ib_event);
		break;
	case NETDEV_UP:
		usnic_fwd_add_ipaddr(us_ibdev->ufdev, ifa->ifa_address);
		usnic_info("%s via ip notifiers: ip %pI4\n",
				usnic_ib_netdev_event_to_string(event),
				&us_ibdev->ufdev->inaddr);
		ib_event.event = IB_EVENT_GID_CHANGE;
		ib_event.device = &us_ibdev->ib_dev;
		ib_event.element.port_num = 1;
		ib_dispatch_event(&ib_event);
		break;
	default:
		usnic_info("Ignoring event %s on %s\n",
				usnic_ib_netdev_event_to_string(event),
				us_ibdev->ib_dev.name);
	}
	mutex_unlock(&us_ibdev->usdev_lock);

	return NOTIFY_DONE;
}

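/*
 * Inet address notifier callback: match the notifying netdev against the
 * registered usnic IB devices and forward the event to
 * usnic_ib_handle_inet_event().
 */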
static int usnic_ib_inetaddr_event(struct notifier_block *notifier,
					unsigned long event, void *ptr)
{
	struct usnic_ib_dev *us_ibdev;
	struct in_ifaddr *ifa = ptr;
	struct net_device *netdev = ifa->ifa_dev->dev;

	mutex_lock(&usnic_ib_ibdev_list_lock);
	list_for_each_entry(us_ibdev, &usnic_ib_ibdev_list, ib_dev_link) {
		if (us_ibdev->netdev == netdev) {
			usnic_ib_handle_inet_event(us_ibdev, event, ptr);
			break;
		}
	}
	mutex_unlock(&usnic_ib_ibdev_list_lock);

	return NOTIFY_DONE;
}

static struct notifier_block usnic_ib_inetaddr_notifier = {
	.notifier_call = usnic_ib_inetaddr_event
};
/* End of inet section */

/* Start of PF discovery section */
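/*
 * Allocate and register one IB device for a PF. The PF's netdev supplies
 * the MTU, MAC, carrier state, and (if configured) the primary IPv4
 * address used to seed the forwarding device and the node GUID. Returns
 * the new device on success, or NULL/ERR_PTR on failure; callers check
 * the result with IS_ERR_OR_NULL().
 */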
static void *usnic_ib_device_add(struct pci_dev *dev)
{
	struct usnic_ib_dev *us_ibdev;
	union ib_gid gid;
	struct in_ifaddr *in;
	struct net_device *netdev;

	usnic_dbg("\n");
	netdev = pci_get_drvdata(dev);

	us_ibdev = (struct usnic_ib_dev *)ib_alloc_device(sizeof(*us_ibdev));
	if (IS_ERR_OR_NULL(us_ibdev)) {
		usnic_err("Device %s context alloc failed\n",
				netdev_name(pci_get_drvdata(dev)));
		return ERR_PTR(us_ibdev ? PTR_ERR(us_ibdev) : -EFAULT);
	}

	us_ibdev->ufdev = usnic_fwd_dev_alloc(dev);
	if (IS_ERR_OR_NULL(us_ibdev->ufdev)) {
		usnic_err("Failed to alloc ufdev for %s with err %ld\n",
				pci_name(dev), PTR_ERR(us_ibdev->ufdev));
		goto err_dealloc;
	}

	mutex_init(&us_ibdev->usdev_lock);
	INIT_LIST_HEAD(&us_ibdev->vf_dev_list);
	INIT_LIST_HEAD(&us_ibdev->ctx_list);

	us_ibdev->pdev = dev;
	us_ibdev->netdev = pci_get_drvdata(dev);
	us_ibdev->ib_dev.owner = THIS_MODULE;
	us_ibdev->ib_dev.node_type = RDMA_NODE_USNIC_UDP;
	us_ibdev->ib_dev.phys_port_cnt = USNIC_IB_PORT_CNT;
	us_ibdev->ib_dev.num_comp_vectors = USNIC_IB_NUM_COMP_VECTORS;
	us_ibdev->ib_dev.dma_device = &dev->dev;
	us_ibdev->ib_dev.uverbs_abi_ver = USNIC_UVERBS_ABI_VERSION;
	strlcpy(us_ibdev->ib_dev.name, "usnic_%d", IB_DEVICE_NAME_MAX);

	us_ibdev->ib_dev.uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_OPEN_QP);

	us_ibdev->ib_dev.query_device = usnic_ib_query_device;
	us_ibdev->ib_dev.query_port = usnic_ib_query_port;
	us_ibdev->ib_dev.query_pkey = usnic_ib_query_pkey;
	us_ibdev->ib_dev.query_gid = usnic_ib_query_gid;
	us_ibdev->ib_dev.get_link_layer = usnic_ib_port_link_layer;
	us_ibdev->ib_dev.alloc_pd = usnic_ib_alloc_pd;
	us_ibdev->ib_dev.dealloc_pd = usnic_ib_dealloc_pd;
	us_ibdev->ib_dev.create_qp = usnic_ib_create_qp;
	us_ibdev->ib_dev.modify_qp = usnic_ib_modify_qp;
	us_ibdev->ib_dev.query_qp = usnic_ib_query_qp;
	us_ibdev->ib_dev.destroy_qp = usnic_ib_destroy_qp;
	us_ibdev->ib_dev.create_cq = usnic_ib_create_cq;
	us_ibdev->ib_dev.destroy_cq = usnic_ib_destroy_cq;
	us_ibdev->ib_dev.reg_user_mr = usnic_ib_reg_mr;
	us_ibdev->ib_dev.dereg_mr = usnic_ib_dereg_mr;
	us_ibdev->ib_dev.alloc_ucontext = usnic_ib_alloc_ucontext;
	us_ibdev->ib_dev.dealloc_ucontext = usnic_ib_dealloc_ucontext;
	us_ibdev->ib_dev.mmap = usnic_ib_mmap;
	us_ibdev->ib_dev.create_ah = usnic_ib_create_ah;
	us_ibdev->ib_dev.destroy_ah = usnic_ib_destroy_ah;
	us_ibdev->ib_dev.post_send = usnic_ib_post_send;
	us_ibdev->ib_dev.post_recv = usnic_ib_post_recv;
	us_ibdev->ib_dev.poll_cq = usnic_ib_poll_cq;
	us_ibdev->ib_dev.req_notify_cq = usnic_ib_req_notify_cq;
	us_ibdev->ib_dev.get_dma_mr = usnic_ib_get_dma_mr;

	if (ib_register_device(&us_ibdev->ib_dev, NULL))
		goto err_fwd_dealloc;

	usnic_fwd_set_mtu(us_ibdev->ufdev, us_ibdev->netdev->mtu);
	usnic_fwd_set_mac(us_ibdev->ufdev, us_ibdev->netdev->dev_addr);
	if (netif_carrier_ok(us_ibdev->netdev))
		usnic_fwd_carrier_up(us_ibdev->ufdev);

	in = ((struct in_device *)(netdev->ip_ptr))->ifa_list;
	if (in != NULL)
		usnic_fwd_add_ipaddr(us_ibdev->ufdev, in->ifa_address);

	usnic_mac_ip_to_gid(us_ibdev->netdev->perm_addr,
				us_ibdev->ufdev->inaddr, &gid.raw[0]);
	memcpy(&us_ibdev->ib_dev.node_guid, &gid.global.interface_id,
		sizeof(gid.global.interface_id));
	kref_init(&us_ibdev->vf_cnt);

	usnic_info("Added ibdev: %s netdev: %s with mac %pM Link: %u MTU: %u\n",
			us_ibdev->ib_dev.name, netdev_name(us_ibdev->netdev),
			us_ibdev->ufdev->mac, us_ibdev->ufdev->link_up,
			us_ibdev->ufdev->mtu);
	return us_ibdev;

err_fwd_dealloc:
	usnic_fwd_dev_free(us_ibdev->ufdev);
err_dealloc:
	usnic_err("failed -- deallocating device\n");
	ib_dealloc_device(&us_ibdev->ib_dev);
	return NULL;
}

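/*
 * Tear down a PF's IB device: drop its sysfs entries, free the forwarding
 * device, and unregister and free the IB device itself.
 */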
static void usnic_ib_device_remove(struct usnic_ib_dev *us_ibdev)
{
	usnic_info("Unregistering %s\n", us_ibdev->ib_dev.name);
	usnic_ib_sysfs_unregister_usdev(us_ibdev);
	usnic_fwd_dev_free(us_ibdev->ufdev);
	ib_unregister_device(&us_ibdev->ib_dev);
	ib_dealloc_device(&us_ibdev->ib_dev);
}

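/*
 * kref release callback for a PF's VF count: once the last VF reference is
 * dropped, unlink the PF from the global ibdev list and remove its IB
 * device.
 */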
static void usnic_ib_undiscover_pf(struct kref *kref)
{
	struct usnic_ib_dev *us_ibdev, *tmp;
	struct pci_dev *dev;
	bool found = false;

	dev = container_of(kref, struct usnic_ib_dev, vf_cnt)->pdev;
	mutex_lock(&usnic_ib_ibdev_list_lock);
	list_for_each_entry_safe(us_ibdev, tmp,
				&usnic_ib_ibdev_list, ib_dev_link) {
		if (us_ibdev->pdev == dev) {
			list_del(&us_ibdev->ib_dev_link);
			usnic_ib_device_remove(us_ibdev);
			found = true;
			break;
		}
	}

	WARN(!found, "Failed to remove PF %s\n", pci_name(dev));

	mutex_unlock(&usnic_ib_ibdev_list_lock);
}

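/*
 * Find (or create on first use) the IB device for the PF that owns the
 * given VF vnic, taking a reference on its VF count. The global ibdev
 * list lock serializes discovery against removal.
 */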
static struct usnic_ib_dev *usnic_ib_discover_pf(struct usnic_vnic *vnic)
{
	struct usnic_ib_dev *us_ibdev;
	struct pci_dev *parent_pci, *vf_pci;
	int err;

	vf_pci = usnic_vnic_get_pdev(vnic);
	parent_pci = pci_physfn(vf_pci);

	BUG_ON(!parent_pci);

	mutex_lock(&usnic_ib_ibdev_list_lock);
	list_for_each_entry(us_ibdev, &usnic_ib_ibdev_list, ib_dev_link) {
		if (us_ibdev->pdev == parent_pci) {
			kref_get(&us_ibdev->vf_cnt);
			goto out;
		}
	}

	us_ibdev = usnic_ib_device_add(parent_pci);
	if (IS_ERR_OR_NULL(us_ibdev)) {
		us_ibdev = us_ibdev ? us_ibdev : ERR_PTR(-EFAULT);
		goto out;
	}

	err = usnic_ib_sysfs_register_usdev(us_ibdev);
	if (err) {
		usnic_ib_device_remove(us_ibdev);
		us_ibdev = ERR_PTR(err);
		goto out;
	}

	list_add(&us_ibdev->ib_dev_link, &usnic_ib_ibdev_list);
out:
	mutex_unlock(&usnic_ib_ibdev_list_lock);
	return us_ibdev;
}
/* End of PF discovery section */

/* Start of PCI section */

static const struct pci_device_id usnic_ib_pci_ids[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC)},
	{0,}
};

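/*
 * Probe one usNIC VF: enable the PCI function, allocate its vnic, attach
 * it to its PF's IB device (discovering the PF on first use), and record
 * the per-resource-type counts on the PF.
 */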
static int usnic_ib_pci_probe(struct pci_dev *pdev,
				const struct pci_device_id *id)
{
	int err;
	struct usnic_ib_dev *pf;
	struct usnic_ib_vf *vf;
	enum usnic_vnic_res_type res_type;

	vf = kzalloc(sizeof(*vf), GFP_KERNEL);
	if (!vf)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		usnic_err("Failed to enable %s with err %d\n",
				pci_name(pdev), err);
		goto out_clean_vf;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		usnic_err("Failed to request region for %s with err %d\n",
				pci_name(pdev), err);
		goto out_disable_device;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, vf);

	vf->vnic = usnic_vnic_alloc(pdev);
	if (IS_ERR_OR_NULL(vf->vnic)) {
		err = vf->vnic ? PTR_ERR(vf->vnic) : -ENOMEM;
		usnic_err("Failed to alloc vnic for %s with err %d\n",
				pci_name(pdev), err);
		goto out_release_regions;
	}

	pf = usnic_ib_discover_pf(vf->vnic);
	if (IS_ERR_OR_NULL(pf)) {
		usnic_err("Failed to discover pf of vnic %s with err %ld\n",
				pci_name(pdev), PTR_ERR(pf));
		err = pf ? PTR_ERR(pf) : -EFAULT;
		goto out_clean_vnic;
	}

	vf->pf = pf;
	spin_lock_init(&vf->lock);
	mutex_lock(&pf->usdev_lock);
	list_add_tail(&vf->link, &pf->vf_dev_list);
	/*
	 * Save max settings (will be same for each VF, easier to re-write than
	 * to say "if (!set) { set_values(); set=1; }")
	 */
	for (res_type = USNIC_VNIC_RES_TYPE_EOL+1;
			res_type < USNIC_VNIC_RES_TYPE_MAX;
			res_type++) {
		pf->vf_res_cnt[res_type] = usnic_vnic_res_cnt(vf->vnic,
								res_type);
	}

	mutex_unlock(&pf->usdev_lock);

	usnic_info("Registering usnic VF %s into PF %s\n", pci_name(pdev),
			pf->ib_dev.name);
	usnic_ib_log_vf(vf);
	return 0;

out_clean_vnic:
	usnic_vnic_free(vf->vnic);
out_release_regions:
	pci_set_drvdata(pdev, NULL);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
out_disable_device:
	pci_disable_device(pdev);
out_clean_vf:
	kfree(vf);
	return err;
}

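/*
 * Remove one usNIC VF: detach it from its PF (possibly removing the PF's
 * IB device via the kref release callback), free the vnic, and release
 * the PCI function.
 */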
static void usnic_ib_pci_remove(struct pci_dev *pdev)
{
	struct usnic_ib_vf *vf = pci_get_drvdata(pdev);
	struct usnic_ib_dev *pf = vf->pf;

	mutex_lock(&pf->usdev_lock);
	list_del(&vf->link);
	mutex_unlock(&pf->usdev_lock);

	kref_put(&pf->vf_cnt, usnic_ib_undiscover_pf);
	usnic_vnic_free(vf->vnic);
	pci_set_drvdata(pdev, NULL);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	kfree(vf);

	usnic_info("Removed VF %s\n", pci_name(pdev));
}

/* PCI driver entry points */
static struct pci_driver usnic_ib_pci_driver = {
	.name = DRV_NAME,
	.id_table = usnic_ib_pci_ids,
	.probe = usnic_ib_pci_probe,
	.remove = usnic_ib_pci_remove,
};
/* End of PCI section */

/* Start of module section */
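/*
 * Module init: bring up the umem/uiom layer, register the PCI driver and
 * the netdev/inet notifiers, initialize the transport layer, and create
 * the debugfs entries. Failure unwinds the completed steps in reverse
 * order.
 */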
static int __init usnic_ib_init(void)
{
	int err;

	printk_once(KERN_INFO "%s", usnic_version);

	err = usnic_uiom_init(DRV_NAME);
	if (err) {
		usnic_err("Unable to initialize umem with err %d\n", err);
		return err;
	}

	err = pci_register_driver(&usnic_ib_pci_driver);
	if (err) {
		usnic_err("Unable to register with PCI\n");
		goto out_umem_fini;
	}

	err = register_netdevice_notifier(&usnic_ib_netdevice_notifier);
	if (err) {
		usnic_err("Failed to register netdev notifier\n");
		goto out_pci_unreg;
	}

	err = register_inetaddr_notifier(&usnic_ib_inetaddr_notifier);
	if (err) {
		usnic_err("Failed to register inet addr notifier\n");
		goto out_unreg_netdev_notifier;
	}

	err = usnic_transport_init();
	if (err) {
		usnic_err("Failed to initialize transport\n");
		goto out_unreg_inetaddr_notifier;
	}

	usnic_debugfs_init();

	return 0;

out_unreg_inetaddr_notifier:
	unregister_inetaddr_notifier(&usnic_ib_inetaddr_notifier);
out_unreg_netdev_notifier:
	unregister_netdevice_notifier(&usnic_ib_netdevice_notifier);
out_pci_unreg:
	pci_unregister_driver(&usnic_ib_pci_driver);
out_umem_fini:
	usnic_uiom_fini();

	return err;
}

static void __exit usnic_ib_destroy(void)
{
	usnic_dbg("\n");
	usnic_debugfs_exit();
	usnic_transport_fini();
	unregister_inetaddr_notifier(&usnic_ib_inetaddr_notifier);
	unregister_netdevice_notifier(&usnic_ib_netdevice_notifier);
	pci_unregister_driver(&usnic_ib_pci_driver);
	usnic_uiom_fini();
}

MODULE_DESCRIPTION("Cisco VIC (usNIC) Verbs Driver");
MODULE_AUTHOR("Upinder Malhi <umalhi@cisco.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
module_param(usnic_log_lvl, uint, S_IRUGO | S_IWUSR);
module_param(usnic_ib_share_vf, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(usnic_log_lvl, " Off=0, Err=1, Info=2, Debug=3");
MODULE_PARM_DESC(usnic_ib_share_vf, "Off=0, On=1 VF sharing amongst QPs");
MODULE_DEVICE_TABLE(pci, usnic_ib_pci_ids);

module_init(usnic_ib_init);
module_exit(usnic_ib_destroy);
/* End of module section */