/*
 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>

#include "usnic_abi.h"
#include "usnic_ib.h"
#include "usnic_common_util.h"
#include "usnic_ib_qp_grp.h"
#include "usnic_ib_verbs.h"
#include "usnic_fwd.h"
#include "usnic_log.h"
#include "usnic_uiom.h"
#include "usnic_transport.h"

#define USNIC_DEFAULT_TRANSPORT USNIC_TRANSPORT_ROCE_CUSTOM

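/*
 * Minimum vNIC resources (work queues, receive queues, completion queues)
 * that must be available on a VF for each supported transport type.  The
 * CQ count is bumped at QP-create time when separate send/recv CQs are
 * requested.
 */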
const struct usnic_vnic_res_spec min_transport_spec[USNIC_TRANSPORT_MAX] = {
	{ /*USNIC_TRANSPORT_UNKNOWN*/
		.resources = {
			{.type = USNIC_VNIC_RES_TYPE_EOL,	.cnt = 0,},
		},
	},
	{ /*USNIC_TRANSPORT_ROCE_CUSTOM*/
		.resources = {
			{.type = USNIC_VNIC_RES_TYPE_WQ,	.cnt = 1,},
			{.type = USNIC_VNIC_RES_TYPE_RQ,	.cnt = 1,},
			{.type = USNIC_VNIC_RES_TYPE_CQ,	.cnt = 1,},
			{.type = USNIC_VNIC_RES_TYPE_EOL,	.cnt = 0,},
		},
	},
	{ /*USNIC_TRANSPORT_IPV4_UDP*/
		.resources = {
			{.type = USNIC_VNIC_RES_TYPE_WQ,	.cnt = 1,},
			{.type = USNIC_VNIC_RES_TYPE_RQ,	.cnt = 1,},
			{.type = USNIC_VNIC_RES_TYPE_CQ,	.cnt = 1,},
			{.type = USNIC_VNIC_RES_TYPE_EOL,	.cnt = 0,},
		},
	},
};

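/*
 * Pack the first eight bytes of the ethtool firmware version string into a
 * u64 so it can be reported as ib_device_attr.fw_ver.
 */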
static void usnic_ib_fw_string_to_u64(char *fw_ver_str, u64 *fw_ver)
{
	/* memcpy avoids an unaligned u64 load from the char array */
	memcpy(fw_ver, fw_ver_str, sizeof(*fw_ver));
}

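/*
 * Fill the create_qp response that is copied back to userspace: the VF
 * index, BAR0 bus address/length, the vNIC indices of the RQ/WQ/CQ
 * resources owned by the qp_grp, and the transport of the default flow.
 */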
static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp,
					struct ib_udata *udata)
{
	struct usnic_ib_dev *us_ibdev;
	struct usnic_ib_create_qp_resp resp;
	struct pci_dev *pdev;
	struct vnic_dev_bar *bar;
	struct usnic_vnic_res_chunk *chunk;
	struct usnic_ib_qp_grp_flow *default_flow;
	int i, err;

	memset(&resp, 0, sizeof(resp));

	us_ibdev = qp_grp->vf->pf;
	pdev = usnic_vnic_get_pdev(qp_grp->vf->vnic);
	if (!pdev) {
		usnic_err("Failed to get pdev of qp_grp %d\n",
				qp_grp->grp_id);
		return -EFAULT;
	}

	bar = usnic_vnic_get_bar(qp_grp->vf->vnic, 0);
	if (!bar) {
		usnic_err("Failed to get bar0 of qp_grp %d vf %s\n",
				qp_grp->grp_id, pci_name(pdev));
		return -EFAULT;
	}

	resp.vfid = usnic_vnic_get_index(qp_grp->vf->vnic);
	resp.bar_bus_addr = bar->bus_addr;
	resp.bar_len = bar->len;

	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
	if (IS_ERR(chunk)) {
		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
			qp_grp->grp_id,
			PTR_ERR(chunk));
		return PTR_ERR(chunk);
	}

	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_RQ);
	resp.rq_cnt = chunk->cnt;
	for (i = 0; i < chunk->cnt; i++)
		resp.rq_idx[i] = chunk->res[i]->vnic_idx;

	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_WQ);
	if (IS_ERR(chunk)) {
		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_WQ),
			qp_grp->grp_id,
			PTR_ERR(chunk));
		return PTR_ERR(chunk);
	}

	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_WQ);
	resp.wq_cnt = chunk->cnt;
	for (i = 0; i < chunk->cnt; i++)
		resp.wq_idx[i] = chunk->res[i]->vnic_idx;

	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_CQ);
	if (IS_ERR(chunk)) {
		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_CQ),
			qp_grp->grp_id,
			PTR_ERR(chunk));
		return PTR_ERR(chunk);
	}

	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_CQ);
	resp.cq_cnt = chunk->cnt;
	for (i = 0; i < chunk->cnt; i++)
		resp.cq_idx[i] = chunk->res[i]->vnic_idx;

	default_flow = list_first_entry(&qp_grp->flows_lst,
					struct usnic_ib_qp_grp_flow, link);
	resp.transport = default_flow->trans_type;

	err = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (err) {
		usnic_err("Failed to copy udata for %s\n",
			  dev_name(&us_ibdev->ib_dev.dev));
		return err;
	}

	return 0;
}

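/*
 * Pick a VF for a new qp_grp.  If VF sharing is enabled, first look for a
 * VF that already belongs to this PD and has room for res_spec; otherwise
 * fall back to a completely unused VF.  Must be called with usdev_lock
 * held.
 */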
static struct usnic_ib_qp_grp*
find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
				struct usnic_ib_pd *pd,
				struct usnic_transport_spec *trans_spec,
				struct usnic_vnic_res_spec *res_spec)
{
	struct usnic_ib_vf *vf;
	struct usnic_vnic *vnic;
	struct usnic_ib_qp_grp *qp_grp;
	struct device *dev, **dev_list = NULL;
	int i;

	BUG_ON(!mutex_is_locked(&us_ibdev->usdev_lock));

	if (list_empty(&us_ibdev->vf_dev_list)) {
		usnic_info("No vfs to allocate\n");
		return NULL;
	}

	if (usnic_ib_share_vf) {
		/* Try to find resources on a used vf which is in pd */
		dev_list = usnic_uiom_get_dev_list(pd->umem_pd);
		if (IS_ERR(dev_list))
			return ERR_CAST(dev_list);
		for (i = 0; dev_list[i]; i++) {
			dev = dev_list[i];
			vf = pci_get_drvdata(to_pci_dev(dev));
			spin_lock(&vf->lock);
			vnic = vf->vnic;
			if (!usnic_vnic_check_room(vnic, res_spec)) {
				usnic_dbg("Found used vnic %s from %s\n",
						dev_name(&us_ibdev->ib_dev.dev),
						pci_name(usnic_vnic_get_pdev(
									vnic)));
				qp_grp = usnic_ib_qp_grp_create(us_ibdev->ufdev,
								vf, pd,
								res_spec,
								trans_spec);

				spin_unlock(&vf->lock);
				goto qp_grp_check;
			}
			spin_unlock(&vf->lock);

		}
		usnic_uiom_free_dev_list(dev_list);
		dev_list = NULL;
	}

	/* Try to find resources on an unused vf */
	list_for_each_entry(vf, &us_ibdev->vf_dev_list, link) {
		spin_lock(&vf->lock);
		vnic = vf->vnic;
		if (vf->qp_grp_ref_cnt == 0 &&
		    usnic_vnic_check_room(vnic, res_spec) == 0) {
			qp_grp = usnic_ib_qp_grp_create(us_ibdev->ufdev, vf,
							pd, res_spec,
							trans_spec);

			spin_unlock(&vf->lock);
			goto qp_grp_check;
		}
		spin_unlock(&vf->lock);
	}

	usnic_info("No free qp grp found on %s\n",
		   dev_name(&us_ibdev->ib_dev.dev));
	return ERR_PTR(-ENOMEM);

qp_grp_check:
	/* The shared-VF path jumps here with dev_list still allocated */
	if (dev_list)
		usnic_uiom_free_dev_list(dev_list);
	if (IS_ERR_OR_NULL(qp_grp)) {
		usnic_err("Failed to allocate qp_grp\n");
		return ERR_PTR(qp_grp ? PTR_ERR(qp_grp) : -ENOMEM);
	}
	return qp_grp;
}

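/*
 * Tear down a qp_grp and return its resources to the owning VF.  The
 * qp_grp must already have been moved back to the RESET state.
 */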
static void qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
{
	struct usnic_ib_vf *vf = qp_grp->vf;

	WARN_ON(qp_grp->state != IB_QPS_RESET);

	spin_lock(&vf->lock);
	usnic_ib_qp_grp_destroy(qp_grp);
	spin_unlock(&vf->lock);
}

static int create_qp_validate_user_data(struct usnic_ib_create_qp_cmd cmd)
{
	if (cmd.spec.trans_type <= USNIC_TRANSPORT_UNKNOWN ||
			cmd.spec.trans_type >= USNIC_TRANSPORT_MAX)
		return -EINVAL;

	return 0;
}

/* Start of ib callback functions */

enum rdma_link_layer usnic_ib_port_link_layer(struct ib_device *device,
						u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

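/*
 * Report device attributes.  Per-VF WQ/RQ/CQ counts are scaled by the
 * number of VFs bound to this device; limits that are enforced purely in
 * userspace (max_qp_wr, max_sge, max_sge_rd, max_cqe) are left at zero.
 */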
int usnic_ib_query_device(struct ib_device *ibdev,
			  struct ib_device_attr *props,
			  struct ib_udata *uhw)
{
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
	union ib_gid gid;
	struct ethtool_drvinfo info;
	int qp_per_vf;

	usnic_dbg("\n");
	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	mutex_lock(&us_ibdev->usdev_lock);
	us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info);
	memset(props, 0, sizeof(*props));
	usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr,
			&gid.raw[0]);
	memcpy(&props->sys_image_guid, &gid.global.interface_id,
		sizeof(gid.global.interface_id));
	usnic_ib_fw_string_to_u64(&info.fw_version[0], &props->fw_ver);
	props->max_mr_size = USNIC_UIOM_MAX_MR_SIZE;
	props->page_size_cap = USNIC_UIOM_PAGE_SIZE;
	props->vendor_id = PCI_VENDOR_ID_CISCO;
	props->vendor_part_id = PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC;
	props->hw_ver = us_ibdev->pdev->subsystem_device;
	qp_per_vf = max(us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_WQ],
			us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_RQ]);
	props->max_qp = qp_per_vf *
		kref_read(&us_ibdev->vf_cnt);
	props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
	props->max_cq = us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ] *
		kref_read(&us_ibdev->vf_cnt);
	props->max_pd = USNIC_UIOM_MAX_PD_CNT;
	props->max_mr = USNIC_UIOM_MAX_MR_CNT;
	props->local_ca_ack_delay = 0;
	props->max_pkeys = 0;
	props->atomic_cap = IB_ATOMIC_NONE;
	props->masked_atomic_cap = props->atomic_cap;
	props->max_qp_rd_atom = 0;
	props->max_qp_init_rd_atom = 0;
	props->max_res_rd_atom = 0;
	props->max_srq = 0;
	props->max_srq_wr = 0;
	props->max_srq_sge = 0;
	props->max_fast_reg_page_list_len = 0;
	props->max_mcast_grp = 0;
	props->max_mcast_qp_attach = 0;
	props->max_total_mcast_qp_attach = 0;
	props->max_map_per_fmr = 0;
	/* Owned by Userspace
	 * max_qp_wr, max_sge, max_sge_rd, max_cqe */
	mutex_unlock(&us_ibdev->usdev_lock);

	return 0;
}

int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
				struct ib_port_attr *props)
{
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);

	usnic_dbg("\n");

	if (ib_get_eth_speed(ibdev, port, &props->active_speed,
			     &props->active_width))
		return -EINVAL;

	/*
	 * usdev_lock is acquired after (and not before) ib_get_eth_speed call
	 * because acquiring rtnl_lock in ib_get_eth_speed, while holding
	 * usdev_lock could lead to a deadlock.
	 */
	mutex_lock(&us_ibdev->usdev_lock);
	/* props being zeroed by the caller, avoid zeroing it here */

	props->lid = 0;
	props->lmc = 1;
	props->sm_lid = 0;
	props->sm_sl = 0;

	if (!us_ibdev->ufdev->link_up) {
		props->state = IB_PORT_DOWN;
		props->phys_state = 3;	/* Disabled */
	} else if (!us_ibdev->ufdev->inaddr) {
		props->state = IB_PORT_INIT;
		props->phys_state = 4;	/* Port configuration training */
	} else {
		props->state = IB_PORT_ACTIVE;
		props->phys_state = 5;	/* Link up */
	}

	props->port_cap_flags = 0;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->bad_pkey_cntr = 0;
	props->qkey_viol_cntr = 0;
	props->max_mtu = IB_MTU_4096;
	props->active_mtu = iboe_get_mtu(us_ibdev->ufdev->mtu);
	/* Userspace will adjust for hdrs */
	props->max_msg_sz = us_ibdev->ufdev->mtu;
	props->max_vl_num = 1;
	mutex_unlock(&us_ibdev->usdev_lock);

	return 0;
}

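/*
 * Only the QP state and (for UD QPs) the qkey are meaningful for usNIC,
 * so everything else in qp_attr/qp_init_attr is reported as zero.
 */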
int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
				int qp_attr_mask,
				struct ib_qp_init_attr *qp_init_attr)
{
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_vf *vf;
	int err;

	usnic_dbg("\n");

	memset(qp_attr, 0, sizeof(*qp_attr));
	memset(qp_init_attr, 0, sizeof(*qp_init_attr));

	qp_grp = to_uqp_grp(qp);
	vf = qp_grp->vf;
	mutex_lock(&vf->pf->usdev_lock);
	usnic_dbg("\n");
	qp_attr->qp_state = qp_grp->state;
	qp_attr->cur_qp_state = qp_grp->state;

	switch (qp_grp->ibqp.qp_type) {
	case IB_QPT_UD:
		qp_attr->qkey = 0;
		break;
	default:
		usnic_err("Unexpected qp_type %d\n", qp_grp->ibqp.qp_type);
		err = -EINVAL;
		goto err_out;
	}

	mutex_unlock(&vf->pf->usdev_lock);
	return 0;

err_out:
	mutex_unlock(&vf->pf->usdev_lock);
	return err;
}

int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
				union ib_gid *gid)
{
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);

	usnic_dbg("\n");

	if (index > 1)
		return -EINVAL;

	mutex_lock(&us_ibdev->usdev_lock);
	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
	usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr,
			&gid->raw[0]);
	mutex_unlock(&us_ibdev->usdev_lock);

	return 0;
}

struct net_device *usnic_get_netdev(struct ib_device *device, u8 port_num)
{
	struct usnic_ib_dev *us_ibdev = to_usdev(device);

	if (us_ibdev->netdev)
		dev_hold(us_ibdev->netdev);

	return us_ibdev->netdev;
}

int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
				u16 *pkey)
{
	if (index > 1)
		return -EINVAL;

	*pkey = 0xffff;
	return 0;
}

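/*
 * A usNIC protection domain is a thin wrapper around a uiom PD, which
 * tracks userspace memory registrations and the devices attached to the
 * domain.
 */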
struct ib_pd *usnic_ib_alloc_pd(struct ib_device *ibdev,
					struct ib_ucontext *context,
					struct ib_udata *udata)
{
	struct usnic_ib_pd *pd;
	void *umem_pd;

	usnic_dbg("\n");

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	umem_pd = pd->umem_pd = usnic_uiom_alloc_pd();
	if (IS_ERR_OR_NULL(umem_pd)) {
		kfree(pd);
		return ERR_PTR(umem_pd ? PTR_ERR(umem_pd) : -ENOMEM);
	}

	usnic_info("domain 0x%p allocated for context 0x%p and device %s\n",
		   pd, context, dev_name(&ibdev->dev));
	return &pd->ibpd;
}

int usnic_ib_dealloc_pd(struct ib_pd *pd)
{
	usnic_info("freeing domain 0x%p\n", pd);

	usnic_uiom_dealloc_pd((to_upd(pd))->umem_pd);
	kfree(pd);
	return 0;
}

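/*
 * Create a UD QP backed by a qp_grp: copy and validate the userspace
 * command, size the resource spec from the requested transport, find a VF
 * with room, then return the chosen resources to userspace via udata.
 */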
struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
					struct ib_qp_init_attr *init_attr,
					struct ib_udata *udata)
{
	int err;
	struct usnic_ib_dev *us_ibdev;
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_ucontext *ucontext;
	int cq_cnt;
	struct usnic_vnic_res_spec res_spec;
	struct usnic_ib_create_qp_cmd cmd;
	struct usnic_transport_spec trans_spec;

	usnic_dbg("\n");

	ucontext = to_uucontext(pd->uobject->context);
	us_ibdev = to_usdev(pd->device);

	if (init_attr->create_flags)
		return ERR_PTR(-EINVAL);

	err = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
	if (err) {
		usnic_err("%s: cannot copy udata for create_qp\n",
			  dev_name(&us_ibdev->ib_dev.dev));
		return ERR_PTR(-EINVAL);
	}

	err = create_qp_validate_user_data(cmd);
	if (err) {
		usnic_err("%s: Failed to validate user data\n",
			  dev_name(&us_ibdev->ib_dev.dev));
		return ERR_PTR(-EINVAL);
	}

	if (init_attr->qp_type != IB_QPT_UD) {
		usnic_err("%s asked to make a non-UD QP: %d\n",
			  dev_name(&us_ibdev->ib_dev.dev), init_attr->qp_type);
		return ERR_PTR(-EINVAL);
	}

	trans_spec = cmd.spec;
	mutex_lock(&us_ibdev->usdev_lock);
	cq_cnt = (init_attr->send_cq == init_attr->recv_cq) ? 1 : 2;
	res_spec = min_transport_spec[trans_spec.trans_type];
	usnic_vnic_res_spec_update(&res_spec, USNIC_VNIC_RES_TYPE_CQ, cq_cnt);
	qp_grp = find_free_vf_and_create_qp_grp(us_ibdev, to_upd(pd),
						&trans_spec,
						&res_spec);
	if (IS_ERR_OR_NULL(qp_grp)) {
		err = qp_grp ? PTR_ERR(qp_grp) : -ENOMEM;
		goto out_release_mutex;
	}

	err = usnic_ib_fill_create_qp_resp(qp_grp, udata);
	if (err) {
		err = -EBUSY;
		goto out_release_qp_grp;
	}

	qp_grp->ctx = ucontext;
	list_add_tail(&qp_grp->link, &ucontext->qp_grp_list);
	usnic_ib_log_vf(qp_grp->vf);
	mutex_unlock(&us_ibdev->usdev_lock);
	return &qp_grp->ibqp;

out_release_qp_grp:
	qp_grp_destroy(qp_grp);
out_release_mutex:
	mutex_unlock(&us_ibdev->usdev_lock);
	return ERR_PTR(err);
}

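/*
 * Destroy a QP: force the qp_grp back to RESET, then unlink it from the
 * owning ucontext and free its vNIC resources.
 */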
int usnic_ib_destroy_qp(struct ib_qp *qp)
{
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_vf *vf;

	usnic_dbg("\n");

	qp_grp = to_uqp_grp(qp);
	vf = qp_grp->vf;
	mutex_lock(&vf->pf->usdev_lock);
	if (usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RESET, NULL)) {
		usnic_err("Failed to move qp grp %u to reset\n",
				qp_grp->grp_id);
	}

	list_del(&qp_grp->link);
	qp_grp_destroy(qp_grp);
	mutex_unlock(&vf->pf->usdev_lock);

	return 0;
}

int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
				int attr_mask, struct ib_udata *udata)
{
	struct usnic_ib_qp_grp *qp_grp;
	int status;

	usnic_dbg("\n");

	qp_grp = to_uqp_grp(ibqp);

	mutex_lock(&qp_grp->vf->pf->usdev_lock);
	if ((attr_mask & IB_QP_PORT) && attr->port_num != 1) {
		/* usnic devices only have one port */
		status = -EINVAL;
		goto out_unlock;
	}
	if (attr_mask & IB_QP_STATE) {
		status = usnic_ib_qp_grp_modify(qp_grp, attr->qp_state, NULL);
	} else {
		usnic_err("Unhandled request, attr_mask=0x%x\n", attr_mask);
		status = -EINVAL;
	}

out_unlock:
	mutex_unlock(&qp_grp->vf->pf->usdev_lock);
	return status;
}

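/*
 * usNIC completion queues are polled entirely in userspace, so the kernel
 * only needs to hand back a placeholder ib_cq object here.
 */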
struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev,
				 const struct ib_cq_init_attr *attr,
				 struct ib_ucontext *context,
				 struct ib_udata *udata)
{
	struct ib_cq *cq;

	usnic_dbg("\n");
	if (attr->flags)
		return ERR_PTR(-EINVAL);

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	return cq;
}

int usnic_ib_destroy_cq(struct ib_cq *cq)
{
	usnic_dbg("\n");
	kfree(cq);
	return 0;
}

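/*
 * Register a memory region by pinning and IOMMU-mapping the user buffer
 * through uiom.  usNIC does not use lkey/rkey, so both are reported as 0.
 */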
struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
					u64 virt_addr, int access_flags,
					struct ib_udata *udata)
{
	struct usnic_ib_mr *mr;
	int err;

	usnic_dbg("start 0x%llx va 0x%llx length 0x%llx\n", start,
			virt_addr, length);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->umem = usnic_uiom_reg_get(to_upd(pd)->umem_pd, start, length,
					access_flags, 0);
	if (IS_ERR_OR_NULL(mr->umem)) {
		err = mr->umem ? PTR_ERR(mr->umem) : -EFAULT;
		goto err_free;
	}

	mr->ibmr.lkey = mr->ibmr.rkey = 0;
	return &mr->ibmr;

err_free:
	kfree(mr);
	return ERR_PTR(err);
}

int usnic_ib_dereg_mr(struct ib_mr *ibmr)
{
	struct usnic_ib_mr *mr = to_umr(ibmr);

	usnic_dbg("va 0x%lx length 0x%zx\n", mr->umem->va, mr->umem->length);

	usnic_uiom_reg_release(mr->umem, ibmr->uobject->context);
	kfree(mr);
	return 0;
}

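/*
 * A ucontext tracks the qp_grps created by one user process so that they
 * can be found again at mmap time and sanity-checked at teardown.
 */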
struct ib_ucontext *usnic_ib_alloc_ucontext(struct ib_device *ibdev,
							struct ib_udata *udata)
{
	struct usnic_ib_ucontext *context;
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);

	usnic_dbg("\n");

	context = kmalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&context->qp_grp_list);
	mutex_lock(&us_ibdev->usdev_lock);
	list_add_tail(&context->link, &us_ibdev->ctx_list);
	mutex_unlock(&us_ibdev->usdev_lock);

	return &context->ibucontext;
}

int usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct usnic_ib_ucontext *context = to_uucontext(ibcontext);
	struct usnic_ib_dev *us_ibdev = to_usdev(ibcontext->device);

	usnic_dbg("\n");

	mutex_lock(&us_ibdev->usdev_lock);
	BUG_ON(!list_empty(&context->qp_grp_list));
	list_del(&context->link);
	mutex_unlock(&us_ibdev->usdev_lock);
	kfree(context);
	return 0;
}

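/*
 * Map a VF's BAR0 into userspace.  The VF is selected by vm_pgoff and must
 * back one of the caller's qp_grps; the mapping must cover the whole BAR.
 */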
int usnic_ib_mmap(struct ib_ucontext *context,
				struct vm_area_struct *vma)
{
	struct usnic_ib_ucontext *uctx = to_ucontext(context);
	struct usnic_ib_dev *us_ibdev;
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_vf *vf;
	struct vnic_dev_bar *bar;
	dma_addr_t bus_addr;
	unsigned int len;
	unsigned int vfid;

	usnic_dbg("\n");

	us_ibdev = to_usdev(context->device);
	vma->vm_flags |= VM_IO;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vfid = vma->vm_pgoff;
	usnic_dbg("Page Offset %lu PAGE_SHIFT %u VFID %u\n",
			vma->vm_pgoff, PAGE_SHIFT, vfid);

	mutex_lock(&us_ibdev->usdev_lock);
	list_for_each_entry(qp_grp, &uctx->qp_grp_list, link) {
		vf = qp_grp->vf;
		if (usnic_vnic_get_index(vf->vnic) == vfid) {
			bar = usnic_vnic_get_bar(vf->vnic, 0);
			if ((vma->vm_end - vma->vm_start) != bar->len) {
				usnic_err("Bar0 Len %lu - Request map %lu\n",
						bar->len,
						vma->vm_end - vma->vm_start);
				mutex_unlock(&us_ibdev->usdev_lock);
				return -EINVAL;
			}
			bus_addr = bar->bus_addr;
			len = bar->len;
			usnic_dbg("bus: %pa vaddr: %p size: %lu\n",
					&bus_addr, bar->vaddr, bar->len);
			mutex_unlock(&us_ibdev->usdev_lock);

			return remap_pfn_range(vma,
						vma->vm_start,
						bus_addr >> PAGE_SHIFT,
						len, vma->vm_page_prot);
		}
	}

	mutex_unlock(&us_ibdev->usdev_lock);
	usnic_err("No VF %u found\n", vfid);
	return -EINVAL;
}

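/*
 * The verbs below are required by the ib_verbs interface but are never
 * used for usNIC: the data path (posting work requests, polling CQs) is
 * implemented entirely in userspace, so these kernel entry points simply
 * fail.
 */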
/* In ib callbacks section - Start of stub funcs */
struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd,
				 struct rdma_ah_attr *ah_attr,
				 u32 flags,
				 struct ib_udata *udata)
{
	usnic_dbg("\n");
	return ERR_PTR(-EPERM);
}

int usnic_ib_destroy_ah(struct ib_ah *ah, u32 flags)
{
	usnic_dbg("\n");
	return -EINVAL;
}

int usnic_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		       const struct ib_send_wr **bad_wr)
{
	usnic_dbg("\n");
	return -EINVAL;
}

int usnic_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		       const struct ib_recv_wr **bad_wr)
{
	usnic_dbg("\n");
	return -EINVAL;
}

int usnic_ib_poll_cq(struct ib_cq *ibcq, int num_entries,
				struct ib_wc *wc)
{
	usnic_dbg("\n");
	return -EINVAL;
}

int usnic_ib_req_notify_cq(struct ib_cq *cq,
					enum ib_cq_notify_flags flags)
{
	usnic_dbg("\n");
	return -EINVAL;
}

struct ib_mr *usnic_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	usnic_dbg("\n");
	return ERR_PTR(-ENOMEM);
}

/* In ib callbacks section - End of stub funcs */
/* End of ib callbacks section */