/*
 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>

#include "usnic_abi.h"
#include "usnic_ib.h"
#include "usnic_common_util.h"
#include "usnic_ib_qp_grp.h"
#include "usnic_fwd.h"
#include "usnic_log.h"
#include "usnic_uiom.h"
#include "usnic_transport.h"

#define USNIC_DEFAULT_TRANSPORT USNIC_TRANSPORT_ROCE_CUSTOM

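/*
 * Pack the first eight bytes of the ethtool firmware version string into
 * the u64 fw_ver field reported in ib_device_attr.
 */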
static void usnic_ib_fw_string_to_u64(char *fw_ver_str, u64 *fw_ver)
{
	/* Copy instead of casting to avoid unaligned/aliasing accesses. */
	memcpy(fw_ver, fw_ver_str, sizeof(*fw_ver));
}

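/*
 * Build the create_qp response consumed by userspace: the VF index, the
 * BAR0 bus address/length it will mmap(), the vNIC indices of the RQ/WQ/CQ
 * resources backing the QP group, and the transport of the default flow.
 */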
static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp,
					struct ib_udata *udata)
{
	struct usnic_ib_dev *us_ibdev;
	struct usnic_ib_create_qp_resp resp;
	struct pci_dev *pdev;
	struct vnic_dev_bar *bar;
	struct usnic_vnic_res_chunk *chunk;
	struct usnic_ib_qp_grp_flow *default_flow;
	int i, err;

	memset(&resp, 0, sizeof(resp));

	us_ibdev = qp_grp->vf->pf;
	pdev = usnic_vnic_get_pdev(qp_grp->vf->vnic);
	if (!pdev) {
		usnic_err("Failed to get pdev of qp_grp %d\n",
				qp_grp->grp_id);
		return -EFAULT;
	}

	bar = usnic_vnic_get_bar(qp_grp->vf->vnic, 0);
	if (!bar) {
		usnic_err("Failed to get bar0 of qp_grp %d vf %s\n",
				qp_grp->grp_id, pci_name(pdev));
		return -EFAULT;
	}

	resp.vfid = usnic_vnic_get_index(qp_grp->vf->vnic);
	resp.bar_bus_addr = bar->bus_addr;
	resp.bar_len = bar->len;

	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
	if (IS_ERR(chunk)) {
		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
			qp_grp->grp_id,
			PTR_ERR(chunk));
		return PTR_ERR(chunk);
	}

	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_RQ);
	resp.rq_cnt = chunk->cnt;
	for (i = 0; i < chunk->cnt; i++)
		resp.rq_idx[i] = chunk->res[i]->vnic_idx;

	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_WQ);
	if (IS_ERR(chunk)) {
		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_WQ),
			qp_grp->grp_id,
			PTR_ERR(chunk));
		return PTR_ERR(chunk);
	}

	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_WQ);
	resp.wq_cnt = chunk->cnt;
	for (i = 0; i < chunk->cnt; i++)
		resp.wq_idx[i] = chunk->res[i]->vnic_idx;

	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_CQ);
	if (IS_ERR(chunk)) {
		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_CQ),
			qp_grp->grp_id,
			PTR_ERR(chunk));
		return PTR_ERR(chunk);
	}

	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_CQ);
	resp.cq_cnt = chunk->cnt;
	for (i = 0; i < chunk->cnt; i++)
		resp.cq_idx[i] = chunk->res[i]->vnic_idx;

	default_flow = list_first_entry(&qp_grp->flows_lst,
					struct usnic_ib_qp_grp_flow, link);
	resp.transport = default_flow->trans_type;

	err = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (err) {
		usnic_err("Failed to copy udata for %s\n",
				us_ibdev->ib_dev.name);
		return err;
	}

	return 0;
}

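/*
 * Pick a VF with enough free vNIC resources for res_spec and create a QP
 * group on it.  When VF sharing is enabled, a VF already in use by this PD
 * is tried first; otherwise fall back to a completely unused VF.  Called
 * with usdev_lock held.
 */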
static struct usnic_ib_qp_grp*
find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
				struct usnic_ib_pd *pd,
				struct usnic_transport_spec *trans_spec,
				struct usnic_vnic_res_spec *res_spec)
{
	struct usnic_ib_vf *vf;
	struct usnic_vnic *vnic;
	struct usnic_ib_qp_grp *qp_grp;
	struct device *dev, **dev_list;
	int i, found = 0;

	BUG_ON(!mutex_is_locked(&us_ibdev->usdev_lock));

	if (list_empty(&us_ibdev->vf_dev_list)) {
		usnic_info("No vfs to allocate\n");
		return NULL;
	}

	if (usnic_ib_share_vf) {
		/* Try to find resources on a used vf which is in pd */
		dev_list = usnic_uiom_get_dev_list(pd->umem_pd);
		for (i = 0; dev_list[i]; i++) {
			dev = dev_list[i];
			vf = pci_get_drvdata(to_pci_dev(dev));
			spin_lock(&vf->lock);
			vnic = vf->vnic;
			if (!usnic_vnic_check_room(vnic, res_spec)) {
				usnic_dbg("Found used vnic %s from %s\n",
						us_ibdev->ib_dev.name,
						pci_name(usnic_vnic_get_pdev(
									vnic)));
				found = 1;
				break;
			}
			spin_unlock(&vf->lock);
		}
		usnic_uiom_free_dev_list(dev_list);
	}

	if (!found) {
		/* Try to find resources on an unused vf */
		list_for_each_entry(vf, &us_ibdev->vf_dev_list, link) {
			spin_lock(&vf->lock);
			vnic = vf->vnic;
			if (vf->qp_grp_ref_cnt == 0 &&
				usnic_vnic_check_room(vnic, res_spec) == 0) {
				found = 1;
				break;
			}
			spin_unlock(&vf->lock);
		}
	}

	if (!found) {
		usnic_info("No free qp grp found on %s\n",
				us_ibdev->ib_dev.name);
		return ERR_PTR(-ENOMEM);
	}

	qp_grp = usnic_ib_qp_grp_create(us_ibdev->ufdev, vf, pd, res_spec,
						trans_spec);
	spin_unlock(&vf->lock);
	if (IS_ERR_OR_NULL(qp_grp)) {
		usnic_err("Failed to allocate qp_grp\n");
		return ERR_PTR(qp_grp ? PTR_ERR(qp_grp) : -ENOMEM);
	}

	return qp_grp;
}

static void qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
{
	struct usnic_ib_vf *vf = qp_grp->vf;

	WARN_ON(qp_grp->state != IB_QPS_RESET);

	spin_lock(&vf->lock);
	usnic_ib_qp_grp_destroy(qp_grp);
	spin_unlock(&vf->lock);
}

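/*
 * Map an Ethernet link speed in Mb/s to the closest IB speed/width pair,
 * e.g. 10G is advertised as 1X FDR10 and 40G as 4X FDR10.  Anything faster
 * than 40G is reported as 4X EDR.
 */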
static void eth_speed_to_ib_speed(int speed, u8 *active_speed,
					u8 *active_width)
{
	if (speed <= 10000) {
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_FDR10;
	} else if (speed <= 20000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_DDR;
	} else if (speed <= 30000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_QDR;
	} else if (speed <= 40000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_FDR10;
	} else {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_EDR;
	}
}

static int create_qp_validate_user_data(struct usnic_ib_create_qp_cmd cmd)
{
	if (cmd.spec.trans_type <= USNIC_TRANSPORT_UNKNOWN ||
			cmd.spec.trans_type >= USNIC_TRANSPORT_MAX)
		return -EINVAL;

	return 0;
}

/* Start of ib callback functions */

enum rdma_link_layer usnic_ib_port_link_layer(struct ib_device *device,
						u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

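/*
 * Device limits are derived from per-VF resource counts: max_qp and max_cq
 * scale the per-VF WQ/RQ and CQ counts by the number of VFs bound to this
 * usNIC device.  Limits that usNIC does not support are reported as zero.
 */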
int usnic_ib_query_device(struct ib_device *ibdev,
			  struct ib_device_attr *props,
			  struct ib_udata *uhw)
{
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
	union ib_gid gid;
	struct ethtool_drvinfo info;
	int qp_per_vf;

	usnic_dbg("\n");
	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	mutex_lock(&us_ibdev->usdev_lock);
	us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info);
	memset(props, 0, sizeof(*props));
	usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr,
			&gid.raw[0]);
	memcpy(&props->sys_image_guid, &gid.global.interface_id,
		sizeof(gid.global.interface_id));
	usnic_ib_fw_string_to_u64(&info.fw_version[0], &props->fw_ver);
	props->max_mr_size = USNIC_UIOM_MAX_MR_SIZE;
	props->page_size_cap = USNIC_UIOM_PAGE_SIZE;
	props->vendor_id = PCI_VENDOR_ID_CISCO;
	props->vendor_part_id = PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC;
	props->hw_ver = us_ibdev->pdev->subsystem_device;
	qp_per_vf = max(us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_WQ],
			us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_RQ]);
	props->max_qp = qp_per_vf *
		atomic_read(&us_ibdev->vf_cnt.refcount);
	props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
	props->max_cq = us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ] *
		atomic_read(&us_ibdev->vf_cnt.refcount);
	props->max_pd = USNIC_UIOM_MAX_PD_CNT;
	props->max_mr = USNIC_UIOM_MAX_MR_CNT;
	props->local_ca_ack_delay = 0;
	props->max_pkeys = 0;
	props->atomic_cap = IB_ATOMIC_NONE;
	props->masked_atomic_cap = props->atomic_cap;
	props->max_qp_rd_atom = 0;
	props->max_qp_init_rd_atom = 0;
	props->max_res_rd_atom = 0;
	props->max_srq = 0;
	props->max_srq_wr = 0;
	props->max_srq_sge = 0;
	props->max_fast_reg_page_list_len = 0;
	props->max_mcast_grp = 0;
	props->max_mcast_qp_attach = 0;
	props->max_total_mcast_qp_attach = 0;
	props->max_map_per_fmr = 0;
	/* Owned by Userspace
	 * max_qp_wr, max_sge, max_sge_rd, max_cqe */
	mutex_unlock(&us_ibdev->usdev_lock);

	return 0;
}

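/*
 * Port state is derived from the underlying netdev: link down maps to
 * IB_PORT_DOWN, link up without an IP address to IB_PORT_INIT, and link up
 * with an IP address to IB_PORT_ACTIVE.
 */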
int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
				struct ib_port_attr *props)
{
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
	struct ethtool_link_ksettings cmd;

	usnic_dbg("\n");

	mutex_lock(&us_ibdev->usdev_lock);
	__ethtool_get_link_ksettings(us_ibdev->netdev, &cmd);
	memset(props, 0, sizeof(*props));

	props->lid = 0;
	props->lmc = 1;
	props->sm_lid = 0;
	props->sm_sl = 0;

	if (!us_ibdev->ufdev->link_up) {
		props->state = IB_PORT_DOWN;
		props->phys_state = 3;
	} else if (!us_ibdev->ufdev->inaddr) {
		props->state = IB_PORT_INIT;
		props->phys_state = 4;
	} else {
		props->state = IB_PORT_ACTIVE;
		props->phys_state = 5;
	}

	props->port_cap_flags = 0;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->bad_pkey_cntr = 0;
	props->qkey_viol_cntr = 0;
	eth_speed_to_ib_speed(cmd.base.speed, &props->active_speed,
			      &props->active_width);
	props->max_mtu = IB_MTU_4096;
	props->active_mtu = iboe_get_mtu(us_ibdev->ufdev->mtu);
	/* Userspace will adjust for hdrs */
	props->max_msg_sz = us_ibdev->ufdev->mtu;
	props->max_vl_num = 1;
	mutex_unlock(&us_ibdev->usdev_lock);

	return 0;
}

int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
				int qp_attr_mask,
				struct ib_qp_init_attr *qp_init_attr)
{
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_vf *vf;
	int err;

	usnic_dbg("\n");

	memset(qp_attr, 0, sizeof(*qp_attr));
	memset(qp_init_attr, 0, sizeof(*qp_init_attr));

	qp_grp = to_uqp_grp(qp);
	vf = qp_grp->vf;
	mutex_lock(&vf->pf->usdev_lock);
	usnic_dbg("\n");
	qp_attr->qp_state = qp_grp->state;
	qp_attr->cur_qp_state = qp_grp->state;

	switch (qp_grp->ibqp.qp_type) {
	case IB_QPT_UD:
		qp_attr->qkey = 0;
		break;
	default:
		usnic_err("Unexpected qp_type %d\n", qp_grp->ibqp.qp_type);
		err = -EINVAL;
		goto err_out;
	}

	mutex_unlock(&vf->pf->usdev_lock);
	return 0;

err_out:
	mutex_unlock(&vf->pf->usdev_lock);
	return err;
}

int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
				union ib_gid *gid)
{
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);

	usnic_dbg("\n");

	if (index > 1)
		return -EINVAL;

	mutex_lock(&us_ibdev->usdev_lock);
	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
	usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr,
			&gid->raw[0]);
	mutex_unlock(&us_ibdev->usdev_lock);

	return 0;
}

int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
				u16 *pkey)
{
	if (index > 1)
		return -EINVAL;

	*pkey = 0xffff;
	return 0;
}

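/*
 * A usNIC protection domain wraps a usnic_uiom PD, which tracks the memory
 * registrations made against this device on behalf of the user process.
 */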
struct ib_pd *usnic_ib_alloc_pd(struct ib_device *ibdev,
					struct ib_ucontext *context,
					struct ib_udata *udata)
{
	struct usnic_ib_pd *pd;
	void *umem_pd;

	usnic_dbg("\n");

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	umem_pd = pd->umem_pd = usnic_uiom_alloc_pd();
	if (IS_ERR_OR_NULL(umem_pd)) {
		kfree(pd);
		return ERR_PTR(umem_pd ? PTR_ERR(umem_pd) : -ENOMEM);
	}

	usnic_info("domain 0x%p allocated for context 0x%p and device %s\n",
			pd, context, ibdev->name);
	return &pd->ibpd;
}

int usnic_ib_dealloc_pd(struct ib_pd *pd)
{
	usnic_info("freeing domain 0x%p\n", pd);

	usnic_uiom_dealloc_pd((to_upd(pd))->umem_pd);
	kfree(pd);
	return 0;
}

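/*
 * QP creation: validate the transport spec passed in through udata, find a
 * VF with room for the minimum resource spec of that transport (bumping the
 * CQ count to two when the send and recv CQs differ), create the QP group
 * on it, and return the resource layout to userspace in the create_qp
 * response.
 */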
struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
					struct ib_qp_init_attr *init_attr,
					struct ib_udata *udata)
{
	int err;
	struct usnic_ib_dev *us_ibdev;
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_ucontext *ucontext;
	int cq_cnt;
	struct usnic_vnic_res_spec res_spec;
	struct usnic_ib_create_qp_cmd cmd;
	struct usnic_transport_spec trans_spec;

	usnic_dbg("\n");

	ucontext = to_uucontext(pd->uobject->context);
	us_ibdev = to_usdev(pd->device);

	if (init_attr->create_flags)
		return ERR_PTR(-EINVAL);

	err = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
	if (err) {
		usnic_err("%s: cannot copy udata for create_qp\n",
				us_ibdev->ib_dev.name);
		return ERR_PTR(-EINVAL);
	}

	err = create_qp_validate_user_data(cmd);
	if (err) {
		usnic_err("%s: Failed to validate user data\n",
				us_ibdev->ib_dev.name);
		return ERR_PTR(-EINVAL);
	}

	if (init_attr->qp_type != IB_QPT_UD) {
		usnic_err("%s asked to make a non-UD QP: %d\n",
				us_ibdev->ib_dev.name, init_attr->qp_type);
		return ERR_PTR(-EINVAL);
	}

	trans_spec = cmd.spec;
	mutex_lock(&us_ibdev->usdev_lock);
	cq_cnt = (init_attr->send_cq == init_attr->recv_cq) ? 1 : 2;
	res_spec = min_transport_spec[trans_spec.trans_type];
	usnic_vnic_res_spec_update(&res_spec, USNIC_VNIC_RES_TYPE_CQ, cq_cnt);
	qp_grp = find_free_vf_and_create_qp_grp(us_ibdev, to_upd(pd),
						&trans_spec,
						&res_spec);
	if (IS_ERR_OR_NULL(qp_grp)) {
		err = qp_grp ? PTR_ERR(qp_grp) : -ENOMEM;
		goto out_release_mutex;
	}

	err = usnic_ib_fill_create_qp_resp(qp_grp, udata);
	if (err) {
		err = -EBUSY;
		goto out_release_qp_grp;
	}

	qp_grp->ctx = ucontext;
	list_add_tail(&qp_grp->link, &ucontext->qp_grp_list);
	usnic_ib_log_vf(qp_grp->vf);
	mutex_unlock(&us_ibdev->usdev_lock);
	return &qp_grp->ibqp;

out_release_qp_grp:
	qp_grp_destroy(qp_grp);
out_release_mutex:
	mutex_unlock(&us_ibdev->usdev_lock);
	return ERR_PTR(err);
}

int usnic_ib_destroy_qp(struct ib_qp *qp)
{
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_vf *vf;

	usnic_dbg("\n");

	qp_grp = to_uqp_grp(qp);
	vf = qp_grp->vf;
	mutex_lock(&vf->pf->usdev_lock);
	if (usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RESET, NULL)) {
		usnic_err("Failed to move qp grp %u to reset\n",
				qp_grp->grp_id);
	}

	list_del(&qp_grp->link);
	qp_grp_destroy(qp_grp);
	mutex_unlock(&vf->pf->usdev_lock);

	return 0;
}

int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
				int attr_mask, struct ib_udata *udata)
{
	struct usnic_ib_qp_grp *qp_grp;
	int status;

	usnic_dbg("\n");

	qp_grp = to_uqp_grp(ibqp);

	mutex_lock(&qp_grp->vf->pf->usdev_lock);
	if ((attr_mask & IB_QP_PORT) && attr->port_num != 1) {
		/* usnic devices only have one port */
		status = -EINVAL;
		goto out_unlock;
	}
	if (attr_mask & IB_QP_STATE) {
		status = usnic_ib_qp_grp_modify(qp_grp, attr->qp_state, NULL);
	} else {
		usnic_err("Unhandled request, attr_mask=0x%x\n", attr_mask);
		status = -EINVAL;
	}

out_unlock:
	mutex_unlock(&qp_grp->vf->pf->usdev_lock);
	return status;
}

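/*
 * Completion queues are driven entirely from userspace on top of the mapped
 * vNIC resources, so the kernel only hands back a placeholder ib_cq for the
 * verbs layer to track.
 */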
struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev,
				 const struct ib_cq_init_attr *attr,
				 struct ib_ucontext *context,
				 struct ib_udata *udata)
{
	struct ib_cq *cq;

	usnic_dbg("\n");
	if (attr->flags)
		return ERR_PTR(-EINVAL);

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	return cq;
}

int usnic_ib_destroy_cq(struct ib_cq *cq)
{
	usnic_dbg("\n");
	kfree(cq);
	return 0;
}

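/*
 * Memory registration pins the user buffer and maps it through the PD's
 * usnic_uiom domain.  usNIC does not use lkey/rkey for data transfer, so
 * both are reported as zero.
 */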
struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
					u64 virt_addr, int access_flags,
					struct ib_udata *udata)
{
	struct usnic_ib_mr *mr;
	int err;

	usnic_dbg("start 0x%llx va 0x%llx length 0x%llx\n", start,
			virt_addr, length);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->umem = usnic_uiom_reg_get(to_upd(pd)->umem_pd, start, length,
					access_flags, 0);
	if (IS_ERR_OR_NULL(mr->umem)) {
		err = mr->umem ? PTR_ERR(mr->umem) : -EFAULT;
		goto err_free;
	}

	mr->ibmr.lkey = mr->ibmr.rkey = 0;
	return &mr->ibmr;

err_free:
	kfree(mr);
	return ERR_PTR(err);
}

int usnic_ib_dereg_mr(struct ib_mr *ibmr)
{
	struct usnic_ib_mr *mr = to_umr(ibmr);

	usnic_dbg("va 0x%lx length 0x%zx\n", mr->umem->va, mr->umem->length);

	usnic_uiom_reg_release(mr->umem, ibmr->pd->uobject->context->closing);
	kfree(mr);
	return 0;
}

struct ib_ucontext *usnic_ib_alloc_ucontext(struct ib_device *ibdev,
							struct ib_udata *udata)
{
	struct usnic_ib_ucontext *context;
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);

	usnic_dbg("\n");

	context = kmalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&context->qp_grp_list);
	mutex_lock(&us_ibdev->usdev_lock);
	list_add_tail(&context->link, &us_ibdev->ctx_list);
	mutex_unlock(&us_ibdev->usdev_lock);

	return &context->ibucontext;
}

int usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct usnic_ib_ucontext *context = to_uucontext(ibcontext);
	struct usnic_ib_dev *us_ibdev = to_usdev(ibcontext->device);

	usnic_dbg("\n");

	mutex_lock(&us_ibdev->usdev_lock);
	BUG_ON(!list_empty(&context->qp_grp_list));
	list_del(&context->link);
	mutex_unlock(&us_ibdev->usdev_lock);
	kfree(context);
	return 0;
}

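/*
 * mmap() of the uverbs device file maps a VF's BAR0 into userspace.  The VF
 * index is passed in the mmap offset (vm_pgoff) and must match a VF that
 * already backs one of this context's QP groups; the mapping must cover the
 * whole BAR.
 */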
int usnic_ib_mmap(struct ib_ucontext *context,
				struct vm_area_struct *vma)
{
	struct usnic_ib_ucontext *uctx = to_ucontext(context);
	struct usnic_ib_dev *us_ibdev;
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_vf *vf;
	struct vnic_dev_bar *bar;
	dma_addr_t bus_addr;
	unsigned int len;
	unsigned int vfid;

	usnic_dbg("\n");

	us_ibdev = to_usdev(context->device);
	vma->vm_flags |= VM_IO;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vfid = vma->vm_pgoff;
	usnic_dbg("Page Offset %lu PAGE_SHIFT %u VFID %u\n",
			vma->vm_pgoff, PAGE_SHIFT, vfid);

	mutex_lock(&us_ibdev->usdev_lock);
	list_for_each_entry(qp_grp, &uctx->qp_grp_list, link) {
		vf = qp_grp->vf;
		if (usnic_vnic_get_index(vf->vnic) == vfid) {
			bar = usnic_vnic_get_bar(vf->vnic, 0);
			if ((vma->vm_end - vma->vm_start) != bar->len) {
				usnic_err("Bar0 Len %lu - Request map %lu\n",
						bar->len,
						vma->vm_end - vma->vm_start);
				mutex_unlock(&us_ibdev->usdev_lock);
				return -EINVAL;
			}
			bus_addr = bar->bus_addr;
			len = bar->len;
			usnic_dbg("bus: %pa vaddr: %p size: %ld\n",
					&bus_addr, bar->vaddr, bar->len);
			mutex_unlock(&us_ibdev->usdev_lock);

			return remap_pfn_range(vma,
						vma->vm_start,
						bus_addr >> PAGE_SHIFT,
						len, vma->vm_page_prot);
		}
	}

	mutex_unlock(&us_ibdev->usdev_lock);
	usnic_err("No VF %u found\n", vfid);
	return -EINVAL;
}

/* In ib callbacks section - Start of stub funcs */
struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd,
				 struct ib_ah_attr *ah_attr,
				 struct ib_udata *udata)
{
	usnic_dbg("\n");
	return ERR_PTR(-EPERM);
}

int usnic_ib_destroy_ah(struct ib_ah *ah)
{
	usnic_dbg("\n");
	return -EINVAL;
}

int usnic_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
				struct ib_send_wr **bad_wr)
{
	usnic_dbg("\n");
	return -EINVAL;
}

int usnic_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
				struct ib_recv_wr **bad_wr)
{
	usnic_dbg("\n");
	return -EINVAL;
}

int usnic_ib_poll_cq(struct ib_cq *ibcq, int num_entries,
				struct ib_wc *wc)
{
	usnic_dbg("\n");
	return -EINVAL;
}

int usnic_ib_req_notify_cq(struct ib_cq *cq,
					enum ib_cq_notify_flags flags)
{
	usnic_dbg("\n");
	return -EINVAL;
}

struct ib_mr *usnic_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	usnic_dbg("\n");
	return ERR_PTR(-ENOMEM);
}

/* In ib callbacks section - End of stub funcs */
/* End of ib callbacks section */