1 /*
2  * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
4  * All rights reserved.
5  * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * OpenIB.org BSD license below:
12  *
13  *     Redistribution and use in source and binary forms, with or
14  *     without modification, are permitted provided that the following
15  *     conditions are met:
16  *
17  *      - Redistributions of source code must retain the above
18  *        copyright notice, this list of conditions and the following
19  *        disclaimer.
20  *
21  *      - Redistributions in binary form must reproduce the above
22  *        copyright notice, this list of conditions and the following
23  *        disclaimer in the documentation and/or other materials
24  *        provided with the distribution.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33  * SOFTWARE.
34  */
35 
36 #include <linux/sched.h>
37 #include <linux/pci.h>
38 #include <linux/errno.h>
39 #include <linux/kernel.h>
40 #include <linux/io.h>
41 #include <linux/slab.h>
42 #include <linux/mlx4/cmd.h>
43 #include <linux/mlx4/qp.h>
44 #include <linux/if_ether.h>
45 #include <linux/etherdevice.h>
46 
47 #include "mlx4.h"
48 #include "fw.h"
49 
50 #define MLX4_MAC_VALID		(1ull << 63)
51 
52 struct mac_res {
53 	struct list_head list;
54 	u64 mac;
55 	u8 port;
56 };
57 
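/*
 * Common tracking record embedded in every res_* type below.  Each entry
 * is linked into the owning slave's per-type list and into the per-type
 * rb-tree keyed by res_id, and carries the state-machine fields used by
 * the *_res_start_move_to()/res_end_move()/res_abort_move() helpers.
 */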
58 struct res_common {
59 	struct list_head	list;
60 	struct rb_node		node;
61 	u64		        res_id;
62 	int			owner;
63 	int			state;
64 	int			from_state;
65 	int			to_state;
66 	int			removing;
67 };
68 
69 enum {
70 	RES_ANY_BUSY = 1
71 };
72 
73 struct res_gid {
74 	struct list_head	list;
75 	u8			gid[16];
76 	enum mlx4_protocol	prot;
77 	enum mlx4_steer_type	steer;
78 };
79 
80 enum res_qp_states {
81 	RES_QP_BUSY = RES_ANY_BUSY,
82 
83 	/* QP number was allocated */
84 	RES_QP_RESERVED,
85 
86 	/* ICM memory for QP context was mapped */
87 	RES_QP_MAPPED,
88 
89 	/* QP is in hw ownership */
90 	RES_QP_HW
91 };
92 
93 struct res_qp {
94 	struct res_common	com;
95 	struct res_mtt	       *mtt;
96 	struct res_cq	       *rcq;
97 	struct res_cq	       *scq;
98 	struct res_srq	       *srq;
99 	struct list_head	mcg_list;
100 	spinlock_t		mcg_spl;
101 	int			local_qpn;
102 };
103 
104 enum res_mtt_states {
105 	RES_MTT_BUSY = RES_ANY_BUSY,
106 	RES_MTT_ALLOCATED,
107 };
108 
109 static inline const char *mtt_states_str(enum res_mtt_states state)
110 {
111 	switch (state) {
112 	case RES_MTT_BUSY: return "RES_MTT_BUSY";
113 	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
114 	default: return "Unknown";
115 	}
116 }
117 
118 struct res_mtt {
119 	struct res_common	com;
120 	int			order;
121 	atomic_t		ref_count;
122 };
123 
124 enum res_mpt_states {
125 	RES_MPT_BUSY = RES_ANY_BUSY,
126 	RES_MPT_RESERVED,
127 	RES_MPT_MAPPED,
128 	RES_MPT_HW,
129 };
130 
131 struct res_mpt {
132 	struct res_common	com;
133 	struct res_mtt	       *mtt;
134 	int			key;
135 };
136 
137 enum res_eq_states {
138 	RES_EQ_BUSY = RES_ANY_BUSY,
139 	RES_EQ_RESERVED,
140 	RES_EQ_HW,
141 };
142 
143 struct res_eq {
144 	struct res_common	com;
145 	struct res_mtt	       *mtt;
146 };
147 
148 enum res_cq_states {
149 	RES_CQ_BUSY = RES_ANY_BUSY,
150 	RES_CQ_ALLOCATED,
151 	RES_CQ_HW,
152 };
153 
154 struct res_cq {
155 	struct res_common	com;
156 	struct res_mtt	       *mtt;
157 	atomic_t		ref_count;
158 };
159 
160 enum res_srq_states {
161 	RES_SRQ_BUSY = RES_ANY_BUSY,
162 	RES_SRQ_ALLOCATED,
163 	RES_SRQ_HW,
164 };
165 
166 struct res_srq {
167 	struct res_common	com;
168 	struct res_mtt	       *mtt;
169 	struct res_cq	       *cq;
170 	atomic_t		ref_count;
171 };
172 
173 enum res_counter_states {
174 	RES_COUNTER_BUSY = RES_ANY_BUSY,
175 	RES_COUNTER_ALLOCATED,
176 };
177 
178 struct res_counter {
179 	struct res_common	com;
180 	int			port;
181 };
182 
183 enum res_xrcdn_states {
184 	RES_XRCD_BUSY = RES_ANY_BUSY,
185 	RES_XRCD_ALLOCATED,
186 };
187 
188 struct res_xrcdn {
189 	struct res_common	com;
190 	int			port;
191 };
192 
193 enum res_fs_rule_states {
194 	RES_FS_RULE_BUSY = RES_ANY_BUSY,
195 	RES_FS_RULE_ALLOCATED,
196 };
197 
198 struct res_fs_rule {
199 	struct res_common	com;
200 };
201 
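/*
 * The tracker keeps one rb-tree per resource type, keyed by res_id.  The
 * lookup and insert helpers below are plain rb-tree walks; callers hold
 * the tracker lock (mlx4_tlock).
 */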
202 static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
203 {
204 	struct rb_node *node = root->rb_node;
205 
206 	while (node) {
207 		struct res_common *res = container_of(node, struct res_common,
208 						      node);
209 
210 		if (res_id < res->res_id)
211 			node = node->rb_left;
212 		else if (res_id > res->res_id)
213 			node = node->rb_right;
214 		else
215 			return res;
216 	}
217 	return NULL;
218 }
219 
220 static int res_tracker_insert(struct rb_root *root, struct res_common *res)
221 {
222 	struct rb_node **new = &(root->rb_node), *parent = NULL;
223 
224 	/* Figure out where to put new node */
225 	while (*new) {
226 		struct res_common *this = container_of(*new, struct res_common,
227 						       node);
228 
229 		parent = *new;
230 		if (res->res_id < this->res_id)
231 			new = &((*new)->rb_left);
232 		else if (res->res_id > this->res_id)
233 			new = &((*new)->rb_right);
234 		else
235 			return -EEXIST;
236 	}
237 
238 	/* Add new node and rebalance tree. */
239 	rb_link_node(&res->node, parent, new);
240 	rb_insert_color(&res->node, root);
241 
242 	return 0;
243 }
244 
245 enum qp_transition {
246 	QP_TRANS_INIT2RTR,
247 	QP_TRANS_RTR2RTS,
248 	QP_TRANS_RTS2RTS,
249 	QP_TRANS_SQERR2RTS,
250 	QP_TRANS_SQD2SQD,
251 	QP_TRANS_SQD2RTS
252 };
253 
/* For debug use only */
255 static const char *ResourceType(enum mlx4_resource rt)
256 {
257 	switch (rt) {
258 	case RES_QP: return "RES_QP";
259 	case RES_CQ: return "RES_CQ";
260 	case RES_SRQ: return "RES_SRQ";
261 	case RES_MPT: return "RES_MPT";
262 	case RES_MTT: return "RES_MTT";
263 	case RES_MAC: return  "RES_MAC";
264 	case RES_EQ: return "RES_EQ";
265 	case RES_COUNTER: return "RES_COUNTER";
266 	case RES_FS_RULE: return "RES_FS_RULE";
267 	case RES_XRCD: return "RES_XRCD";
268 	default: return "Unknown resource type !!!";
	}
270 }
271 
272 int mlx4_init_resource_tracker(struct mlx4_dev *dev)
273 {
274 	struct mlx4_priv *priv = mlx4_priv(dev);
275 	int i;
276 	int t;
277 
	priv->mfunc.master.res_tracker.slave_list =
		kcalloc(dev->num_slaves, sizeof(struct slave_list),
			GFP_KERNEL);
281 	if (!priv->mfunc.master.res_tracker.slave_list)
282 		return -ENOMEM;
283 
	for (i = 0; i < dev->num_slaves; i++) {
285 		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
286 			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
287 				       slave_list[i].res_list[t]);
288 		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
289 	}
290 
291 	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
292 		 dev->num_slaves);
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
		priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
	return 0;
298 }
299 
300 void mlx4_free_resource_tracker(struct mlx4_dev *dev,
301 				enum mlx4_res_tracker_free_type type)
302 {
303 	struct mlx4_priv *priv = mlx4_priv(dev);
304 	int i;
305 
306 	if (priv->mfunc.master.res_tracker.slave_list) {
307 		if (type != RES_TR_FREE_STRUCTS_ONLY)
			for (i = 0; i < dev->num_slaves; i++)
309 				if (type == RES_TR_FREE_ALL ||
310 				    dev->caps.function != i)
311 					mlx4_delete_all_resources_for_slave(dev, i);
312 
313 		if (type != RES_TR_FREE_SLAVES_ONLY) {
314 			kfree(priv->mfunc.master.res_tracker.slave_list);
315 			priv->mfunc.master.res_tracker.slave_list = NULL;
316 		}
317 	}
318 }
319 
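/*
 * Rewrite the slave's virtual pkey index in the modify-QP mailbox to the
 * physical index taken from the master's virt2phys_pkey table.  Bit 6 of
 * the scheduler byte read from the mailbox selects the port.
 */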
320 static void update_pkey_index(struct mlx4_dev *dev, int slave,
321 			      struct mlx4_cmd_mailbox *inbox)
322 {
323 	u8 sched = *(u8 *)(inbox->buf + 64);
324 	u8 orig_index = *(u8 *)(inbox->buf + 35);
325 	u8 new_index;
326 	struct mlx4_priv *priv = mlx4_priv(dev);
327 	int port;
328 
329 	port = (sched >> 6 & 1) + 1;
330 
331 	new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
332 	*(u8 *)(inbox->buf + 35) = new_index;
333 }
334 
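/*
 * Force slave-visible GID indexes in the QP context: UD QPs get an index
 * tagged with the slave number (0x80 | slave); RC/UC address paths are set
 * to the slave's own index.
 */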
335 static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
336 		       u8 slave)
337 {
338 	struct mlx4_qp_context	*qp_ctx = inbox->buf + 8;
339 	enum mlx4_qp_optpar	optpar = be32_to_cpu(*(__be32 *) inbox->buf);
340 	u32			ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
341 
342 	if (MLX4_QP_ST_UD == ts)
343 		qp_ctx->pri_path.mgid_index = 0x80 | slave;
344 
345 	if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_UC == ts) {
346 		if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
347 			qp_ctx->pri_path.mgid_index = slave & 0x7F;
348 		if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
349 			qp_ctx->alt_path.mgid_index = slave & 0x7F;
350 	}
351 }
352 
353 static int mpt_mask(struct mlx4_dev *dev)
354 {
355 	return dev->caps.num_mpts - 1;
356 }
357 
358 static void *find_res(struct mlx4_dev *dev, int res_id,
359 		      enum mlx4_resource type)
360 {
361 	struct mlx4_priv *priv = mlx4_priv(dev);
362 
363 	return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
364 				  res_id);
365 }
366 
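/*
 * get_res()/put_res() bracket temporary use of a tracked resource:
 * get_res() verifies ownership, saves the current state and marks the
 * entry RES_ANY_BUSY; put_res() restores the saved state.  Typical usage
 * (sketch, mirroring e.g. mlx4_QUERY_MPT_wrapper() below):
 *
 *	err = get_res(dev, slave, id, RES_MPT, &mpt);
 *	if (err)
 *		return err;
 *	... use mpt while it is marked busy ...
 *	put_res(dev, slave, id, RES_MPT);
 */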
367 static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
368 		   enum mlx4_resource type,
369 		   void *res)
370 {
371 	struct res_common *r;
372 	int err = 0;
373 
374 	spin_lock_irq(mlx4_tlock(dev));
375 	r = find_res(dev, res_id, type);
376 	if (!r) {
		err = -ENOENT;
378 		goto exit;
379 	}
380 
381 	if (r->state == RES_ANY_BUSY) {
382 		err = -EBUSY;
383 		goto exit;
384 	}
385 
386 	if (r->owner != slave) {
387 		err = -EPERM;
388 		goto exit;
389 	}
390 
391 	r->from_state = r->state;
392 	r->state = RES_ANY_BUSY;
393 
394 	if (res)
395 		*((struct res_common **)res) = r;
396 
397 exit:
398 	spin_unlock_irq(mlx4_tlock(dev));
399 	return err;
400 }
401 
402 int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
403 				    enum mlx4_resource type,
404 				    u64 res_id, int *slave)
405 {
407 	struct res_common *r;
408 	int err = -ENOENT;
409 	int id = res_id;
410 
411 	if (type == RES_QP)
412 		id &= 0x7fffff;
413 	spin_lock(mlx4_tlock(dev));
414 
415 	r = find_res(dev, id, type);
416 	if (r) {
417 		*slave = r->owner;
418 		err = 0;
419 	}
420 	spin_unlock(mlx4_tlock(dev));
421 
422 	return err;
423 }
424 
425 static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
426 		    enum mlx4_resource type)
427 {
428 	struct res_common *r;
429 
430 	spin_lock_irq(mlx4_tlock(dev));
431 	r = find_res(dev, res_id, type);
432 	if (r)
433 		r->state = r->from_state;
434 	spin_unlock_irq(mlx4_tlock(dev));
435 }
436 
437 static struct res_common *alloc_qp_tr(int id)
438 {
439 	struct res_qp *ret;
440 
441 	ret = kzalloc(sizeof *ret, GFP_KERNEL);
442 	if (!ret)
443 		return NULL;
444 
445 	ret->com.res_id = id;
446 	ret->com.state = RES_QP_RESERVED;
447 	ret->local_qpn = id;
448 	INIT_LIST_HEAD(&ret->mcg_list);
449 	spin_lock_init(&ret->mcg_spl);
450 
451 	return &ret->com;
452 }
453 
454 static struct res_common *alloc_mtt_tr(int id, int order)
455 {
456 	struct res_mtt *ret;
457 
458 	ret = kzalloc(sizeof *ret, GFP_KERNEL);
459 	if (!ret)
460 		return NULL;
461 
462 	ret->com.res_id = id;
463 	ret->order = order;
464 	ret->com.state = RES_MTT_ALLOCATED;
465 	atomic_set(&ret->ref_count, 0);
466 
467 	return &ret->com;
468 }
469 
470 static struct res_common *alloc_mpt_tr(int id, int key)
471 {
472 	struct res_mpt *ret;
473 
474 	ret = kzalloc(sizeof *ret, GFP_KERNEL);
475 	if (!ret)
476 		return NULL;
477 
478 	ret->com.res_id = id;
479 	ret->com.state = RES_MPT_RESERVED;
480 	ret->key = key;
481 
482 	return &ret->com;
483 }
484 
485 static struct res_common *alloc_eq_tr(int id)
486 {
487 	struct res_eq *ret;
488 
489 	ret = kzalloc(sizeof *ret, GFP_KERNEL);
490 	if (!ret)
491 		return NULL;
492 
493 	ret->com.res_id = id;
494 	ret->com.state = RES_EQ_RESERVED;
495 
496 	return &ret->com;
497 }
498 
499 static struct res_common *alloc_cq_tr(int id)
500 {
501 	struct res_cq *ret;
502 
503 	ret = kzalloc(sizeof *ret, GFP_KERNEL);
504 	if (!ret)
505 		return NULL;
506 
507 	ret->com.res_id = id;
508 	ret->com.state = RES_CQ_ALLOCATED;
509 	atomic_set(&ret->ref_count, 0);
510 
511 	return &ret->com;
512 }
513 
514 static struct res_common *alloc_srq_tr(int id)
515 {
516 	struct res_srq *ret;
517 
518 	ret = kzalloc(sizeof *ret, GFP_KERNEL);
519 	if (!ret)
520 		return NULL;
521 
522 	ret->com.res_id = id;
523 	ret->com.state = RES_SRQ_ALLOCATED;
524 	atomic_set(&ret->ref_count, 0);
525 
526 	return &ret->com;
527 }
528 
529 static struct res_common *alloc_counter_tr(int id)
530 {
531 	struct res_counter *ret;
532 
533 	ret = kzalloc(sizeof *ret, GFP_KERNEL);
534 	if (!ret)
535 		return NULL;
536 
537 	ret->com.res_id = id;
538 	ret->com.state = RES_COUNTER_ALLOCATED;
539 
540 	return &ret->com;
541 }
542 
543 static struct res_common *alloc_xrcdn_tr(int id)
544 {
545 	struct res_xrcdn *ret;
546 
547 	ret = kzalloc(sizeof *ret, GFP_KERNEL);
548 	if (!ret)
549 		return NULL;
550 
551 	ret->com.res_id = id;
552 	ret->com.state = RES_XRCD_ALLOCATED;
553 
554 	return &ret->com;
555 }
556 
557 static struct res_common *alloc_fs_rule_tr(u64 id)
558 {
559 	struct res_fs_rule *ret;
560 
561 	ret = kzalloc(sizeof *ret, GFP_KERNEL);
562 	if (!ret)
563 		return NULL;
564 
565 	ret->com.res_id = id;
566 	ret->com.state = RES_FS_RULE_ALLOCATED;
567 
568 	return &ret->com;
569 }
570 
571 static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
572 				   int extra)
573 {
574 	struct res_common *ret;
575 
576 	switch (type) {
577 	case RES_QP:
578 		ret = alloc_qp_tr(id);
579 		break;
580 	case RES_MPT:
581 		ret = alloc_mpt_tr(id, extra);
582 		break;
583 	case RES_MTT:
584 		ret = alloc_mtt_tr(id, extra);
585 		break;
586 	case RES_EQ:
587 		ret = alloc_eq_tr(id);
588 		break;
589 	case RES_CQ:
590 		ret = alloc_cq_tr(id);
591 		break;
592 	case RES_SRQ:
593 		ret = alloc_srq_tr(id);
594 		break;
595 	case RES_MAC:
596 		printk(KERN_ERR "implementation missing\n");
597 		return NULL;
598 	case RES_COUNTER:
599 		ret = alloc_counter_tr(id);
600 		break;
601 	case RES_XRCD:
602 		ret = alloc_xrcdn_tr(id);
603 		break;
604 	case RES_FS_RULE:
605 		ret = alloc_fs_rule_tr(id);
606 		break;
607 	default:
608 		return NULL;
609 	}
610 	if (ret)
611 		ret->owner = slave;
612 
613 	return ret;
614 }
615 
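/*
 * Grant a contiguous range of resource ids [base, base + count) to a
 * slave: tracking entries are allocated outside the lock, then inserted
 * into the rb-tree and the slave's list under mlx4_tlock(), undoing
 * everything if any id is already tracked.
 */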
616 static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
617 			 enum mlx4_resource type, int extra)
618 {
619 	int i;
620 	int err;
621 	struct mlx4_priv *priv = mlx4_priv(dev);
622 	struct res_common **res_arr;
623 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
624 	struct rb_root *root = &tracker->res_tree[type];
625 
	res_arr = kcalloc(count, sizeof *res_arr, GFP_KERNEL);
627 	if (!res_arr)
628 		return -ENOMEM;
629 
630 	for (i = 0; i < count; ++i) {
631 		res_arr[i] = alloc_tr(base + i, type, slave, extra);
632 		if (!res_arr[i]) {
633 			for (--i; i >= 0; --i)
634 				kfree(res_arr[i]);
635 
636 			kfree(res_arr);
637 			return -ENOMEM;
638 		}
639 	}
640 
641 	spin_lock_irq(mlx4_tlock(dev));
642 	for (i = 0; i < count; ++i) {
643 		if (find_res(dev, base + i, type)) {
644 			err = -EEXIST;
645 			goto undo;
646 		}
647 		err = res_tracker_insert(root, res_arr[i]);
648 		if (err)
649 			goto undo;
650 		list_add_tail(&res_arr[i]->list,
651 			      &tracker->slave_list[slave].res_list[type]);
652 	}
653 	spin_unlock_irq(mlx4_tlock(dev));
654 	kfree(res_arr);
655 
656 	return 0;
657 
undo:
	for (--i; i >= 0; --i) {
		rb_erase(&res_arr[i]->node, root);
		list_del(&res_arr[i]->list);
	}
661 
662 	spin_unlock_irq(mlx4_tlock(dev));
663 
664 	for (i = 0; i < count; ++i)
665 		kfree(res_arr[i]);
666 
667 	kfree(res_arr);
668 
669 	return err;
670 }
671 
672 static int remove_qp_ok(struct res_qp *res)
673 {
674 	if (res->com.state == RES_QP_BUSY)
675 		return -EBUSY;
676 	else if (res->com.state != RES_QP_RESERVED)
677 		return -EPERM;
678 
679 	return 0;
680 }
681 
682 static int remove_mtt_ok(struct res_mtt *res, int order)
683 {
684 	if (res->com.state == RES_MTT_BUSY ||
685 	    atomic_read(&res->ref_count)) {
686 		printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
687 		       __func__, __LINE__,
688 		       mtt_states_str(res->com.state),
689 		       atomic_read(&res->ref_count));
690 		return -EBUSY;
691 	} else if (res->com.state != RES_MTT_ALLOCATED)
692 		return -EPERM;
693 	else if (res->order != order)
694 		return -EINVAL;
695 
696 	return 0;
697 }
698 
699 static int remove_mpt_ok(struct res_mpt *res)
700 {
701 	if (res->com.state == RES_MPT_BUSY)
702 		return -EBUSY;
703 	else if (res->com.state != RES_MPT_RESERVED)
704 		return -EPERM;
705 
706 	return 0;
707 }
708 
709 static int remove_eq_ok(struct res_eq *res)
710 {
	if (res->com.state == RES_EQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_EQ_RESERVED)
714 		return -EPERM;
715 
716 	return 0;
717 }
718 
719 static int remove_counter_ok(struct res_counter *res)
720 {
721 	if (res->com.state == RES_COUNTER_BUSY)
722 		return -EBUSY;
723 	else if (res->com.state != RES_COUNTER_ALLOCATED)
724 		return -EPERM;
725 
726 	return 0;
727 }
728 
729 static int remove_xrcdn_ok(struct res_xrcdn *res)
730 {
731 	if (res->com.state == RES_XRCD_BUSY)
732 		return -EBUSY;
733 	else if (res->com.state != RES_XRCD_ALLOCATED)
734 		return -EPERM;
735 
736 	return 0;
737 }
738 
739 static int remove_fs_rule_ok(struct res_fs_rule *res)
740 {
741 	if (res->com.state == RES_FS_RULE_BUSY)
742 		return -EBUSY;
743 	else if (res->com.state != RES_FS_RULE_ALLOCATED)
744 		return -EPERM;
745 
746 	return 0;
747 }
748 
749 static int remove_cq_ok(struct res_cq *res)
750 {
751 	if (res->com.state == RES_CQ_BUSY)
752 		return -EBUSY;
753 	else if (res->com.state != RES_CQ_ALLOCATED)
754 		return -EPERM;
755 
756 	return 0;
757 }
758 
759 static int remove_srq_ok(struct res_srq *res)
760 {
761 	if (res->com.state == RES_SRQ_BUSY)
762 		return -EBUSY;
763 	else if (res->com.state != RES_SRQ_ALLOCATED)
764 		return -EPERM;
765 
766 	return 0;
767 }
768 
769 static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
770 {
771 	switch (type) {
772 	case RES_QP:
773 		return remove_qp_ok((struct res_qp *)res);
774 	case RES_CQ:
775 		return remove_cq_ok((struct res_cq *)res);
776 	case RES_SRQ:
777 		return remove_srq_ok((struct res_srq *)res);
778 	case RES_MPT:
779 		return remove_mpt_ok((struct res_mpt *)res);
780 	case RES_MTT:
781 		return remove_mtt_ok((struct res_mtt *)res, extra);
782 	case RES_MAC:
783 		return -ENOSYS;
784 	case RES_EQ:
785 		return remove_eq_ok((struct res_eq *)res);
786 	case RES_COUNTER:
787 		return remove_counter_ok((struct res_counter *)res);
788 	case RES_XRCD:
789 		return remove_xrcdn_ok((struct res_xrcdn *)res);
790 	case RES_FS_RULE:
791 		return remove_fs_rule_ok((struct res_fs_rule *)res);
792 	default:
793 		return -EINVAL;
794 	}
795 }
796 
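/*
 * Release a range of ids previously granted with add_res_range(): first
 * verify, under the lock, that every entry exists, is owned by the slave
 * and is in a removable state, then unlink and free them all.
 */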
797 static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
798 			 enum mlx4_resource type, int extra)
799 {
800 	u64 i;
801 	int err;
802 	struct mlx4_priv *priv = mlx4_priv(dev);
803 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
804 	struct res_common *r;
805 
806 	spin_lock_irq(mlx4_tlock(dev));
807 	for (i = base; i < base + count; ++i) {
808 		r = res_tracker_lookup(&tracker->res_tree[type], i);
809 		if (!r) {
810 			err = -ENOENT;
811 			goto out;
812 		}
813 		if (r->owner != slave) {
814 			err = -EPERM;
815 			goto out;
816 		}
817 		err = remove_ok(r, type, extra);
818 		if (err)
819 			goto out;
820 	}
821 
822 	for (i = base; i < base + count; ++i) {
823 		r = res_tracker_lookup(&tracker->res_tree[type], i);
824 		rb_erase(&r->node, &tracker->res_tree[type]);
825 		list_del(&r->list);
826 		kfree(r);
827 	}
828 	err = 0;
829 
830 out:
831 	spin_unlock_irq(mlx4_tlock(dev));
832 
833 	return err;
834 }
835 
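/*
 * The *_res_start_move_to() helpers implement the per-type state machines:
 * they validate the requested transition, record the from/to states and
 * park the entry in the BUSY state.  The caller performs the FW command
 * and finishes with res_end_move() on success or res_abort_move() on
 * failure, e.g. (sketch of the pattern used by the wrappers below):
 *
 *	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
 *	if (err)
 *		return err;
 *	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
 *	if (err) {
 *		res_abort_move(dev, slave, RES_QP, qpn);
 *		return err;
 *	}
 *	res_end_move(dev, slave, RES_QP, qpn);
 */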
836 static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
837 				enum res_qp_states state, struct res_qp **qp,
838 				int alloc)
839 {
840 	struct mlx4_priv *priv = mlx4_priv(dev);
841 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
842 	struct res_qp *r;
843 	int err = 0;
844 
845 	spin_lock_irq(mlx4_tlock(dev));
846 	r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
847 	if (!r)
848 		err = -ENOENT;
849 	else if (r->com.owner != slave)
850 		err = -EPERM;
851 	else {
852 		switch (state) {
853 		case RES_QP_BUSY:
854 			mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
855 				 __func__, r->com.res_id);
856 			err = -EBUSY;
857 			break;
858 
859 		case RES_QP_RESERVED:
860 			if (r->com.state == RES_QP_MAPPED && !alloc)
861 				break;
862 
863 			mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
864 			err = -EINVAL;
865 			break;
866 
867 		case RES_QP_MAPPED:
868 			if ((r->com.state == RES_QP_RESERVED && alloc) ||
869 			    r->com.state == RES_QP_HW)
870 				break;
871 			else {
872 				mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
873 					  r->com.res_id);
874 				err = -EINVAL;
875 			}
876 
877 			break;
878 
879 		case RES_QP_HW:
880 			if (r->com.state != RES_QP_MAPPED)
881 				err = -EINVAL;
882 			break;
883 		default:
884 			err = -EINVAL;
885 		}
886 
887 		if (!err) {
888 			r->com.from_state = r->com.state;
889 			r->com.to_state = state;
890 			r->com.state = RES_QP_BUSY;
891 			if (qp)
892 				*qp = r;
893 		}
894 	}
895 
896 	spin_unlock_irq(mlx4_tlock(dev));
897 
898 	return err;
899 }
900 
901 static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
902 				enum res_mpt_states state, struct res_mpt **mpt)
903 {
904 	struct mlx4_priv *priv = mlx4_priv(dev);
905 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
906 	struct res_mpt *r;
907 	int err = 0;
908 
909 	spin_lock_irq(mlx4_tlock(dev));
910 	r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
911 	if (!r)
912 		err = -ENOENT;
913 	else if (r->com.owner != slave)
914 		err = -EPERM;
915 	else {
916 		switch (state) {
917 		case RES_MPT_BUSY:
918 			err = -EINVAL;
919 			break;
920 
921 		case RES_MPT_RESERVED:
922 			if (r->com.state != RES_MPT_MAPPED)
923 				err = -EINVAL;
924 			break;
925 
926 		case RES_MPT_MAPPED:
927 			if (r->com.state != RES_MPT_RESERVED &&
928 			    r->com.state != RES_MPT_HW)
929 				err = -EINVAL;
930 			break;
931 
932 		case RES_MPT_HW:
933 			if (r->com.state != RES_MPT_MAPPED)
934 				err = -EINVAL;
935 			break;
936 		default:
937 			err = -EINVAL;
938 		}
939 
940 		if (!err) {
941 			r->com.from_state = r->com.state;
942 			r->com.to_state = state;
943 			r->com.state = RES_MPT_BUSY;
944 			if (mpt)
945 				*mpt = r;
946 		}
947 	}
948 
949 	spin_unlock_irq(mlx4_tlock(dev));
950 
951 	return err;
952 }
953 
954 static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
955 				enum res_eq_states state, struct res_eq **eq)
956 {
957 	struct mlx4_priv *priv = mlx4_priv(dev);
958 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
959 	struct res_eq *r;
960 	int err = 0;
961 
962 	spin_lock_irq(mlx4_tlock(dev));
963 	r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
964 	if (!r)
965 		err = -ENOENT;
966 	else if (r->com.owner != slave)
967 		err = -EPERM;
968 	else {
969 		switch (state) {
970 		case RES_EQ_BUSY:
971 			err = -EINVAL;
972 			break;
973 
974 		case RES_EQ_RESERVED:
975 			if (r->com.state != RES_EQ_HW)
976 				err = -EINVAL;
977 			break;
978 
979 		case RES_EQ_HW:
980 			if (r->com.state != RES_EQ_RESERVED)
981 				err = -EINVAL;
982 			break;
983 
984 		default:
985 			err = -EINVAL;
986 		}
987 
988 		if (!err) {
989 			r->com.from_state = r->com.state;
990 			r->com.to_state = state;
991 			r->com.state = RES_EQ_BUSY;
992 			if (eq)
993 				*eq = r;
994 		}
995 	}
996 
997 	spin_unlock_irq(mlx4_tlock(dev));
998 
999 	return err;
1000 }
1001 
1002 static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
1003 				enum res_cq_states state, struct res_cq **cq)
1004 {
1005 	struct mlx4_priv *priv = mlx4_priv(dev);
1006 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1007 	struct res_cq *r;
1008 	int err;
1009 
1010 	spin_lock_irq(mlx4_tlock(dev));
1011 	r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
1012 	if (!r)
1013 		err = -ENOENT;
1014 	else if (r->com.owner != slave)
1015 		err = -EPERM;
1016 	else {
1017 		switch (state) {
1018 		case RES_CQ_BUSY:
1019 			err = -EBUSY;
1020 			break;
1021 
1022 		case RES_CQ_ALLOCATED:
1023 			if (r->com.state != RES_CQ_HW)
1024 				err = -EINVAL;
1025 			else if (atomic_read(&r->ref_count))
1026 				err = -EBUSY;
1027 			else
1028 				err = 0;
1029 			break;
1030 
1031 		case RES_CQ_HW:
1032 			if (r->com.state != RES_CQ_ALLOCATED)
1033 				err = -EINVAL;
1034 			else
1035 				err = 0;
1036 			break;
1037 
1038 		default:
1039 			err = -EINVAL;
1040 		}
1041 
1042 		if (!err) {
1043 			r->com.from_state = r->com.state;
1044 			r->com.to_state = state;
1045 			r->com.state = RES_CQ_BUSY;
1046 			if (cq)
1047 				*cq = r;
1048 		}
1049 	}
1050 
1051 	spin_unlock_irq(mlx4_tlock(dev));
1052 
1053 	return err;
1054 }
1055 
1056 static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1057 				 enum res_cq_states state, struct res_srq **srq)
1058 {
1059 	struct mlx4_priv *priv = mlx4_priv(dev);
1060 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1061 	struct res_srq *r;
1062 	int err = 0;
1063 
1064 	spin_lock_irq(mlx4_tlock(dev));
1065 	r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
1066 	if (!r)
1067 		err = -ENOENT;
1068 	else if (r->com.owner != slave)
1069 		err = -EPERM;
1070 	else {
1071 		switch (state) {
1072 		case RES_SRQ_BUSY:
1073 			err = -EINVAL;
1074 			break;
1075 
1076 		case RES_SRQ_ALLOCATED:
1077 			if (r->com.state != RES_SRQ_HW)
1078 				err = -EINVAL;
1079 			else if (atomic_read(&r->ref_count))
1080 				err = -EBUSY;
1081 			break;
1082 
1083 		case RES_SRQ_HW:
1084 			if (r->com.state != RES_SRQ_ALLOCATED)
1085 				err = -EINVAL;
1086 			break;
1087 
1088 		default:
1089 			err = -EINVAL;
1090 		}
1091 
1092 		if (!err) {
1093 			r->com.from_state = r->com.state;
1094 			r->com.to_state = state;
1095 			r->com.state = RES_SRQ_BUSY;
1096 			if (srq)
1097 				*srq = r;
1098 		}
1099 	}
1100 
1101 	spin_unlock_irq(mlx4_tlock(dev));
1102 
1103 	return err;
1104 }
1105 
1106 static void res_abort_move(struct mlx4_dev *dev, int slave,
1107 			   enum mlx4_resource type, int id)
1108 {
1109 	struct mlx4_priv *priv = mlx4_priv(dev);
1110 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1111 	struct res_common *r;
1112 
1113 	spin_lock_irq(mlx4_tlock(dev));
1114 	r = res_tracker_lookup(&tracker->res_tree[type], id);
1115 	if (r && (r->owner == slave))
1116 		r->state = r->from_state;
1117 	spin_unlock_irq(mlx4_tlock(dev));
1118 }
1119 
1120 static void res_end_move(struct mlx4_dev *dev, int slave,
1121 			 enum mlx4_resource type, int id)
1122 {
1123 	struct mlx4_priv *priv = mlx4_priv(dev);
1124 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1125 	struct res_common *r;
1126 
1127 	spin_lock_irq(mlx4_tlock(dev));
1128 	r = res_tracker_lookup(&tracker->res_tree[type], id);
1129 	if (r && (r->owner == slave))
1130 		r->state = r->to_state;
1131 	spin_unlock_irq(mlx4_tlock(dev));
1132 }
1133 
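/*
 * A "valid reserved" QP number is one of the device's reserved (proxy)
 * QPs that this slave may map.  Such QPs are never obtained through
 * RES_OP_RESERVE, so they are added to the tracker on first ICM mapping.
 */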
1134 static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1135 {
1136 	return mlx4_is_qp_reserved(dev, qpn) &&
1137 		(mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
1138 }
1139 
1140 static int fw_reserved(struct mlx4_dev *dev, int qpn)
1141 {
1142 	return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
1143 }
1144 
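/*
 * Slave QP allocation is two-phased, mirroring the PF flow: RES_OP_RESERVE
 * reserves a QP number range (__mlx4_qp_reserve_range()), RES_OP_MAP_ICM
 * then maps ICM for one QP and moves it to RES_QP_MAPPED.  FW-reserved QPs
 * skip the ICM step.
 */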
1145 static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1146 			u64 in_param, u64 *out_param)
1147 {
1148 	int err;
1149 	int count;
1150 	int align;
1151 	int base;
1152 	int qpn;
1153 
1154 	switch (op) {
1155 	case RES_OP_RESERVE:
1156 		count = get_param_l(&in_param);
1157 		align = get_param_h(&in_param);
1158 		err = __mlx4_qp_reserve_range(dev, count, align, &base);
1159 		if (err)
1160 			return err;
1161 
1162 		err = add_res_range(dev, slave, base, count, RES_QP, 0);
1163 		if (err) {
1164 			__mlx4_qp_release_range(dev, base, count);
1165 			return err;
1166 		}
1167 		set_param_l(out_param, base);
1168 		break;
1169 	case RES_OP_MAP_ICM:
1170 		qpn = get_param_l(&in_param) & 0x7fffff;
1171 		if (valid_reserved(dev, slave, qpn)) {
1172 			err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1173 			if (err)
1174 				return err;
1175 		}
1176 
1177 		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1178 					   NULL, 1);
1179 		if (err)
1180 			return err;
1181 
1182 		if (!fw_reserved(dev, qpn)) {
1183 			err = __mlx4_qp_alloc_icm(dev, qpn);
1184 			if (err) {
1185 				res_abort_move(dev, slave, RES_QP, qpn);
1186 				return err;
1187 			}
1188 		}
1189 
1190 		res_end_move(dev, slave, RES_QP, qpn);
1191 		break;
1192 
1193 	default:
1194 		err = -EINVAL;
1195 		break;
1196 	}
1197 	return err;
1198 }
1199 
1200 static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1201 			 u64 in_param, u64 *out_param)
1202 {
1203 	int err = -EINVAL;
1204 	int base;
1205 	int order;
1206 
1207 	if (op != RES_OP_RESERVE_AND_MAP)
1208 		return err;
1209 
1210 	order = get_param_l(&in_param);
1211 	base = __mlx4_alloc_mtt_range(dev, order);
1212 	if (base == -1)
1213 		return -ENOMEM;
1214 
1215 	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1216 	if (err)
1217 		__mlx4_free_mtt_range(dev, base, order);
1218 	else
1219 		set_param_l(out_param, base);
1220 
1221 	return err;
1222 }
1223 
1224 static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1225 			 u64 in_param, u64 *out_param)
1226 {
1227 	int err = -EINVAL;
1228 	int index;
1229 	int id;
1230 	struct res_mpt *mpt;
1231 
1232 	switch (op) {
1233 	case RES_OP_RESERVE:
1234 		index = __mlx4_mr_reserve(dev);
1235 		if (index == -1)
1236 			break;
1237 		id = index & mpt_mask(dev);
1238 
1239 		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1240 		if (err) {
1241 			__mlx4_mr_release(dev, index);
1242 			break;
1243 		}
1244 		set_param_l(out_param, index);
1245 		break;
1246 	case RES_OP_MAP_ICM:
1247 		index = get_param_l(&in_param);
1248 		id = index & mpt_mask(dev);
1249 		err = mr_res_start_move_to(dev, slave, id,
1250 					   RES_MPT_MAPPED, &mpt);
1251 		if (err)
1252 			return err;
1253 
1254 		err = __mlx4_mr_alloc_icm(dev, mpt->key);
1255 		if (err) {
1256 			res_abort_move(dev, slave, RES_MPT, id);
1257 			return err;
1258 		}
1259 
1260 		res_end_move(dev, slave, RES_MPT, id);
1261 		break;
1262 	}
1263 	return err;
1264 }
1265 
1266 static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1267 			u64 in_param, u64 *out_param)
1268 {
1269 	int cqn;
1270 	int err;
1271 
1272 	switch (op) {
1273 	case RES_OP_RESERVE_AND_MAP:
1274 		err = __mlx4_cq_alloc_icm(dev, &cqn);
1275 		if (err)
1276 			break;
1277 
1278 		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1279 		if (err) {
1280 			__mlx4_cq_free_icm(dev, cqn);
1281 			break;
1282 		}
1283 
1284 		set_param_l(out_param, cqn);
1285 		break;
1286 
1287 	default:
1288 		err = -EINVAL;
1289 	}
1290 
1291 	return err;
1292 }
1293 
1294 static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1295 			 u64 in_param, u64 *out_param)
1296 {
1297 	int srqn;
1298 	int err;
1299 
1300 	switch (op) {
1301 	case RES_OP_RESERVE_AND_MAP:
1302 		err = __mlx4_srq_alloc_icm(dev, &srqn);
1303 		if (err)
1304 			break;
1305 
1306 		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1307 		if (err) {
1308 			__mlx4_srq_free_icm(dev, srqn);
1309 			break;
1310 		}
1311 
1312 		set_param_l(out_param, srqn);
1313 		break;
1314 
1315 	default:
1316 		err = -EINVAL;
1317 	}
1318 
1319 	return err;
1320 }
1321 
1322 static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
1323 {
1324 	struct mlx4_priv *priv = mlx4_priv(dev);
1325 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1326 	struct mac_res *res;
1327 
1328 	res = kzalloc(sizeof *res, GFP_KERNEL);
1329 	if (!res)
1330 		return -ENOMEM;
1331 	res->mac = mac;
1332 	res->port = (u8) port;
1333 	list_add_tail(&res->list,
1334 		      &tracker->slave_list[slave].res_list[RES_MAC]);
1335 	return 0;
1336 }
1337 
1338 static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1339 			       int port)
1340 {
1341 	struct mlx4_priv *priv = mlx4_priv(dev);
1342 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1343 	struct list_head *mac_list =
1344 		&tracker->slave_list[slave].res_list[RES_MAC];
1345 	struct mac_res *res, *tmp;
1346 
1347 	list_for_each_entry_safe(res, tmp, mac_list, list) {
1348 		if (res->mac == mac && res->port == (u8) port) {
1349 			list_del(&res->list);
1350 			kfree(res);
1351 			break;
1352 		}
1353 	}
1354 }
1355 
1356 static void rem_slave_macs(struct mlx4_dev *dev, int slave)
1357 {
1358 	struct mlx4_priv *priv = mlx4_priv(dev);
1359 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1360 	struct list_head *mac_list =
1361 		&tracker->slave_list[slave].res_list[RES_MAC];
1362 	struct mac_res *res, *tmp;
1363 
1364 	list_for_each_entry_safe(res, tmp, mac_list, list) {
1365 		list_del(&res->list);
1366 		__mlx4_unregister_mac(dev, res->port, res->mac);
1367 		kfree(res);
1368 	}
1369 }
1370 
1371 static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1372 			 u64 in_param, u64 *out_param)
1373 {
1374 	int err = -EINVAL;
1375 	int port;
1376 	u64 mac;
1377 
1378 	if (op != RES_OP_RESERVE_AND_MAP)
1379 		return err;
1380 
1381 	port = get_param_l(out_param);
1382 	mac = in_param;
1383 
1384 	err = __mlx4_register_mac(dev, port, mac);
1385 	if (err >= 0) {
1386 		set_param_l(out_param, err);
1387 		err = 0;
1388 	}
1389 
1390 	if (!err) {
1391 		err = mac_add_to_slave(dev, slave, mac, port);
1392 		if (err)
1393 			__mlx4_unregister_mac(dev, port, mac);
1394 	}
1395 	return err;
1396 }
1397 
1398 static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1399 			 u64 in_param, u64 *out_param)
1400 {
1401 	return 0;
1402 }
1403 
1404 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1405 			     u64 in_param, u64 *out_param)
1406 {
1407 	u32 index;
1408 	int err;
1409 
1410 	if (op != RES_OP_RESERVE)
1411 		return -EINVAL;
1412 
1413 	err = __mlx4_counter_alloc(dev, &index);
1414 	if (err)
1415 		return err;
1416 
1417 	err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
1418 	if (err)
1419 		__mlx4_counter_free(dev, index);
1420 	else
1421 		set_param_l(out_param, index);
1422 
1423 	return err;
1424 }
1425 
1426 static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1427 			   u64 in_param, u64 *out_param)
1428 {
1429 	u32 xrcdn;
1430 	int err;
1431 
1432 	if (op != RES_OP_RESERVE)
1433 		return -EINVAL;
1434 
1435 	err = __mlx4_xrcd_alloc(dev, &xrcdn);
1436 	if (err)
1437 		return err;
1438 
1439 	err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
1440 	if (err)
1441 		__mlx4_xrcd_free(dev, xrcdn);
1442 	else
1443 		set_param_l(out_param, xrcdn);
1444 
1445 	return err;
1446 }
1447 
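/*
 * Dispatcher for the ALLOC_RES command issued by slaves: vhcr->in_modifier
 * selects the resource type and vhcr->op_modifier the operation
 * (reserve/map); results are returned through vhcr->out_param.
 */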
1448 int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
1449 			   struct mlx4_vhcr *vhcr,
1450 			   struct mlx4_cmd_mailbox *inbox,
1451 			   struct mlx4_cmd_mailbox *outbox,
1452 			   struct mlx4_cmd_info *cmd)
1453 {
1454 	int err;
1455 	int alop = vhcr->op_modifier;
1456 
1457 	switch (vhcr->in_modifier) {
1458 	case RES_QP:
1459 		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
1460 				   vhcr->in_param, &vhcr->out_param);
1461 		break;
1462 
1463 	case RES_MTT:
1464 		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
1465 				    vhcr->in_param, &vhcr->out_param);
1466 		break;
1467 
1468 	case RES_MPT:
1469 		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
1470 				    vhcr->in_param, &vhcr->out_param);
1471 		break;
1472 
1473 	case RES_CQ:
1474 		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
1475 				   vhcr->in_param, &vhcr->out_param);
1476 		break;
1477 
1478 	case RES_SRQ:
1479 		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
1480 				    vhcr->in_param, &vhcr->out_param);
1481 		break;
1482 
1483 	case RES_MAC:
1484 		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
1485 				    vhcr->in_param, &vhcr->out_param);
1486 		break;
1487 
1488 	case RES_VLAN:
1489 		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
1490 				    vhcr->in_param, &vhcr->out_param);
1491 		break;
1492 
1493 	case RES_COUNTER:
1494 		err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
1495 					vhcr->in_param, &vhcr->out_param);
1496 		break;
1497 
1498 	case RES_XRCD:
1499 		err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
1500 				      vhcr->in_param, &vhcr->out_param);
1501 		break;
1502 
1503 	default:
1504 		err = -EINVAL;
1505 		break;
1506 	}
1507 
1508 	return err;
1509 }
1510 
1511 static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1512 		       u64 in_param)
1513 {
1514 	int err;
1515 	int count;
1516 	int base;
1517 	int qpn;
1518 
1519 	switch (op) {
1520 	case RES_OP_RESERVE:
1521 		base = get_param_l(&in_param) & 0x7fffff;
1522 		count = get_param_h(&in_param);
1523 		err = rem_res_range(dev, slave, base, count, RES_QP, 0);
1524 		if (err)
1525 			break;
1526 		__mlx4_qp_release_range(dev, base, count);
1527 		break;
1528 	case RES_OP_MAP_ICM:
1529 		qpn = get_param_l(&in_param) & 0x7fffff;
1530 		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
1531 					   NULL, 0);
1532 		if (err)
1533 			return err;
1534 
1535 		if (!fw_reserved(dev, qpn))
1536 			__mlx4_qp_free_icm(dev, qpn);
1537 
1538 		res_end_move(dev, slave, RES_QP, qpn);
1539 
1540 		if (valid_reserved(dev, slave, qpn))
1541 			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
1542 		break;
1543 	default:
1544 		err = -EINVAL;
1545 		break;
1546 	}
1547 	return err;
1548 }
1549 
1550 static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1551 			u64 in_param, u64 *out_param)
1552 {
1553 	int err = -EINVAL;
1554 	int base;
1555 	int order;
1556 
1557 	if (op != RES_OP_RESERVE_AND_MAP)
1558 		return err;
1559 
1560 	base = get_param_l(&in_param);
1561 	order = get_param_h(&in_param);
1562 	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
1563 	if (!err)
1564 		__mlx4_free_mtt_range(dev, base, order);
1565 	return err;
1566 }
1567 
1568 static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1569 			u64 in_param)
1570 {
1571 	int err = -EINVAL;
1572 	int index;
1573 	int id;
1574 	struct res_mpt *mpt;
1575 
1576 	switch (op) {
1577 	case RES_OP_RESERVE:
1578 		index = get_param_l(&in_param);
1579 		id = index & mpt_mask(dev);
1580 		err = get_res(dev, slave, id, RES_MPT, &mpt);
1581 		if (err)
1582 			break;
1583 		index = mpt->key;
1584 		put_res(dev, slave, id, RES_MPT);
1585 
1586 		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
1587 		if (err)
1588 			break;
1589 		__mlx4_mr_release(dev, index);
1590 		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_RESERVED, &mpt);
		if (err)
			return err;

		__mlx4_mr_free_icm(dev, mpt->key);
		res_end_move(dev, slave, RES_MPT, id);
		return err;
1603 	default:
1604 		err = -EINVAL;
1605 		break;
1606 	}
1607 	return err;
1608 }
1609 
1610 static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1611 		       u64 in_param, u64 *out_param)
1612 {
1613 	int cqn;
1614 	int err;
1615 
1616 	switch (op) {
1617 	case RES_OP_RESERVE_AND_MAP:
1618 		cqn = get_param_l(&in_param);
1619 		err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1620 		if (err)
1621 			break;
1622 
1623 		__mlx4_cq_free_icm(dev, cqn);
1624 		break;
1625 
1626 	default:
1627 		err = -EINVAL;
1628 		break;
1629 	}
1630 
1631 	return err;
1632 }
1633 
1634 static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1635 			u64 in_param, u64 *out_param)
1636 {
1637 	int srqn;
1638 	int err;
1639 
1640 	switch (op) {
1641 	case RES_OP_RESERVE_AND_MAP:
1642 		srqn = get_param_l(&in_param);
1643 		err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1644 		if (err)
1645 			break;
1646 
1647 		__mlx4_srq_free_icm(dev, srqn);
1648 		break;
1649 
1650 	default:
1651 		err = -EINVAL;
1652 		break;
1653 	}
1654 
1655 	return err;
1656 }
1657 
1658 static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1659 			    u64 in_param, u64 *out_param)
1660 {
1661 	int port;
1662 	int err = 0;
1663 
1664 	switch (op) {
1665 	case RES_OP_RESERVE_AND_MAP:
1666 		port = get_param_l(out_param);
1667 		mac_del_from_slave(dev, slave, in_param, port);
1668 		__mlx4_unregister_mac(dev, port, in_param);
1669 		break;
1670 	default:
1671 		err = -EINVAL;
1672 		break;
1673 	}
1674 
1675 	return err;
1676 
1677 }
1678 
1679 static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1680 			    u64 in_param, u64 *out_param)
1681 {
1682 	return 0;
1683 }
1684 
1685 static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1686 			    u64 in_param, u64 *out_param)
1687 {
1688 	int index;
1689 	int err;
1690 
1691 	if (op != RES_OP_RESERVE)
1692 		return -EINVAL;
1693 
1694 	index = get_param_l(&in_param);
1695 	err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
1696 	if (err)
1697 		return err;
1698 
1699 	__mlx4_counter_free(dev, index);
1700 
1701 	return err;
1702 }
1703 
1704 static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1705 			  u64 in_param, u64 *out_param)
1706 {
1707 	int xrcdn;
1708 	int err;
1709 
1710 	if (op != RES_OP_RESERVE)
1711 		return -EINVAL;
1712 
1713 	xrcdn = get_param_l(&in_param);
1714 	err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
1715 	if (err)
1716 		return err;
1717 
1718 	__mlx4_xrcd_free(dev, xrcdn);
1719 
1720 	return err;
1721 }
1722 
1723 int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
1724 			  struct mlx4_vhcr *vhcr,
1725 			  struct mlx4_cmd_mailbox *inbox,
1726 			  struct mlx4_cmd_mailbox *outbox,
1727 			  struct mlx4_cmd_info *cmd)
1728 {
1729 	int err = -EINVAL;
1730 	int alop = vhcr->op_modifier;
1731 
1732 	switch (vhcr->in_modifier) {
1733 	case RES_QP:
1734 		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
1735 				  vhcr->in_param);
1736 		break;
1737 
1738 	case RES_MTT:
1739 		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
1740 				   vhcr->in_param, &vhcr->out_param);
1741 		break;
1742 
1743 	case RES_MPT:
1744 		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
1745 				   vhcr->in_param);
1746 		break;
1747 
1748 	case RES_CQ:
1749 		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
1750 				  vhcr->in_param, &vhcr->out_param);
1751 		break;
1752 
1753 	case RES_SRQ:
1754 		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
1755 				   vhcr->in_param, &vhcr->out_param);
1756 		break;
1757 
1758 	case RES_MAC:
1759 		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
1760 				   vhcr->in_param, &vhcr->out_param);
1761 		break;
1762 
1763 	case RES_VLAN:
1764 		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
1765 				   vhcr->in_param, &vhcr->out_param);
1766 		break;
1767 
1768 	case RES_COUNTER:
1769 		err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
1770 				       vhcr->in_param, &vhcr->out_param);
1771 		break;
1772 
1773 	case RES_XRCD:
		err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);
		break;
1776 
1777 	default:
1778 		break;
1779 	}
1780 	return err;
1781 }
1782 
1783 /* ugly but other choices are uglier */
1784 static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
1785 {
1786 	return (be32_to_cpu(mpt->flags) >> 9) & 1;
1787 }
1788 
1789 static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
1790 {
1791 	return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
1792 }
1793 
1794 static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
1795 {
1796 	return be32_to_cpu(mpt->mtt_sz);
1797 }
1798 
1799 static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
1800 {
1801 	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
1802 }
1803 
1804 static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
1805 {
1806 	return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
1807 }
1808 
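/*
 * Number of MTT pages implied by a QP context.  WQ sizes are decoded from
 * the log fields (a stride code of n means 1 << (n + 4) bytes per WQE);
 * the RQ contributes nothing when the QP uses an SRQ, RSS or XRC.  For
 * example, log_sq_size = 10 with stride code 2 gives a 64 KB SQ, i.e. 16
 * pages at the default 4 KB page size.
 */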
1809 static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
1810 {
1811 	int page_shift = (qpc->log_page_size & 0x3f) + 12;
1812 	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
	int log_sq_stride = qpc->sq_size_stride & 7;
1814 	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
1815 	int log_rq_stride = qpc->rq_size_stride & 7;
1816 	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
1817 	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
1818 	int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
1819 	int sq_size;
1820 	int rq_size;
1821 	int total_pages;
1822 	int total_mem;
1823 	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
1824 
	sq_size = 1 << (log_sq_size + log_sq_stride + 4);
1826 	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
1827 	total_mem = sq_size + rq_size;
1828 	total_pages =
1829 		roundup_pow_of_two((total_mem + (page_offset << 6)) >>
1830 				   page_shift);
1831 
1832 	return total_pages;
1833 }
1834 
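/*
 * An MTT reservation of 'order' covers 1 << order entries starting at its
 * res_id; a referenced range is valid only if it lies entirely inside the
 * reservation.
 */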
1835 static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
1836 			   int size, struct res_mtt *mtt)
1837 {
1838 	int res_start = mtt->com.res_id;
1839 	int res_size = (1 << mtt->order);
1840 
1841 	if (start < res_start || start + size > res_start + res_size)
1842 		return -EPERM;
1843 	return 0;
1844 }
1845 
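/*
 * SW2HW_MPT: move the MPT to RES_MPT_HW.  For non-physical MRs the MTT
 * range referenced by the entry must be owned by the slave and large
 * enough; on success the MTT reference count is bumped so the MTT cannot
 * be freed while the MR is in hardware.
 */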
1846 int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
1847 			   struct mlx4_vhcr *vhcr,
1848 			   struct mlx4_cmd_mailbox *inbox,
1849 			   struct mlx4_cmd_mailbox *outbox,
1850 			   struct mlx4_cmd_info *cmd)
1851 {
1852 	int err;
1853 	int index = vhcr->in_modifier;
1854 	struct res_mtt *mtt;
1855 	struct res_mpt *mpt;
1856 	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
1857 	int phys;
1858 	int id;
1859 
1860 	id = index & mpt_mask(dev);
1861 	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
1862 	if (err)
1863 		return err;
1864 
1865 	phys = mr_phys_mpt(inbox->buf);
1866 	if (!phys) {
1867 		err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
1868 		if (err)
1869 			goto ex_abort;
1870 
1871 		err = check_mtt_range(dev, slave, mtt_base,
1872 				      mr_get_mtt_size(inbox->buf), mtt);
1873 		if (err)
1874 			goto ex_put;
1875 
1876 		mpt->mtt = mtt;
1877 	}
1878 
1879 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1880 	if (err)
1881 		goto ex_put;
1882 
1883 	if (!phys) {
1884 		atomic_inc(&mtt->ref_count);
1885 		put_res(dev, slave, mtt->com.res_id, RES_MTT);
1886 	}
1887 
1888 	res_end_move(dev, slave, RES_MPT, id);
1889 	return 0;
1890 
1891 ex_put:
1892 	if (!phys)
1893 		put_res(dev, slave, mtt->com.res_id, RES_MTT);
1894 ex_abort:
1895 	res_abort_move(dev, slave, RES_MPT, id);
1896 
1897 	return err;
1898 }
1899 
1900 int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
1901 			   struct mlx4_vhcr *vhcr,
1902 			   struct mlx4_cmd_mailbox *inbox,
1903 			   struct mlx4_cmd_mailbox *outbox,
1904 			   struct mlx4_cmd_info *cmd)
1905 {
1906 	int err;
1907 	int index = vhcr->in_modifier;
1908 	struct res_mpt *mpt;
1909 	int id;
1910 
1911 	id = index & mpt_mask(dev);
1912 	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
1913 	if (err)
1914 		return err;
1915 
1916 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1917 	if (err)
1918 		goto ex_abort;
1919 
1920 	if (mpt->mtt)
1921 		atomic_dec(&mpt->mtt->ref_count);
1922 
1923 	res_end_move(dev, slave, RES_MPT, id);
1924 	return 0;
1925 
1926 ex_abort:
1927 	res_abort_move(dev, slave, RES_MPT, id);
1928 
1929 	return err;
1930 }
1931 
1932 int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
1933 			   struct mlx4_vhcr *vhcr,
1934 			   struct mlx4_cmd_mailbox *inbox,
1935 			   struct mlx4_cmd_mailbox *outbox,
1936 			   struct mlx4_cmd_info *cmd)
1937 {
1938 	int err;
1939 	int index = vhcr->in_modifier;
1940 	struct res_mpt *mpt;
1941 	int id;
1942 
1943 	id = index & mpt_mask(dev);
1944 	err = get_res(dev, slave, id, RES_MPT, &mpt);
1945 	if (err)
1946 		return err;
1947 
1948 	if (mpt->com.from_state != RES_MPT_HW) {
1949 		err = -EBUSY;
1950 		goto out;
1951 	}
1952 
1953 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1954 
1955 out:
1956 	put_res(dev, slave, id, RES_MPT);
1957 	return err;
1958 }
1959 
1960 static int qp_get_rcqn(struct mlx4_qp_context *qpc)
1961 {
1962 	return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
1963 }
1964 
1965 static int qp_get_scqn(struct mlx4_qp_context *qpc)
1966 {
1967 	return be32_to_cpu(qpc->cqn_send) & 0xffffff;
1968 }
1969 
1970 static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
1971 {
1972 	return be32_to_cpu(qpc->srqn) & 0x1ffffff;
1973 }
1974 
1975 static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
1976 				  struct mlx4_qp_context *context)
1977 {
1978 	u32 qpn = vhcr->in_modifier & 0xffffff;
1979 	u32 qkey = 0;
1980 
1981 	if (mlx4_get_parav_qkey(dev, qpn, &qkey))
1982 		return;
1983 
1984 	/* adjust qkey in qp context */
1985 	context->qkey = cpu_to_be32(qkey);
1986 }
1987 
1988 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
1989 			     struct mlx4_vhcr *vhcr,
1990 			     struct mlx4_cmd_mailbox *inbox,
1991 			     struct mlx4_cmd_mailbox *outbox,
1992 			     struct mlx4_cmd_info *cmd)
1993 {
1994 	int err;
1995 	int qpn = vhcr->in_modifier & 0x7fffff;
1996 	struct res_mtt *mtt;
1997 	struct res_qp *qp;
1998 	struct mlx4_qp_context *qpc = inbox->buf + 8;
1999 	int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2000 	int mtt_size = qp_get_mtt_size(qpc);
2001 	struct res_cq *rcq;
2002 	struct res_cq *scq;
2003 	int rcqn = qp_get_rcqn(qpc);
2004 	int scqn = qp_get_scqn(qpc);
2005 	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2006 	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2007 	struct res_srq *srq;
2008 	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2009 
2010 	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2011 	if (err)
2012 		return err;
2013 	qp->local_qpn = local_qpn;
2014 
2015 	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2016 	if (err)
2017 		goto ex_abort;
2018 
2019 	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2020 	if (err)
2021 		goto ex_put_mtt;
2022 
2023 	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2024 	if (err)
2025 		goto ex_put_mtt;
2026 
2027 	if (scqn != rcqn) {
2028 		err = get_res(dev, slave, scqn, RES_CQ, &scq);
2029 		if (err)
2030 			goto ex_put_rcq;
2031 	} else
2032 		scq = rcq;
2033 
2034 	if (use_srq) {
2035 		err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2036 		if (err)
2037 			goto ex_put_scq;
2038 	}
2039 
2040 	adjust_proxy_tun_qkey(dev, vhcr, qpc);
2041 	update_pkey_index(dev, slave, inbox);
2042 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2043 	if (err)
2044 		goto ex_put_srq;
2045 	atomic_inc(&mtt->ref_count);
2046 	qp->mtt = mtt;
2047 	atomic_inc(&rcq->ref_count);
2048 	qp->rcq = rcq;
2049 	atomic_inc(&scq->ref_count);
2050 	qp->scq = scq;
2051 
2052 	if (scqn != rcqn)
2053 		put_res(dev, slave, scqn, RES_CQ);
2054 
2055 	if (use_srq) {
2056 		atomic_inc(&srq->ref_count);
2057 		put_res(dev, slave, srqn, RES_SRQ);
2058 		qp->srq = srq;
2059 	}
2060 	put_res(dev, slave, rcqn, RES_CQ);
2061 	put_res(dev, slave, mtt_base, RES_MTT);
2062 	res_end_move(dev, slave, RES_QP, qpn);
2063 
2064 	return 0;
2065 
2066 ex_put_srq:
2067 	if (use_srq)
2068 		put_res(dev, slave, srqn, RES_SRQ);
2069 ex_put_scq:
2070 	if (scqn != rcqn)
2071 		put_res(dev, slave, scqn, RES_CQ);
2072 ex_put_rcq:
2073 	put_res(dev, slave, rcqn, RES_CQ);
2074 ex_put_mtt:
2075 	put_res(dev, slave, mtt_base, RES_MTT);
2076 ex_abort:
2077 	res_abort_move(dev, slave, RES_QP, qpn);
2078 
2079 	return err;
2080 }
2081 
2082 static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
2083 {
2084 	return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
2085 }
2086 
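/*
 * EQ and CQ entries are 32 bytes here, hence the "+ 5" below: the table
 * needs 1 << (log_size + 5 - page_shift) pages, with a minimum of one.
 */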
2087 static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
2088 {
2089 	int log_eq_size = eqc->log_eq_size & 0x1f;
2090 	int page_shift = (eqc->log_page_size & 0x3f) + 12;
2091 
2092 	if (log_eq_size + 5 < page_shift)
2093 		return 1;
2094 
2095 	return 1 << (log_eq_size + 5 - page_shift);
2096 }
2097 
2098 static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
2099 {
2100 	return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
2101 }
2102 
2103 static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
2104 {
2105 	int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
2106 	int page_shift = (cqc->log_page_size & 0x3f) + 12;
2107 
2108 	if (log_cq_size + 5 < page_shift)
2109 		return 1;
2110 
2111 	return 1 << (log_cq_size + 5 - page_shift);
2112 }
2113 
2114 int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2115 			  struct mlx4_vhcr *vhcr,
2116 			  struct mlx4_cmd_mailbox *inbox,
2117 			  struct mlx4_cmd_mailbox *outbox,
2118 			  struct mlx4_cmd_info *cmd)
2119 {
2120 	int err;
2121 	int eqn = vhcr->in_modifier;
2122 	int res_id = (slave << 8) | eqn;
2123 	struct mlx4_eq_context *eqc = inbox->buf;
2124 	int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
2125 	int mtt_size = eq_get_mtt_size(eqc);
2126 	struct res_eq *eq;
2127 	struct res_mtt *mtt;
2128 
2129 	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2130 	if (err)
2131 		return err;
2132 	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
2133 	if (err)
2134 		goto out_add;
2135 
2136 	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2137 	if (err)
2138 		goto out_move;
2139 
2140 	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2141 	if (err)
2142 		goto out_put;
2143 
2144 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2145 	if (err)
2146 		goto out_put;
2147 
2148 	atomic_inc(&mtt->ref_count);
2149 	eq->mtt = mtt;
2150 	put_res(dev, slave, mtt->com.res_id, RES_MTT);
2151 	res_end_move(dev, slave, RES_EQ, res_id);
2152 	return 0;
2153 
2154 out_put:
2155 	put_res(dev, slave, mtt->com.res_id, RES_MTT);
2156 out_move:
2157 	res_abort_move(dev, slave, RES_EQ, res_id);
2158 out_add:
2159 	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2160 	return err;
2161 }
2162 
2163 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
2164 			      int len, struct res_mtt **res)
2165 {
2166 	struct mlx4_priv *priv = mlx4_priv(dev);
2167 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2168 	struct res_mtt *mtt;
2169 	int err = -EINVAL;
2170 
2171 	spin_lock_irq(mlx4_tlock(dev));
2172 	list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
2173 			    com.list) {
2174 		if (!check_mtt_range(dev, slave, start, len, mtt)) {
2175 			*res = mtt;
2176 			mtt->com.from_state = mtt->com.state;
2177 			mtt->com.state = RES_MTT_BUSY;
2178 			err = 0;
2179 			break;
2180 		}
2181 	}
2182 	spin_unlock_irq(mlx4_tlock(dev));
2183 
2184 	return err;
2185 }
2186 
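/*
 * Sanity-check a slave's modify-QP request: on RC/UC address-path
 * transitions a non-master slave may only use GID index 0 for both the
 * primary and the alternate path.
 */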
2187 static int verify_qp_parameters(struct mlx4_dev *dev,
2188 				struct mlx4_cmd_mailbox *inbox,
2189 				enum qp_transition transition, u8 slave)
2190 {
2191 	u32			qp_type;
2192 	struct mlx4_qp_context	*qp_ctx;
2193 	enum mlx4_qp_optpar	optpar;
2194 
2195 	qp_ctx  = inbox->buf + 8;
2196 	qp_type	= (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
2197 	optpar	= be32_to_cpu(*(__be32 *) inbox->buf);
2198 
2199 	switch (qp_type) {
2200 	case MLX4_QP_ST_RC:
2201 	case MLX4_QP_ST_UC:
2202 		switch (transition) {
2203 		case QP_TRANS_INIT2RTR:
2204 		case QP_TRANS_RTR2RTS:
2205 		case QP_TRANS_RTS2RTS:
2206 		case QP_TRANS_SQD2SQD:
2207 		case QP_TRANS_SQD2RTS:
			if (slave != mlx4_master_func_num(dev)) {
				/* slaves have only gid index 0 */
				if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
					if (qp_ctx->pri_path.mgid_index)
						return -EINVAL;
				if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
					if (qp_ctx->alt_path.mgid_index)
						return -EINVAL;
			}
			break;
2217 		default:
2218 			break;
2219 		}
2220 
2221 		break;
2222 	default:
2223 		break;
2224 	}
2225 
2226 	return 0;
2227 }
2228 
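/*
 * WRITE_MTT from a slave is not passed to firmware directly.  Instead the
 * requested range is checked against an MTT segment owned by the slave
 * (get_containing_mtt), the page addresses in the inbox are converted to
 * host-endian values with the low (present) bit cleared, and the write is
 * then performed through the same __mlx4_write_mtt() path the master uses.
 */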
2229 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
2230 			   struct mlx4_vhcr *vhcr,
2231 			   struct mlx4_cmd_mailbox *inbox,
2232 			   struct mlx4_cmd_mailbox *outbox,
2233 			   struct mlx4_cmd_info *cmd)
2234 {
2235 	struct mlx4_mtt mtt;
2236 	__be64 *page_list = inbox->buf;
2237 	u64 *pg_list = (u64 *)page_list;
2238 	int i;
2239 	struct res_mtt *rmtt = NULL;
2240 	int start = be64_to_cpu(page_list[0]);
2241 	int npages = vhcr->in_modifier;
2242 	int err;
2243 
2244 	err = get_containing_mtt(dev, slave, start, npages, &rmtt);
2245 	if (err)
2246 		return err;
2247 
	/* Call the SW implementation of write_mtt:
	 * - Prepare a dummy mtt struct
	 * - Translate inbox contents to simple addresses in host endianness
	 */
	mtt.offset = 0;  /* TBD: offset handling is broken, but it is unused
			  * on this path, so leave it zero for now
			  */
2253 	mtt.order = 0;
2254 	mtt.page_shift = 0;
2255 	for (i = 0; i < npages; ++i)
2256 		pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
2257 
2258 	err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
2259 			       ((u64 *)page_list + 2));
2260 
2261 	if (rmtt)
2262 		put_res(dev, slave, rmtt->com.res_id, RES_MTT);
2263 
2264 	return err;
2265 }
2266 
2267 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2268 			  struct mlx4_vhcr *vhcr,
2269 			  struct mlx4_cmd_mailbox *inbox,
2270 			  struct mlx4_cmd_mailbox *outbox,
2271 			  struct mlx4_cmd_info *cmd)
2272 {
2273 	int eqn = vhcr->in_modifier;
2274 	int res_id = eqn | (slave << 8);
2275 	struct res_eq *eq;
2276 	int err;
2277 
2278 	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
2279 	if (err)
2280 		return err;
2281 
2282 	err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
2283 	if (err)
2284 		goto ex_abort;
2285 
2286 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2287 	if (err)
2288 		goto ex_put;
2289 
2290 	atomic_dec(&eq->mtt->ref_count);
2291 	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2292 	res_end_move(dev, slave, RES_EQ, res_id);
2293 	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2294 
2295 	return 0;
2296 
2297 ex_put:
2298 	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2299 ex_abort:
2300 	res_abort_move(dev, slave, RES_EQ, res_id);
2301 
2302 	return err;
2303 }
2304 
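/*
 * Inject an event into a slave's event queue.  The slave must have
 * registered an EQ for this event type (event_eq->eqn >= 0) and that EQ
 * must be in hardware ownership.  The EQE is copied into a command
 * mailbox and delivered via the GEN_EQE firmware command, with the slave
 * number and target EQN encoded in the input modifier.
 */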
2305 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
2306 {
2307 	struct mlx4_priv *priv = mlx4_priv(dev);
2308 	struct mlx4_slave_event_eq_info *event_eq;
2309 	struct mlx4_cmd_mailbox *mailbox;
2310 	u32 in_modifier = 0;
2311 	int err;
2312 	int res_id;
2313 	struct res_eq *req;
2314 
2315 	if (!priv->mfunc.master.slave_state)
2316 		return -EINVAL;
2317 
2318 	event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
2319 
2320 	/* Create the event only if the slave is registered */
2321 	if (event_eq->eqn < 0)
2322 		return 0;
2323 
2324 	mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2325 	res_id = (slave << 8) | event_eq->eqn;
2326 	err = get_res(dev, slave, res_id, RES_EQ, &req);
2327 	if (err)
2328 		goto unlock;
2329 
2330 	if (req->com.from_state != RES_EQ_HW) {
2331 		err = -EINVAL;
2332 		goto put;
2333 	}
2334 
2335 	mailbox = mlx4_alloc_cmd_mailbox(dev);
2336 	if (IS_ERR(mailbox)) {
2337 		err = PTR_ERR(mailbox);
2338 		goto put;
2339 	}
2340 
2341 	if (eqe->type == MLX4_EVENT_TYPE_CMD) {
2342 		++event_eq->token;
2343 		eqe->event.cmd.token = cpu_to_be16(event_eq->token);
2344 	}
2345 
2346 	memcpy(mailbox->buf, (u8 *) eqe, 28);
2347 
2348 	in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
2349 
2350 	err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
2351 		       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
2352 		       MLX4_CMD_NATIVE);
2353 
2354 	put_res(dev, slave, res_id, RES_EQ);
2355 	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2356 	mlx4_free_cmd_mailbox(dev, mailbox);
2357 	return err;
2358 
2359 put:
2360 	put_res(dev, slave, res_id, RES_EQ);
2361 
2362 unlock:
2363 	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2364 	return err;
2365 }
2366 
2367 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
2368 			  struct mlx4_vhcr *vhcr,
2369 			  struct mlx4_cmd_mailbox *inbox,
2370 			  struct mlx4_cmd_mailbox *outbox,
2371 			  struct mlx4_cmd_info *cmd)
2372 {
2373 	int eqn = vhcr->in_modifier;
2374 	int res_id = eqn | (slave << 8);
2375 	struct res_eq *eq;
2376 	int err;
2377 
2378 	err = get_res(dev, slave, res_id, RES_EQ, &eq);
2379 	if (err)
2380 		return err;
2381 
2382 	if (eq->com.from_state != RES_EQ_HW) {
2383 		err = -EINVAL;
2384 		goto ex_put;
2385 	}
2386 
2387 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2388 
2389 ex_put:
2390 	put_res(dev, slave, res_id, RES_EQ);
2391 	return err;
2392 }
2393 
2394 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2395 			  struct mlx4_vhcr *vhcr,
2396 			  struct mlx4_cmd_mailbox *inbox,
2397 			  struct mlx4_cmd_mailbox *outbox,
2398 			  struct mlx4_cmd_info *cmd)
2399 {
2400 	int err;
2401 	int cqn = vhcr->in_modifier;
2402 	struct mlx4_cq_context *cqc = inbox->buf;
2403 	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2404 	struct res_cq *cq;
2405 	struct res_mtt *mtt;
2406 
2407 	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
2408 	if (err)
2409 		return err;
2410 	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2411 	if (err)
2412 		goto out_move;
2413 	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2414 	if (err)
2415 		goto out_put;
2416 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2417 	if (err)
2418 		goto out_put;
2419 	atomic_inc(&mtt->ref_count);
2420 	cq->mtt = mtt;
2421 	put_res(dev, slave, mtt->com.res_id, RES_MTT);
2422 	res_end_move(dev, slave, RES_CQ, cqn);
2423 	return 0;
2424 
2425 out_put:
2426 	put_res(dev, slave, mtt->com.res_id, RES_MTT);
2427 out_move:
2428 	res_abort_move(dev, slave, RES_CQ, cqn);
2429 	return err;
2430 }
2431 
2432 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2433 			  struct mlx4_vhcr *vhcr,
2434 			  struct mlx4_cmd_mailbox *inbox,
2435 			  struct mlx4_cmd_mailbox *outbox,
2436 			  struct mlx4_cmd_info *cmd)
2437 {
2438 	int err;
2439 	int cqn = vhcr->in_modifier;
2440 	struct res_cq *cq;
2441 
2442 	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
2443 	if (err)
2444 		return err;
2445 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2446 	if (err)
2447 		goto out_move;
2448 	atomic_dec(&cq->mtt->ref_count);
2449 	res_end_move(dev, slave, RES_CQ, cqn);
2450 	return 0;
2451 
2452 out_move:
2453 	res_abort_move(dev, slave, RES_CQ, cqn);
2454 	return err;
2455 }
2456 
2457 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2458 			  struct mlx4_vhcr *vhcr,
2459 			  struct mlx4_cmd_mailbox *inbox,
2460 			  struct mlx4_cmd_mailbox *outbox,
2461 			  struct mlx4_cmd_info *cmd)
2462 {
2463 	int cqn = vhcr->in_modifier;
2464 	struct res_cq *cq;
2465 	int err;
2466 
2467 	err = get_res(dev, slave, cqn, RES_CQ, &cq);
2468 	if (err)
2469 		return err;
2470 
	if (cq->com.from_state != RES_CQ_HW) {
		err = -EBUSY;
		goto ex_put;
	}
2473 
2474 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2475 ex_put:
2476 	put_res(dev, slave, cqn, RES_CQ);
2477 
2478 	return err;
2479 }
2480 
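/*
 * MODIFY_CQ with op_modifier 0 is a CQ resize.  Verify that the MTT the
 * tracker currently associates with the CQ is the one we expect, validate
 * the new MTT range from the CQ context, let firmware perform the resize,
 * and then move the CQ's MTT reference from the old segment to the new one.
 */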
2481 static int handle_resize(struct mlx4_dev *dev, int slave,
2482 			 struct mlx4_vhcr *vhcr,
2483 			 struct mlx4_cmd_mailbox *inbox,
2484 			 struct mlx4_cmd_mailbox *outbox,
2485 			 struct mlx4_cmd_info *cmd,
2486 			 struct res_cq *cq)
2487 {
2488 	int err;
2489 	struct res_mtt *orig_mtt;
2490 	struct res_mtt *mtt;
2491 	struct mlx4_cq_context *cqc = inbox->buf;
2492 	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2493 
2494 	err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
2495 	if (err)
2496 		return err;
2497 
2498 	if (orig_mtt != cq->mtt) {
2499 		err = -EINVAL;
2500 		goto ex_put;
2501 	}
2502 
2503 	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2504 	if (err)
2505 		goto ex_put;
2506 
2507 	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2508 	if (err)
2509 		goto ex_put1;
2510 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2511 	if (err)
2512 		goto ex_put1;
2513 	atomic_dec(&orig_mtt->ref_count);
2514 	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2515 	atomic_inc(&mtt->ref_count);
2516 	cq->mtt = mtt;
2517 	put_res(dev, slave, mtt->com.res_id, RES_MTT);
2518 	return 0;
2519 
2520 ex_put1:
2521 	put_res(dev, slave, mtt->com.res_id, RES_MTT);
2522 ex_put:
2523 	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2524 
	return err;
}
2528 
2529 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2530 			   struct mlx4_vhcr *vhcr,
2531 			   struct mlx4_cmd_mailbox *inbox,
2532 			   struct mlx4_cmd_mailbox *outbox,
2533 			   struct mlx4_cmd_info *cmd)
2534 {
2535 	int cqn = vhcr->in_modifier;
2536 	struct res_cq *cq;
2537 	int err;
2538 
2539 	err = get_res(dev, slave, cqn, RES_CQ, &cq);
2540 	if (err)
2541 		return err;
2542 
	if (cq->com.from_state != RES_CQ_HW) {
		err = -EBUSY;
		goto ex_put;
	}
2545 
2546 	if (vhcr->op_modifier == 0) {
2547 		err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
2548 		goto ex_put;
2549 	}
2550 
2551 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2552 ex_put:
2553 	put_res(dev, slave, cqn, RES_CQ);
2554 
2555 	return err;
2556 }
2557 
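/*
 * Number of MTT entries needed to map the SRQ buffer: the buffer size is
 * num_wqes * wqe_stride = 2^log_srq_size * 2^(log_rq_stride + 4) bytes
 * (the stride is stored in units of 16 bytes), divided by the page size.
 * As with EQs and CQs, at least one entry is required.
 */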
2558 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
2559 {
2560 	int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
2561 	int log_rq_stride = srqc->logstride & 7;
2562 	int page_shift = (srqc->log_page_size & 0x3f) + 12;
2563 
2564 	if (log_srq_size + log_rq_stride + 4 < page_shift)
2565 		return 1;
2566 
2567 	return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
2568 }
2569 
2570 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2571 			   struct mlx4_vhcr *vhcr,
2572 			   struct mlx4_cmd_mailbox *inbox,
2573 			   struct mlx4_cmd_mailbox *outbox,
2574 			   struct mlx4_cmd_info *cmd)
2575 {
2576 	int err;
2577 	int srqn = vhcr->in_modifier;
2578 	struct res_mtt *mtt;
2579 	struct res_srq *srq;
2580 	struct mlx4_srq_context *srqc = inbox->buf;
2581 	int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
2582 
2583 	if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
2584 		return -EINVAL;
2585 
2586 	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
2587 	if (err)
2588 		return err;
2589 	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2590 	if (err)
2591 		goto ex_abort;
2592 	err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
2593 			      mtt);
2594 	if (err)
2595 		goto ex_put_mtt;
2596 
2597 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2598 	if (err)
2599 		goto ex_put_mtt;
2600 
2601 	atomic_inc(&mtt->ref_count);
2602 	srq->mtt = mtt;
2603 	put_res(dev, slave, mtt->com.res_id, RES_MTT);
2604 	res_end_move(dev, slave, RES_SRQ, srqn);
2605 	return 0;
2606 
2607 ex_put_mtt:
2608 	put_res(dev, slave, mtt->com.res_id, RES_MTT);
2609 ex_abort:
2610 	res_abort_move(dev, slave, RES_SRQ, srqn);
2611 
2612 	return err;
2613 }
2614 
2615 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2616 			   struct mlx4_vhcr *vhcr,
2617 			   struct mlx4_cmd_mailbox *inbox,
2618 			   struct mlx4_cmd_mailbox *outbox,
2619 			   struct mlx4_cmd_info *cmd)
2620 {
2621 	int err;
2622 	int srqn = vhcr->in_modifier;
2623 	struct res_srq *srq;
2624 
2625 	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
2626 	if (err)
2627 		return err;
2628 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2629 	if (err)
2630 		goto ex_abort;
2631 	atomic_dec(&srq->mtt->ref_count);
2632 	if (srq->cq)
2633 		atomic_dec(&srq->cq->ref_count);
2634 	res_end_move(dev, slave, RES_SRQ, srqn);
2635 
2636 	return 0;
2637 
2638 ex_abort:
2639 	res_abort_move(dev, slave, RES_SRQ, srqn);
2640 
2641 	return err;
2642 }
2643 
2644 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2645 			   struct mlx4_vhcr *vhcr,
2646 			   struct mlx4_cmd_mailbox *inbox,
2647 			   struct mlx4_cmd_mailbox *outbox,
2648 			   struct mlx4_cmd_info *cmd)
2649 {
2650 	int err;
2651 	int srqn = vhcr->in_modifier;
2652 	struct res_srq *srq;
2653 
2654 	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2655 	if (err)
2656 		return err;
2657 	if (srq->com.from_state != RES_SRQ_HW) {
2658 		err = -EBUSY;
2659 		goto out;
2660 	}
2661 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2662 out:
2663 	put_res(dev, slave, srqn, RES_SRQ);
2664 	return err;
2665 }
2666 
2667 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2668 			 struct mlx4_vhcr *vhcr,
2669 			 struct mlx4_cmd_mailbox *inbox,
2670 			 struct mlx4_cmd_mailbox *outbox,
2671 			 struct mlx4_cmd_info *cmd)
2672 {
2673 	int err;
2674 	int srqn = vhcr->in_modifier;
2675 	struct res_srq *srq;
2676 
2677 	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2678 	if (err)
2679 		return err;
2680 
2681 	if (srq->com.from_state != RES_SRQ_HW) {
2682 		err = -EBUSY;
2683 		goto out;
2684 	}
2685 
2686 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2687 out:
2688 	put_res(dev, slave, srqn, RES_SRQ);
2689 	return err;
2690 }
2691 
2692 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
2693 			struct mlx4_vhcr *vhcr,
2694 			struct mlx4_cmd_mailbox *inbox,
2695 			struct mlx4_cmd_mailbox *outbox,
2696 			struct mlx4_cmd_info *cmd)
2697 {
2698 	int err;
2699 	int qpn = vhcr->in_modifier & 0x7fffff;
2700 	struct res_qp *qp;
2701 
2702 	err = get_res(dev, slave, qpn, RES_QP, &qp);
2703 	if (err)
2704 		return err;
2705 	if (qp->com.from_state != RES_QP_HW) {
2706 		err = -EBUSY;
2707 		goto out;
2708 	}
2709 
2710 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2711 out:
2712 	put_res(dev, slave, qpn, RES_QP);
2713 	return err;
2714 }
2715 
2716 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2717 			      struct mlx4_vhcr *vhcr,
2718 			      struct mlx4_cmd_mailbox *inbox,
2719 			      struct mlx4_cmd_mailbox *outbox,
2720 			      struct mlx4_cmd_info *cmd)
2721 {
2722 	struct mlx4_qp_context *context = inbox->buf + 8;
2723 	adjust_proxy_tun_qkey(dev, vhcr, context);
2724 	update_pkey_index(dev, slave, inbox);
2725 	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2726 }
2727 
2728 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
2729 			     struct mlx4_vhcr *vhcr,
2730 			     struct mlx4_cmd_mailbox *inbox,
2731 			     struct mlx4_cmd_mailbox *outbox,
2732 			     struct mlx4_cmd_info *cmd)
2733 {
2734 	int err;
2735 	struct mlx4_qp_context *qpc = inbox->buf + 8;
2736 
2737 	err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
2738 	if (err)
2739 		return err;
2740 
2741 	update_pkey_index(dev, slave, inbox);
2742 	update_gid(dev, inbox, (u8)slave);
2743 	adjust_proxy_tun_qkey(dev, vhcr, qpc);
2744 
2745 	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2746 }
2747 
2748 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2749 			    struct mlx4_vhcr *vhcr,
2750 			    struct mlx4_cmd_mailbox *inbox,
2751 			    struct mlx4_cmd_mailbox *outbox,
2752 			    struct mlx4_cmd_info *cmd)
2753 {
2754 	int err;
2755 	struct mlx4_qp_context *context = inbox->buf + 8;
2756 
2757 	err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);
2758 	if (err)
2759 		return err;
2760 
2761 	update_pkey_index(dev, slave, inbox);
2762 	update_gid(dev, inbox, (u8)slave);
2763 	adjust_proxy_tun_qkey(dev, vhcr, context);
2764 	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2765 }
2766 
2767 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2768 			    struct mlx4_vhcr *vhcr,
2769 			    struct mlx4_cmd_mailbox *inbox,
2770 			    struct mlx4_cmd_mailbox *outbox,
2771 			    struct mlx4_cmd_info *cmd)
2772 {
2773 	int err;
2774 	struct mlx4_qp_context *context = inbox->buf + 8;
2775 
2776 	err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);
2777 	if (err)
2778 		return err;
2779 
2780 	update_pkey_index(dev, slave, inbox);
2781 	update_gid(dev, inbox, (u8)slave);
2782 	adjust_proxy_tun_qkey(dev, vhcr, context);
2783 	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2784 }
2785 
2787 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2788 			      struct mlx4_vhcr *vhcr,
2789 			      struct mlx4_cmd_mailbox *inbox,
2790 			      struct mlx4_cmd_mailbox *outbox,
2791 			      struct mlx4_cmd_info *cmd)
2792 {
2793 	struct mlx4_qp_context *context = inbox->buf + 8;
2794 	adjust_proxy_tun_qkey(dev, vhcr, context);
2795 	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2796 }
2797 
2798 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
2799 			    struct mlx4_vhcr *vhcr,
2800 			    struct mlx4_cmd_mailbox *inbox,
2801 			    struct mlx4_cmd_mailbox *outbox,
2802 			    struct mlx4_cmd_info *cmd)
2803 {
2804 	int err;
2805 	struct mlx4_qp_context *context = inbox->buf + 8;
2806 
2807 	err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);
2808 	if (err)
2809 		return err;
2810 
2811 	adjust_proxy_tun_qkey(dev, vhcr, context);
2812 	update_gid(dev, inbox, (u8)slave);
2813 	update_pkey_index(dev, slave, inbox);
2814 	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2815 }
2816 
2817 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2818 			    struct mlx4_vhcr *vhcr,
2819 			    struct mlx4_cmd_mailbox *inbox,
2820 			    struct mlx4_cmd_mailbox *outbox,
2821 			    struct mlx4_cmd_info *cmd)
2822 {
2823 	int err;
2824 	struct mlx4_qp_context *context = inbox->buf + 8;
2825 
2826 	err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);
2827 	if (err)
2828 		return err;
2829 
2830 	adjust_proxy_tun_qkey(dev, vhcr, context);
2831 	update_gid(dev, inbox, (u8)slave);
2832 	update_pkey_index(dev, slave, inbox);
2833 	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2834 }
2835 
2836 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
2837 			 struct mlx4_vhcr *vhcr,
2838 			 struct mlx4_cmd_mailbox *inbox,
2839 			 struct mlx4_cmd_mailbox *outbox,
2840 			 struct mlx4_cmd_info *cmd)
2841 {
2842 	int err;
2843 	int qpn = vhcr->in_modifier & 0x7fffff;
2844 	struct res_qp *qp;
2845 
2846 	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
2847 	if (err)
2848 		return err;
2849 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2850 	if (err)
2851 		goto ex_abort;
2852 
2853 	atomic_dec(&qp->mtt->ref_count);
2854 	atomic_dec(&qp->rcq->ref_count);
2855 	atomic_dec(&qp->scq->ref_count);
2856 	if (qp->srq)
2857 		atomic_dec(&qp->srq->ref_count);
2858 	res_end_move(dev, slave, RES_QP, qpn);
2859 	return 0;
2860 
2861 ex_abort:
2862 	res_abort_move(dev, slave, RES_QP, qpn);
2863 
2864 	return err;
2865 }
2866 
2867 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
2868 				struct res_qp *rqp, u8 *gid)
2869 {
2870 	struct res_gid *res;
2871 
2872 	list_for_each_entry(res, &rqp->mcg_list, list) {
2873 		if (!memcmp(res->gid, gid, 16))
2874 			return res;
2875 	}
2876 	return NULL;
2877 }
2878 
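/*
 * Multicast attachments made by a slave are mirrored in the QP's mcg_list
 * so that they can be detached on the slave's behalf if it dies or is
 * reset (see detach_qp()).  Duplicate attach requests for the same GID
 * are rejected with -EEXIST.
 */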
2879 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2880 		       u8 *gid, enum mlx4_protocol prot,
2881 		       enum mlx4_steer_type steer)
2882 {
2883 	struct res_gid *res;
2884 	int err;
2885 
	res = kzalloc(sizeof(*res), GFP_KERNEL);
2887 	if (!res)
2888 		return -ENOMEM;
2889 
2890 	spin_lock_irq(&rqp->mcg_spl);
2891 	if (find_gid(dev, slave, rqp, gid)) {
2892 		kfree(res);
2893 		err = -EEXIST;
2894 	} else {
2895 		memcpy(res->gid, gid, 16);
2896 		res->prot = prot;
2897 		res->steer = steer;
2898 		list_add_tail(&res->list, &rqp->mcg_list);
2899 		err = 0;
2900 	}
2901 	spin_unlock_irq(&rqp->mcg_spl);
2902 
2903 	return err;
2904 }
2905 
2906 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2907 		       u8 *gid, enum mlx4_protocol prot,
2908 		       enum mlx4_steer_type steer)
2909 {
2910 	struct res_gid *res;
2911 	int err;
2912 
2913 	spin_lock_irq(&rqp->mcg_spl);
2914 	res = find_gid(dev, slave, rqp, gid);
2915 	if (!res || res->prot != prot || res->steer != steer)
2916 		err = -EINVAL;
2917 	else {
2918 		list_del(&res->list);
2919 		kfree(res);
2920 		err = 0;
2921 	}
2922 	spin_unlock_irq(&rqp->mcg_spl);
2923 
2924 	return err;
2925 }
2926 
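/*
 * QP multicast attach/detach on behalf of a slave.  The input modifier
 * carries the QP number in its low 24 bits, the protocol in bits 28-30
 * and the block-loopback flag in bit 31; op_modifier selects attach
 * (non-zero) or detach (zero), and the steering type is taken from bit 1
 * of gid[7].
 */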
2927 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
2928 			       struct mlx4_vhcr *vhcr,
2929 			       struct mlx4_cmd_mailbox *inbox,
2930 			       struct mlx4_cmd_mailbox *outbox,
2931 			       struct mlx4_cmd_info *cmd)
2932 {
2933 	struct mlx4_qp qp; /* dummy for calling attach/detach */
2934 	u8 *gid = inbox->buf;
2935 	enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
2936 	int err;
2937 	int qpn;
2938 	struct res_qp *rqp;
2939 	int attach = vhcr->op_modifier;
2940 	int block_loopback = vhcr->in_modifier >> 31;
2941 	u8 steer_type_mask = 2;
2942 	enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
2943 
2944 	qpn = vhcr->in_modifier & 0xffffff;
2945 	err = get_res(dev, slave, qpn, RES_QP, &rqp);
2946 	if (err)
2947 		return err;
2948 
2949 	qp.qpn = qpn;
2950 	if (attach) {
2951 		err = add_mcg_res(dev, slave, rqp, gid, prot, type);
2952 		if (err)
2953 			goto ex_put;
2954 
2955 		err = mlx4_qp_attach_common(dev, &qp, gid,
2956 					    block_loopback, prot, type);
2957 		if (err)
2958 			goto ex_rem;
2959 	} else {
2960 		err = rem_mcg_res(dev, slave, rqp, gid, prot, type);
2961 		if (err)
2962 			goto ex_put;
2963 		err = mlx4_qp_detach_common(dev, &qp, gid, prot, type);
2964 	}
2965 
2966 	put_res(dev, slave, qpn, RES_QP);
2967 	return 0;
2968 
2969 ex_rem:
2970 	/* ignore error return below, already in error */
2971 	(void) rem_mcg_res(dev, slave, rqp, gid, prot, type);
2972 ex_put:
2973 	put_res(dev, slave, qpn, RES_QP);
2974 
2975 	return err;
2976 }
2977 
2978 /*
2979  * MAC validation for Flow Steering rules.
2980  * VF can attach rules only with a mac address which is assigned to it.
2981  */
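/* res->mac holds the address in the low 48 bits of a u64, so shifting it
 * left by 16 and converting to big endian lines the six MAC bytes up at
 * the start of be_mac for the memcmp() against the rule's dst_mac.
 */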
2982 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
2983 				   struct list_head *rlist)
2984 {
2985 	struct mac_res *res, *tmp;
2986 	__be64 be_mac;
2987 
	/* make sure it isn't a multicast or broadcast MAC */
2989 	if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
2990 	    !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
2991 		list_for_each_entry_safe(res, tmp, rlist, list) {
2992 			be_mac = cpu_to_be64(res->mac << 16);
2993 			if (!memcmp(&be_mac, eth_header->eth.dst_mac, ETH_ALEN))
2994 				return 0;
2995 		}
2996 		pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
2997 		       eth_header->eth.dst_mac, slave);
2998 		return -EINVAL;
2999 	}
3000 	return 0;
3001 }
3002 
3003 /*
3004  * In case of missing eth header, append eth header with a MAC address
3005  * assigned to the VF.
3006  */
3007 static int add_eth_header(struct mlx4_dev *dev, int slave,
3008 			  struct mlx4_cmd_mailbox *inbox,
3009 			  struct list_head *rlist, int header_id)
3010 {
3011 	struct mac_res *res, *tmp;
3012 	u8 port;
3013 	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3014 	struct mlx4_net_trans_rule_hw_eth *eth_header;
3015 	struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
3016 	struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
3017 	__be64 be_mac = 0;
3018 	__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
3019 
3020 	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3021 	port = be32_to_cpu(ctrl->vf_vep_port) & 0xff;
3022 	eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
3023 
3024 	/* Clear a space in the inbox for eth header */
3025 	switch (header_id) {
3026 	case MLX4_NET_TRANS_RULE_ID_IPV4:
3027 		ip_header =
3028 			(struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
3029 		memmove(ip_header, eth_header,
3030 			sizeof(*ip_header) + sizeof(*l4_header));
3031 		break;
3032 	case MLX4_NET_TRANS_RULE_ID_TCP:
3033 	case MLX4_NET_TRANS_RULE_ID_UDP:
3034 		l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
3035 			    (eth_header + 1);
3036 		memmove(l4_header, eth_header, sizeof(*l4_header));
3037 		break;
3038 	default:
3039 		return -EINVAL;
3040 	}
3041 	list_for_each_entry_safe(res, tmp, rlist, list) {
3042 		if (port == res->port) {
3043 			be_mac = cpu_to_be64(res->mac << 16);
3044 			break;
3045 		}
3046 	}
3047 	if (!be_mac) {
		pr_err("Failed to add eth header to FS rule: no matching MAC found for port %d\n",
		       port);
3050 		return -EINVAL;
3051 	}
3052 
3053 	memset(eth_header, 0, sizeof(*eth_header));
3054 	eth_header->size = sizeof(*eth_header) >> 2;
3055 	eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
3056 	memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
3057 	memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
3058 
3059 	return 0;
3060 
3061 }
3062 
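/*
 * Attach a flow steering rule on behalf of a slave.  Only device-managed
 * steering is supported here.  The target QP must be owned by the slave,
 * and the rule's L2 header (if any) must carry a MAC assigned to that
 * slave; rules that start at L3/L4 get an L2 header prepended.  On success
 * the rule id returned by firmware is registered in the resource tracker,
 * and the rule is detached again if that registration fails.
 */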
3063 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3064 					 struct mlx4_vhcr *vhcr,
3065 					 struct mlx4_cmd_mailbox *inbox,
3066 					 struct mlx4_cmd_mailbox *outbox,
3067 					 struct mlx4_cmd_info *cmd)
3068 {
3069 
3070 	struct mlx4_priv *priv = mlx4_priv(dev);
3071 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3072 	struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
3073 	int err;
3074 	int qpn;
3075 	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3076 	struct _rule_hw  *rule_header;
3077 	int header_id;
3078 
3079 	if (dev->caps.steering_mode !=
3080 	    MLX4_STEERING_MODE_DEVICE_MANAGED)
3081 		return -EOPNOTSUPP;
3082 
3083 	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3084 	qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
3085 	err = get_res(dev, slave, qpn, RES_QP, NULL);
3086 	if (err) {
3087 		pr_err("Steering rule with qpn 0x%x rejected.\n", qpn);
3088 		return err;
3089 	}
3090 	rule_header = (struct _rule_hw *)(ctrl + 1);
3091 	header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
3092 
3093 	switch (header_id) {
3094 	case MLX4_NET_TRANS_RULE_ID_ETH:
3095 		if (validate_eth_header_mac(slave, rule_header, rlist)) {
3096 			err = -EINVAL;
3097 			goto err_put;
3098 		}
3099 		break;
3100 	case MLX4_NET_TRANS_RULE_ID_IB:
3101 		break;
3102 	case MLX4_NET_TRANS_RULE_ID_IPV4:
3103 	case MLX4_NET_TRANS_RULE_ID_TCP:
3104 	case MLX4_NET_TRANS_RULE_ID_UDP:
3105 		pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n");
3106 		if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
3107 			err = -EINVAL;
3108 			goto err_put;
3109 		}
3110 		vhcr->in_modifier +=
3111 			sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
3112 		break;
3113 	default:
3114 		pr_err("Corrupted mailbox.\n");
3115 		err = -EINVAL;
3116 		goto err_put;
3117 	}
3118 
3119 	err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
3120 			   vhcr->in_modifier, 0,
3121 			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
3122 			   MLX4_CMD_NATIVE);
3123 	if (err)
3124 		goto err_put;
3125 
3126 	err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, 0);
3127 	if (err) {
		mlx4_err(dev, "Failed to add flow steering resources\n");
3129 		/* detach rule*/
3130 		mlx4_cmd(dev, vhcr->out_param, 0, 0,
3131 			 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3132 			 MLX4_CMD_NATIVE);
3133 	}
3134 err_put:
3135 	put_res(dev, slave, qpn, RES_QP);
3136 	return err;
3137 }
3138 
3139 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
3140 					 struct mlx4_vhcr *vhcr,
3141 					 struct mlx4_cmd_mailbox *inbox,
3142 					 struct mlx4_cmd_mailbox *outbox,
3143 					 struct mlx4_cmd_info *cmd)
3144 {
3145 	int err;
3146 
3147 	if (dev->caps.steering_mode !=
3148 	    MLX4_STEERING_MODE_DEVICE_MANAGED)
3149 		return -EOPNOTSUPP;
3150 
3151 	err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
3152 	if (err) {
		mlx4_err(dev, "Failed to remove flow steering resources\n");
3154 		return err;
3155 	}
3156 
3157 	err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
3158 		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3159 		       MLX4_CMD_NATIVE);
3160 	return err;
3161 }
3162 
3163 enum {
3164 	BUSY_MAX_RETRIES = 10
3165 };
3166 
3167 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
3168 			       struct mlx4_vhcr *vhcr,
3169 			       struct mlx4_cmd_mailbox *inbox,
3170 			       struct mlx4_cmd_mailbox *outbox,
3171 			       struct mlx4_cmd_info *cmd)
3172 {
3173 	int err;
3174 	int index = vhcr->in_modifier & 0xffff;
3175 
3176 	err = get_res(dev, slave, index, RES_COUNTER, NULL);
3177 	if (err)
3178 		return err;
3179 
3180 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3181 	put_res(dev, slave, index, RES_COUNTER);
3182 	return err;
3183 }
3184 
3185 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
3186 {
3187 	struct res_gid *rgid;
3188 	struct res_gid *tmp;
3189 	struct mlx4_qp qp; /* dummy for calling attach/detach */
3190 
3191 	list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
3192 		qp.qpn = rqp->local_qpn;
3193 		(void) mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot,
3194 					     rgid->steer);
3195 		list_del(&rgid->list);
3196 		kfree(rgid);
3197 	}
3198 }
3199 
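/*
 * Slave cleanup works by first marking every resource the slave owns as
 * busy and "removing", which prevents concurrent commands from grabbing
 * it, and then walking each resource back through its states until it can
 * be freed.  _move_all_busy() performs one marking pass and returns how
 * many resources were still busy (i.e. held by someone else).
 */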
3200 static int _move_all_busy(struct mlx4_dev *dev, int slave,
3201 			  enum mlx4_resource type, int print)
3202 {
3203 	struct mlx4_priv *priv = mlx4_priv(dev);
3204 	struct mlx4_resource_tracker *tracker =
3205 		&priv->mfunc.master.res_tracker;
3206 	struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
3207 	struct res_common *r;
3208 	struct res_common *tmp;
3209 	int busy;
3210 
3211 	busy = 0;
3212 	spin_lock_irq(mlx4_tlock(dev));
3213 	list_for_each_entry_safe(r, tmp, rlist, list) {
3214 		if (r->owner == slave) {
3215 			if (!r->removing) {
3216 				if (r->state == RES_ANY_BUSY) {
3217 					if (print)
3218 						mlx4_dbg(dev,
3219 							 "%s id 0x%llx is busy\n",
3220 							  ResourceType(type),
3221 							  r->res_id);
3222 					++busy;
3223 				} else {
3224 					r->from_state = r->state;
3225 					r->state = RES_ANY_BUSY;
3226 					r->removing = 1;
3227 				}
3228 			}
3229 		}
3230 	}
3231 	spin_unlock_irq(mlx4_tlock(dev));
3232 
3233 	return busy;
3234 }
3235 
3236 static int move_all_busy(struct mlx4_dev *dev, int slave,
3237 			 enum mlx4_resource type)
3238 {
3239 	unsigned long begin;
3240 	int busy;
3241 
3242 	begin = jiffies;
3243 	do {
3244 		busy = _move_all_busy(dev, slave, type, 0);
3245 		if (time_after(jiffies, begin + 5 * HZ))
3246 			break;
3247 		if (busy)
3248 			cond_resched();
3249 	} while (busy);
3250 
3251 	if (busy)
3252 		busy = _move_all_busy(dev, slave, type, 1);
3253 
3254 	return busy;
3255 }
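
/*
 * The rem_slave_*() helpers below all follow the same pattern: mark the
 * slave's resources busy, then unwind each one state by state (e.g. for a
 * QP: RES_QP_HW -> RES_QP_MAPPED -> RES_QP_RESERVED -> freed), issuing the
 * corresponding HW2SW/2RST firmware command where hardware still owns the
 * object and dropping any references it held on other tracked resources.
 */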
3256 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
3257 {
3258 	struct mlx4_priv *priv = mlx4_priv(dev);
3259 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3260 	struct list_head *qp_list =
3261 		&tracker->slave_list[slave].res_list[RES_QP];
3262 	struct res_qp *qp;
3263 	struct res_qp *tmp;
3264 	int state;
3265 	u64 in_param;
3266 	int qpn;
3267 	int err;
3268 
3269 	err = move_all_busy(dev, slave, RES_QP);
3270 	if (err)
		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy "
			  "for slave %d\n", slave);
3273 
3274 	spin_lock_irq(mlx4_tlock(dev));
3275 	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
3276 		spin_unlock_irq(mlx4_tlock(dev));
3277 		if (qp->com.owner == slave) {
3278 			qpn = qp->com.res_id;
3279 			detach_qp(dev, slave, qp);
3280 			state = qp->com.from_state;
3281 			while (state != 0) {
3282 				switch (state) {
3283 				case RES_QP_RESERVED:
3284 					spin_lock_irq(mlx4_tlock(dev));
3285 					rb_erase(&qp->com.node,
3286 						 &tracker->res_tree[RES_QP]);
3287 					list_del(&qp->com.list);
3288 					spin_unlock_irq(mlx4_tlock(dev));
3289 					kfree(qp);
3290 					state = 0;
3291 					break;
3292 				case RES_QP_MAPPED:
3293 					if (!valid_reserved(dev, slave, qpn))
3294 						__mlx4_qp_free_icm(dev, qpn);
3295 					state = RES_QP_RESERVED;
3296 					break;
3297 				case RES_QP_HW:
3298 					in_param = slave;
3299 					err = mlx4_cmd(dev, in_param,
3300 						       qp->local_qpn, 2,
3301 						       MLX4_CMD_2RST_QP,
3302 						       MLX4_CMD_TIME_CLASS_A,
3303 						       MLX4_CMD_NATIVE);
3304 					if (err)
3305 						mlx4_dbg(dev, "rem_slave_qps: failed"
3306 							 " to move slave %d qpn %d to"
3307 							 " reset\n", slave,
3308 							 qp->local_qpn);
3309 					atomic_dec(&qp->rcq->ref_count);
3310 					atomic_dec(&qp->scq->ref_count);
3311 					atomic_dec(&qp->mtt->ref_count);
3312 					if (qp->srq)
3313 						atomic_dec(&qp->srq->ref_count);
3314 					state = RES_QP_MAPPED;
3315 					break;
3316 				default:
3317 					state = 0;
3318 				}
3319 			}
3320 		}
3321 		spin_lock_irq(mlx4_tlock(dev));
3322 	}
3323 	spin_unlock_irq(mlx4_tlock(dev));
3324 }
3325 
3326 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
3327 {
3328 	struct mlx4_priv *priv = mlx4_priv(dev);
3329 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3330 	struct list_head *srq_list =
3331 		&tracker->slave_list[slave].res_list[RES_SRQ];
3332 	struct res_srq *srq;
3333 	struct res_srq *tmp;
3334 	int state;
3335 	u64 in_param;
3336 	LIST_HEAD(tlist);
3337 	int srqn;
3338 	int err;
3339 
3340 	err = move_all_busy(dev, slave, RES_SRQ);
3341 	if (err)
3342 		mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
3343 			  "busy for slave %d\n", slave);
3344 
3345 	spin_lock_irq(mlx4_tlock(dev));
3346 	list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
3347 		spin_unlock_irq(mlx4_tlock(dev));
3348 		if (srq->com.owner == slave) {
3349 			srqn = srq->com.res_id;
3350 			state = srq->com.from_state;
3351 			while (state != 0) {
3352 				switch (state) {
3353 				case RES_SRQ_ALLOCATED:
3354 					__mlx4_srq_free_icm(dev, srqn);
3355 					spin_lock_irq(mlx4_tlock(dev));
3356 					rb_erase(&srq->com.node,
3357 						 &tracker->res_tree[RES_SRQ]);
3358 					list_del(&srq->com.list);
3359 					spin_unlock_irq(mlx4_tlock(dev));
3360 					kfree(srq);
3361 					state = 0;
3362 					break;
3363 
3364 				case RES_SRQ_HW:
3365 					in_param = slave;
3366 					err = mlx4_cmd(dev, in_param, srqn, 1,
3367 						       MLX4_CMD_HW2SW_SRQ,
3368 						       MLX4_CMD_TIME_CLASS_A,
3369 						       MLX4_CMD_NATIVE);
3370 					if (err)
3371 						mlx4_dbg(dev, "rem_slave_srqs: failed"
3372 							 " to move slave %d srq %d to"
3373 							 " SW ownership\n",
3374 							 slave, srqn);
3375 
3376 					atomic_dec(&srq->mtt->ref_count);
3377 					if (srq->cq)
3378 						atomic_dec(&srq->cq->ref_count);
3379 					state = RES_SRQ_ALLOCATED;
3380 					break;
3381 
3382 				default:
3383 					state = 0;
3384 				}
3385 			}
3386 		}
3387 		spin_lock_irq(mlx4_tlock(dev));
3388 	}
3389 	spin_unlock_irq(mlx4_tlock(dev));
3390 }
3391 
3392 static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
3393 {
3394 	struct mlx4_priv *priv = mlx4_priv(dev);
3395 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3396 	struct list_head *cq_list =
3397 		&tracker->slave_list[slave].res_list[RES_CQ];
3398 	struct res_cq *cq;
3399 	struct res_cq *tmp;
3400 	int state;
3401 	u64 in_param;
3402 	LIST_HEAD(tlist);
3403 	int cqn;
3404 	int err;
3405 
3406 	err = move_all_busy(dev, slave, RES_CQ);
3407 	if (err)
3408 		mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
3409 			  "busy for slave %d\n", slave);
3410 
3411 	spin_lock_irq(mlx4_tlock(dev));
3412 	list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
3413 		spin_unlock_irq(mlx4_tlock(dev));
3414 		if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
3415 			cqn = cq->com.res_id;
3416 			state = cq->com.from_state;
3417 			while (state != 0) {
3418 				switch (state) {
3419 				case RES_CQ_ALLOCATED:
3420 					__mlx4_cq_free_icm(dev, cqn);
3421 					spin_lock_irq(mlx4_tlock(dev));
3422 					rb_erase(&cq->com.node,
3423 						 &tracker->res_tree[RES_CQ]);
3424 					list_del(&cq->com.list);
3425 					spin_unlock_irq(mlx4_tlock(dev));
3426 					kfree(cq);
3427 					state = 0;
3428 					break;
3429 
3430 				case RES_CQ_HW:
3431 					in_param = slave;
3432 					err = mlx4_cmd(dev, in_param, cqn, 1,
3433 						       MLX4_CMD_HW2SW_CQ,
3434 						       MLX4_CMD_TIME_CLASS_A,
3435 						       MLX4_CMD_NATIVE);
3436 					if (err)
3437 						mlx4_dbg(dev, "rem_slave_cqs: failed"
3438 							 " to move slave %d cq %d to"
3439 							 " SW ownership\n",
3440 							 slave, cqn);
3441 					atomic_dec(&cq->mtt->ref_count);
3442 					state = RES_CQ_ALLOCATED;
3443 					break;
3444 
3445 				default:
3446 					state = 0;
3447 				}
3448 			}
3449 		}
3450 		spin_lock_irq(mlx4_tlock(dev));
3451 	}
3452 	spin_unlock_irq(mlx4_tlock(dev));
3453 }
3454 
3455 static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
3456 {
3457 	struct mlx4_priv *priv = mlx4_priv(dev);
3458 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3459 	struct list_head *mpt_list =
3460 		&tracker->slave_list[slave].res_list[RES_MPT];
3461 	struct res_mpt *mpt;
3462 	struct res_mpt *tmp;
3463 	int state;
3464 	u64 in_param;
3465 	LIST_HEAD(tlist);
3466 	int mptn;
3467 	int err;
3468 
3469 	err = move_all_busy(dev, slave, RES_MPT);
3470 	if (err)
3471 		mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
3472 			  "busy for slave %d\n", slave);
3473 
3474 	spin_lock_irq(mlx4_tlock(dev));
3475 	list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
3476 		spin_unlock_irq(mlx4_tlock(dev));
3477 		if (mpt->com.owner == slave) {
3478 			mptn = mpt->com.res_id;
3479 			state = mpt->com.from_state;
3480 			while (state != 0) {
3481 				switch (state) {
3482 				case RES_MPT_RESERVED:
3483 					__mlx4_mr_release(dev, mpt->key);
3484 					spin_lock_irq(mlx4_tlock(dev));
3485 					rb_erase(&mpt->com.node,
3486 						 &tracker->res_tree[RES_MPT]);
3487 					list_del(&mpt->com.list);
3488 					spin_unlock_irq(mlx4_tlock(dev));
3489 					kfree(mpt);
3490 					state = 0;
3491 					break;
3492 
3493 				case RES_MPT_MAPPED:
3494 					__mlx4_mr_free_icm(dev, mpt->key);
3495 					state = RES_MPT_RESERVED;
3496 					break;
3497 
3498 				case RES_MPT_HW:
3499 					in_param = slave;
3500 					err = mlx4_cmd(dev, in_param, mptn, 0,
3501 						     MLX4_CMD_HW2SW_MPT,
3502 						     MLX4_CMD_TIME_CLASS_A,
3503 						     MLX4_CMD_NATIVE);
3504 					if (err)
3505 						mlx4_dbg(dev, "rem_slave_mrs: failed"
3506 							 " to move slave %d mpt %d to"
3507 							 " SW ownership\n",
3508 							 slave, mptn);
3509 					if (mpt->mtt)
3510 						atomic_dec(&mpt->mtt->ref_count);
3511 					state = RES_MPT_MAPPED;
3512 					break;
3513 				default:
3514 					state = 0;
3515 				}
3516 			}
3517 		}
3518 		spin_lock_irq(mlx4_tlock(dev));
3519 	}
3520 	spin_unlock_irq(mlx4_tlock(dev));
3521 }
3522 
3523 static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
3524 {
3525 	struct mlx4_priv *priv = mlx4_priv(dev);
3526 	struct mlx4_resource_tracker *tracker =
3527 		&priv->mfunc.master.res_tracker;
3528 	struct list_head *mtt_list =
3529 		&tracker->slave_list[slave].res_list[RES_MTT];
3530 	struct res_mtt *mtt;
3531 	struct res_mtt *tmp;
3532 	int state;
3533 	LIST_HEAD(tlist);
3534 	int base;
3535 	int err;
3536 
3537 	err = move_all_busy(dev, slave, RES_MTT);
3538 	if (err)
3539 		mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
3540 			  "busy for slave %d\n", slave);
3541 
3542 	spin_lock_irq(mlx4_tlock(dev));
3543 	list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
3544 		spin_unlock_irq(mlx4_tlock(dev));
3545 		if (mtt->com.owner == slave) {
3546 			base = mtt->com.res_id;
3547 			state = mtt->com.from_state;
3548 			while (state != 0) {
3549 				switch (state) {
3550 				case RES_MTT_ALLOCATED:
3551 					__mlx4_free_mtt_range(dev, base,
3552 							      mtt->order);
3553 					spin_lock_irq(mlx4_tlock(dev));
3554 					rb_erase(&mtt->com.node,
3555 						 &tracker->res_tree[RES_MTT]);
3556 					list_del(&mtt->com.list);
3557 					spin_unlock_irq(mlx4_tlock(dev));
3558 					kfree(mtt);
3559 					state = 0;
3560 					break;
3561 
3562 				default:
3563 					state = 0;
3564 				}
3565 			}
3566 		}
3567 		spin_lock_irq(mlx4_tlock(dev));
3568 	}
3569 	spin_unlock_irq(mlx4_tlock(dev));
3570 }
3571 
3572 static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
3573 {
3574 	struct mlx4_priv *priv = mlx4_priv(dev);
3575 	struct mlx4_resource_tracker *tracker =
3576 		&priv->mfunc.master.res_tracker;
3577 	struct list_head *fs_rule_list =
3578 		&tracker->slave_list[slave].res_list[RES_FS_RULE];
3579 	struct res_fs_rule *fs_rule;
3580 	struct res_fs_rule *tmp;
3581 	int state;
3582 	u64 base;
3583 	int err;
3584 
3585 	err = move_all_busy(dev, slave, RES_FS_RULE);
3586 	if (err)
		mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
			  slave);
3589 
3590 	spin_lock_irq(mlx4_tlock(dev));
3591 	list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
3592 		spin_unlock_irq(mlx4_tlock(dev));
3593 		if (fs_rule->com.owner == slave) {
3594 			base = fs_rule->com.res_id;
3595 			state = fs_rule->com.from_state;
3596 			while (state != 0) {
3597 				switch (state) {
3598 				case RES_FS_RULE_ALLOCATED:
3599 					/* detach rule */
3600 					err = mlx4_cmd(dev, base, 0, 0,
3601 						       MLX4_QP_FLOW_STEERING_DETACH,
3602 						       MLX4_CMD_TIME_CLASS_A,
3603 						       MLX4_CMD_NATIVE);
3604 
3605 					spin_lock_irq(mlx4_tlock(dev));
3606 					rb_erase(&fs_rule->com.node,
3607 						 &tracker->res_tree[RES_FS_RULE]);
3608 					list_del(&fs_rule->com.list);
3609 					spin_unlock_irq(mlx4_tlock(dev));
3610 					kfree(fs_rule);
3611 					state = 0;
3612 					break;
3613 
3614 				default:
3615 					state = 0;
3616 				}
3617 			}
3618 		}
3619 		spin_lock_irq(mlx4_tlock(dev));
3620 	}
3621 	spin_unlock_irq(mlx4_tlock(dev));
3622 }
3623 
3624 static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
3625 {
3626 	struct mlx4_priv *priv = mlx4_priv(dev);
3627 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3628 	struct list_head *eq_list =
3629 		&tracker->slave_list[slave].res_list[RES_EQ];
3630 	struct res_eq *eq;
3631 	struct res_eq *tmp;
3632 	int err;
3633 	int state;
3634 	LIST_HEAD(tlist);
3635 	int eqn;
3636 	struct mlx4_cmd_mailbox *mailbox;
3637 
3638 	err = move_all_busy(dev, slave, RES_EQ);
3639 	if (err)
3640 		mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
3641 			  "busy for slave %d\n", slave);
3642 
3643 	spin_lock_irq(mlx4_tlock(dev));
3644 	list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
3645 		spin_unlock_irq(mlx4_tlock(dev));
3646 		if (eq->com.owner == slave) {
3647 			eqn = eq->com.res_id;
3648 			state = eq->com.from_state;
3649 			while (state != 0) {
3650 				switch (state) {
3651 				case RES_EQ_RESERVED:
3652 					spin_lock_irq(mlx4_tlock(dev));
3653 					rb_erase(&eq->com.node,
3654 						 &tracker->res_tree[RES_EQ]);
3655 					list_del(&eq->com.list);
3656 					spin_unlock_irq(mlx4_tlock(dev));
3657 					kfree(eq);
3658 					state = 0;
3659 					break;
3660 
3661 				case RES_EQ_HW:
3662 					mailbox = mlx4_alloc_cmd_mailbox(dev);
3663 					if (IS_ERR(mailbox)) {
3664 						cond_resched();
3665 						continue;
3666 					}
3667 					err = mlx4_cmd_box(dev, slave, 0,
3668 							   eqn & 0xff, 0,
3669 							   MLX4_CMD_HW2SW_EQ,
3670 							   MLX4_CMD_TIME_CLASS_A,
3671 							   MLX4_CMD_NATIVE);
3672 					if (err)
3673 						mlx4_dbg(dev, "rem_slave_eqs: failed"
3674 							 " to move slave %d eqs %d to"
3675 							 " SW ownership\n", slave, eqn);
3676 					mlx4_free_cmd_mailbox(dev, mailbox);
3677 					atomic_dec(&eq->mtt->ref_count);
3678 					state = RES_EQ_RESERVED;
3679 					break;
3680 
3681 				default:
3682 					state = 0;
3683 				}
3684 			}
3685 		}
3686 		spin_lock_irq(mlx4_tlock(dev));
3687 	}
3688 	spin_unlock_irq(mlx4_tlock(dev));
3689 }
3690 
3691 static void rem_slave_counters(struct mlx4_dev *dev, int slave)
3692 {
3693 	struct mlx4_priv *priv = mlx4_priv(dev);
3694 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3695 	struct list_head *counter_list =
3696 		&tracker->slave_list[slave].res_list[RES_COUNTER];
3697 	struct res_counter *counter;
3698 	struct res_counter *tmp;
3699 	int err;
3700 	int index;
3701 
3702 	err = move_all_busy(dev, slave, RES_COUNTER);
3703 	if (err)
3704 		mlx4_warn(dev, "rem_slave_counters: Could not move all counters to "
3705 			  "busy for slave %d\n", slave);
3706 
3707 	spin_lock_irq(mlx4_tlock(dev));
3708 	list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
3709 		if (counter->com.owner == slave) {
3710 			index = counter->com.res_id;
3711 			rb_erase(&counter->com.node,
3712 				 &tracker->res_tree[RES_COUNTER]);
3713 			list_del(&counter->com.list);
3714 			kfree(counter);
3715 			__mlx4_counter_free(dev, index);
3716 		}
3717 	}
3718 	spin_unlock_irq(mlx4_tlock(dev));
3719 }
3720 
3721 static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
3722 {
3723 	struct mlx4_priv *priv = mlx4_priv(dev);
3724 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3725 	struct list_head *xrcdn_list =
3726 		&tracker->slave_list[slave].res_list[RES_XRCD];
3727 	struct res_xrcdn *xrcd;
3728 	struct res_xrcdn *tmp;
3729 	int err;
3730 	int xrcdn;
3731 
3732 	err = move_all_busy(dev, slave, RES_XRCD);
3733 	if (err)
3734 		mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to "
3735 			  "busy for slave %d\n", slave);
3736 
3737 	spin_lock_irq(mlx4_tlock(dev));
3738 	list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
3739 		if (xrcd->com.owner == slave) {
3740 			xrcdn = xrcd->com.res_id;
3741 			rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
3742 			list_del(&xrcd->com.list);
3743 			kfree(xrcd);
3744 			__mlx4_xrcd_free(dev, xrcdn);
3745 		}
3746 	}
3747 	spin_unlock_irq(mlx4_tlock(dev));
3748 }
3749 
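/*
 * Release everything a slave owns.  QPs are cleaned up first (detaching
 * their multicast groups and dropping their CQ/SRQ/MTT references), then
 * SRQs and CQs, and MTTs only after the objects that map them, so that
 * the reference counts taken earlier have already been released.
 */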
3750 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
3751 {
3752 	struct mlx4_priv *priv = mlx4_priv(dev);
3753 
3754 	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3755 	/*VLAN*/
3756 	rem_slave_macs(dev, slave);
3757 	rem_slave_qps(dev, slave);
3758 	rem_slave_srqs(dev, slave);
3759 	rem_slave_cqs(dev, slave);
3760 	rem_slave_mrs(dev, slave);
3761 	rem_slave_eqs(dev, slave);
3762 	rem_slave_mtts(dev, slave);
3763 	rem_slave_counters(dev, slave);
3764 	rem_slave_xrcdns(dev, slave);
3765 	rem_slave_fs_rule(dev, slave);
3766 	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3767 }
3768