1 /*
2  * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
4  * All rights reserved.
5  * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * OpenIB.org BSD license below:
12  *
13  *     Redistribution and use in source and binary forms, with or
14  *     without modification, are permitted provided that the following
15  *     conditions are met:
16  *
17  *      - Redistributions of source code must retain the above
18  *        copyright notice, this list of conditions and the following
19  *        disclaimer.
20  *
21  *      - Redistributions in binary form must reproduce the above
22  *        copyright notice, this list of conditions and the following
23  *        disclaimer in the documentation and/or other materials
24  *        provided with the distribution.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33  * SOFTWARE.
34  */
35 
36 #include <linux/sched.h>
37 #include <linux/pci.h>
38 #include <linux/errno.h>
39 #include <linux/kernel.h>
40 #include <linux/io.h>
41 #include <linux/slab.h>
42 #include <linux/mlx4/cmd.h>
43 #include <linux/mlx4/qp.h>
44 #include <linux/if_ether.h>
45 #include <linux/etherdevice.h>
46 
47 #include "mlx4.h"
48 #include "fw.h"
49 
50 #define MLX4_MAC_VALID		(1ull << 63)
51 
52 struct mac_res {
53 	struct list_head list;
54 	u64 mac;
55 	u8 port;
56 };
57 
58 struct res_common {
59 	struct list_head	list;
60 	struct rb_node		node;
61 	u64		        res_id;
62 	int			owner;
63 	int			state;
64 	int			from_state;
65 	int			to_state;
66 	int			removing;
67 };
68 
69 enum {
70 	RES_ANY_BUSY = 1
71 };
72 
73 struct res_gid {
74 	struct list_head	list;
75 	u8			gid[16];
76 	enum mlx4_protocol	prot;
77 	enum mlx4_steer_type	steer;
78 	u64			reg_id;
79 };
80 
81 enum res_qp_states {
82 	RES_QP_BUSY = RES_ANY_BUSY,
83 
84 	/* QP number was allocated */
85 	RES_QP_RESERVED,
86 
87 	/* ICM memory for QP context was mapped */
88 	RES_QP_MAPPED,
89 
90 	/* QP is in hw ownership */
91 	RES_QP_HW
92 };
93 
94 struct res_qp {
95 	struct res_common	com;
96 	struct res_mtt	       *mtt;
97 	struct res_cq	       *rcq;
98 	struct res_cq	       *scq;
99 	struct res_srq	       *srq;
100 	struct list_head	mcg_list;
101 	spinlock_t		mcg_spl;
102 	int			local_qpn;
103 	atomic_t		ref_count;
104 	u32			qpc_flags;
105 	u8			sched_queue;
106 };
107 
108 enum res_mtt_states {
109 	RES_MTT_BUSY = RES_ANY_BUSY,
110 	RES_MTT_ALLOCATED,
111 };
112 
113 static inline const char *mtt_states_str(enum res_mtt_states state)
114 {
115 	switch (state) {
116 	case RES_MTT_BUSY: return "RES_MTT_BUSY";
117 	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
118 	default: return "Unknown";
119 	}
120 }
121 
122 struct res_mtt {
123 	struct res_common	com;
124 	int			order;
125 	atomic_t		ref_count;
126 };
127 
128 enum res_mpt_states {
129 	RES_MPT_BUSY = RES_ANY_BUSY,
130 	RES_MPT_RESERVED,
131 	RES_MPT_MAPPED,
132 	RES_MPT_HW,
133 };
134 
135 struct res_mpt {
136 	struct res_common	com;
137 	struct res_mtt	       *mtt;
138 	int			key;
139 };
140 
141 enum res_eq_states {
142 	RES_EQ_BUSY = RES_ANY_BUSY,
143 	RES_EQ_RESERVED,
144 	RES_EQ_HW,
145 };
146 
147 struct res_eq {
148 	struct res_common	com;
149 	struct res_mtt	       *mtt;
150 };
151 
152 enum res_cq_states {
153 	RES_CQ_BUSY = RES_ANY_BUSY,
154 	RES_CQ_ALLOCATED,
155 	RES_CQ_HW,
156 };
157 
158 struct res_cq {
159 	struct res_common	com;
160 	struct res_mtt	       *mtt;
161 	atomic_t		ref_count;
162 };
163 
164 enum res_srq_states {
165 	RES_SRQ_BUSY = RES_ANY_BUSY,
166 	RES_SRQ_ALLOCATED,
167 	RES_SRQ_HW,
168 };
169 
170 struct res_srq {
171 	struct res_common	com;
172 	struct res_mtt	       *mtt;
173 	struct res_cq	       *cq;
174 	atomic_t		ref_count;
175 };
176 
177 enum res_counter_states {
178 	RES_COUNTER_BUSY = RES_ANY_BUSY,
179 	RES_COUNTER_ALLOCATED,
180 };
181 
182 struct res_counter {
183 	struct res_common	com;
184 	int			port;
185 };
186 
187 enum res_xrcdn_states {
188 	RES_XRCD_BUSY = RES_ANY_BUSY,
189 	RES_XRCD_ALLOCATED,
190 };
191 
192 struct res_xrcdn {
193 	struct res_common	com;
194 	int			port;
195 };
196 
197 enum res_fs_rule_states {
198 	RES_FS_RULE_BUSY = RES_ANY_BUSY,
199 	RES_FS_RULE_ALLOCATED,
200 };
201 
202 struct res_fs_rule {
203 	struct res_common	com;
204 	int			qpn;
205 };
206 
207 static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
208 {
209 	struct rb_node *node = root->rb_node;
210 
211 	while (node) {
212 		struct res_common *res = container_of(node, struct res_common,
213 						      node);
214 
215 		if (res_id < res->res_id)
216 			node = node->rb_left;
217 		else if (res_id > res->res_id)
218 			node = node->rb_right;
219 		else
220 			return res;
221 	}
222 	return NULL;
223 }
224 
225 static int res_tracker_insert(struct rb_root *root, struct res_common *res)
226 {
227 	struct rb_node **new = &(root->rb_node), *parent = NULL;
228 
229 	/* Figure out where to put new node */
230 	while (*new) {
231 		struct res_common *this = container_of(*new, struct res_common,
232 						       node);
233 
234 		parent = *new;
235 		if (res->res_id < this->res_id)
236 			new = &((*new)->rb_left);
237 		else if (res->res_id > this->res_id)
238 			new = &((*new)->rb_right);
239 		else
240 			return -EEXIST;
241 	}
242 
243 	/* Add new node and rebalance tree. */
244 	rb_link_node(&res->node, parent, new);
245 	rb_insert_color(&res->node, root);
246 
247 	return 0;
248 }
249 
250 enum qp_transition {
251 	QP_TRANS_INIT2RTR,
252 	QP_TRANS_RTR2RTS,
253 	QP_TRANS_RTS2RTS,
254 	QP_TRANS_SQERR2RTS,
255 	QP_TRANS_SQD2SQD,
256 	QP_TRANS_SQD2RTS
257 };
258 
259 /* For Debug uses */
260 static const char *ResourceType(enum mlx4_resource rt)
261 {
262 	switch (rt) {
263 	case RES_QP: return "RES_QP";
264 	case RES_CQ: return "RES_CQ";
265 	case RES_SRQ: return "RES_SRQ";
266 	case RES_MPT: return "RES_MPT";
267 	case RES_MTT: return "RES_MTT";
268 	case RES_MAC: return  "RES_MAC";
269 	case RES_EQ: return "RES_EQ";
270 	case RES_COUNTER: return "RES_COUNTER";
271 	case RES_FS_RULE: return "RES_FS_RULE";
272 	case RES_XRCD: return "RES_XRCD";
273 	default: return "Unknown resource type !!!";
274 	};
275 }
276 
277 int mlx4_init_resource_tracker(struct mlx4_dev *dev)
278 {
279 	struct mlx4_priv *priv = mlx4_priv(dev);
280 	int i;
281 	int t;
282 
283 	priv->mfunc.master.res_tracker.slave_list =
284 		kzalloc(dev->num_slaves * sizeof(struct slave_list),
285 			GFP_KERNEL);
286 	if (!priv->mfunc.master.res_tracker.slave_list)
287 		return -ENOMEM;
288 
289 	for (i = 0 ; i < dev->num_slaves; i++) {
290 		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
291 			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
292 				       slave_list[i].res_list[t]);
293 		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
294 	}
295 
296 	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
297 		 dev->num_slaves);
298 	for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
299 		priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
300 
301 	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
302 	return 0 ;
303 }
304 
305 void mlx4_free_resource_tracker(struct mlx4_dev *dev,
306 				enum mlx4_res_tracker_free_type type)
307 {
308 	struct mlx4_priv *priv = mlx4_priv(dev);
309 	int i;
310 
311 	if (priv->mfunc.master.res_tracker.slave_list) {
312 		if (type != RES_TR_FREE_STRUCTS_ONLY)
313 			for (i = 0 ; i < dev->num_slaves; i++)
314 				if (type == RES_TR_FREE_ALL ||
315 				    dev->caps.function != i)
316 					mlx4_delete_all_resources_for_slave(dev, i);
317 
318 		if (type != RES_TR_FREE_SLAVES_ONLY) {
319 			kfree(priv->mfunc.master.res_tracker.slave_list);
320 			priv->mfunc.master.res_tracker.slave_list = NULL;
321 		}
322 	}
323 }
324 
325 static void update_pkey_index(struct mlx4_dev *dev, int slave,
326 			      struct mlx4_cmd_mailbox *inbox)
327 {
328 	u8 sched = *(u8 *)(inbox->buf + 64);
329 	u8 orig_index = *(u8 *)(inbox->buf + 35);
330 	u8 new_index;
331 	struct mlx4_priv *priv = mlx4_priv(dev);
332 	int port;
333 
334 	port = (sched >> 6 & 1) + 1;
335 
336 	new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
337 	*(u8 *)(inbox->buf + 35) = new_index;
338 }
339 
340 static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
341 		       u8 slave)
342 {
343 	struct mlx4_qp_context	*qp_ctx = inbox->buf + 8;
344 	enum mlx4_qp_optpar	optpar = be32_to_cpu(*(__be32 *) inbox->buf);
345 	u32			ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
346 
347 	if (MLX4_QP_ST_UD == ts)
348 		qp_ctx->pri_path.mgid_index = 0x80 | slave;
349 
350 	if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_UC == ts) {
351 		if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
352 			qp_ctx->pri_path.mgid_index = slave & 0x7F;
353 		if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
354 			qp_ctx->alt_path.mgid_index = slave & 0x7F;
355 	}
356 }
357 
358 static int update_vport_qp_param(struct mlx4_dev *dev,
359 				 struct mlx4_cmd_mailbox *inbox,
360 				 u8 slave, u32 qpn)
361 {
362 	struct mlx4_qp_context	*qpc = inbox->buf + 8;
363 	struct mlx4_vport_oper_state *vp_oper;
364 	struct mlx4_priv *priv;
365 	u32 qp_type;
366 	int port;
367 
368 	port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
369 	priv = mlx4_priv(dev);
370 	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
371 
372 	if (MLX4_VGT != vp_oper->state.default_vlan) {
373 		qp_type	= (be32_to_cpu(qpc->flags) >> 16) & 0xff;
374 		if (MLX4_QP_ST_RC == qp_type ||
375 		    (MLX4_QP_ST_UD == qp_type &&
376 		     !mlx4_is_qp_reserved(dev, qpn)))
377 			return -EINVAL;
378 
379 		/* the reserved QPs (special, proxy, tunnel)
380 		 * do not operate over vlans
381 		 */
382 		if (mlx4_is_qp_reserved(dev, qpn))
383 			return 0;
384 
385 		/* force strip vlan by clear vsd */
386 		qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
387 
388 		if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
389 		    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
390 			qpc->pri_path.vlan_control =
391 				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
392 				MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
393 				MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
394 				MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
395 				MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
396 				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
397 		} else if (0 != vp_oper->state.default_vlan) {
398 			qpc->pri_path.vlan_control =
399 				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
400 				MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
401 				MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
402 		} else { /* priority tagged */
403 			qpc->pri_path.vlan_control =
404 				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
405 				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
406 		}
407 
408 		qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
409 		qpc->pri_path.vlan_index = vp_oper->vlan_idx;
410 		qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
411 		qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
412 		qpc->pri_path.sched_queue &= 0xC7;
413 		qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
414 	}
415 	if (vp_oper->state.spoofchk) {
416 		qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
417 		qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
418 	}
419 	return 0;
420 }
421 
422 static int mpt_mask(struct mlx4_dev *dev)
423 {
424 	return dev->caps.num_mpts - 1;
425 }
426 
427 static void *find_res(struct mlx4_dev *dev, u64 res_id,
428 		      enum mlx4_resource type)
429 {
430 	struct mlx4_priv *priv = mlx4_priv(dev);
431 
432 	return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
433 				  res_id);
434 }
435 
436 static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
437 		   enum mlx4_resource type,
438 		   void *res)
439 {
440 	struct res_common *r;
441 	int err = 0;
442 
443 	spin_lock_irq(mlx4_tlock(dev));
444 	r = find_res(dev, res_id, type);
445 	if (!r) {
446 		err = -ENONET;
447 		goto exit;
448 	}
449 
450 	if (r->state == RES_ANY_BUSY) {
451 		err = -EBUSY;
452 		goto exit;
453 	}
454 
455 	if (r->owner != slave) {
456 		err = -EPERM;
457 		goto exit;
458 	}
459 
460 	r->from_state = r->state;
461 	r->state = RES_ANY_BUSY;
462 
463 	if (res)
464 		*((struct res_common **)res) = r;
465 
466 exit:
467 	spin_unlock_irq(mlx4_tlock(dev));
468 	return err;
469 }
470 
471 int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
472 				    enum mlx4_resource type,
473 				    u64 res_id, int *slave)
474 {
475 
476 	struct res_common *r;
477 	int err = -ENOENT;
478 	int id = res_id;
479 
480 	if (type == RES_QP)
481 		id &= 0x7fffff;
482 	spin_lock(mlx4_tlock(dev));
483 
484 	r = find_res(dev, id, type);
485 	if (r) {
486 		*slave = r->owner;
487 		err = 0;
488 	}
489 	spin_unlock(mlx4_tlock(dev));
490 
491 	return err;
492 }
493 
494 static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
495 		    enum mlx4_resource type)
496 {
497 	struct res_common *r;
498 
499 	spin_lock_irq(mlx4_tlock(dev));
500 	r = find_res(dev, res_id, type);
501 	if (r)
502 		r->state = r->from_state;
503 	spin_unlock_irq(mlx4_tlock(dev));
504 }
505 
506 static struct res_common *alloc_qp_tr(int id)
507 {
508 	struct res_qp *ret;
509 
510 	ret = kzalloc(sizeof *ret, GFP_KERNEL);
511 	if (!ret)
512 		return NULL;
513 
514 	ret->com.res_id = id;
515 	ret->com.state = RES_QP_RESERVED;
516 	ret->local_qpn = id;
517 	INIT_LIST_HEAD(&ret->mcg_list);
518 	spin_lock_init(&ret->mcg_spl);
519 	atomic_set(&ret->ref_count, 0);
520 
521 	return &ret->com;
522 }
523 
524 static struct res_common *alloc_mtt_tr(int id, int order)
525 {
526 	struct res_mtt *ret;
527 
528 	ret = kzalloc(sizeof *ret, GFP_KERNEL);
529 	if (!ret)
530 		return NULL;
531 
532 	ret->com.res_id = id;
533 	ret->order = order;
534 	ret->com.state = RES_MTT_ALLOCATED;
535 	atomic_set(&ret->ref_count, 0);
536 
537 	return &ret->com;
538 }
539 
540 static struct res_common *alloc_mpt_tr(int id, int key)
541 {
542 	struct res_mpt *ret;
543 
544 	ret = kzalloc(sizeof *ret, GFP_KERNEL);
545 	if (!ret)
546 		return NULL;
547 
548 	ret->com.res_id = id;
549 	ret->com.state = RES_MPT_RESERVED;
550 	ret->key = key;
551 
552 	return &ret->com;
553 }
554 
555 static struct res_common *alloc_eq_tr(int id)
556 {
557 	struct res_eq *ret;
558 
559 	ret = kzalloc(sizeof *ret, GFP_KERNEL);
560 	if (!ret)
561 		return NULL;
562 
563 	ret->com.res_id = id;
564 	ret->com.state = RES_EQ_RESERVED;
565 
566 	return &ret->com;
567 }
568 
569 static struct res_common *alloc_cq_tr(int id)
570 {
571 	struct res_cq *ret;
572 
573 	ret = kzalloc(sizeof *ret, GFP_KERNEL);
574 	if (!ret)
575 		return NULL;
576 
577 	ret->com.res_id = id;
578 	ret->com.state = RES_CQ_ALLOCATED;
579 	atomic_set(&ret->ref_count, 0);
580 
581 	return &ret->com;
582 }
583 
584 static struct res_common *alloc_srq_tr(int id)
585 {
586 	struct res_srq *ret;
587 
588 	ret = kzalloc(sizeof *ret, GFP_KERNEL);
589 	if (!ret)
590 		return NULL;
591 
592 	ret->com.res_id = id;
593 	ret->com.state = RES_SRQ_ALLOCATED;
594 	atomic_set(&ret->ref_count, 0);
595 
596 	return &ret->com;
597 }
598 
599 static struct res_common *alloc_counter_tr(int id)
600 {
601 	struct res_counter *ret;
602 
603 	ret = kzalloc(sizeof *ret, GFP_KERNEL);
604 	if (!ret)
605 		return NULL;
606 
607 	ret->com.res_id = id;
608 	ret->com.state = RES_COUNTER_ALLOCATED;
609 
610 	return &ret->com;
611 }
612 
613 static struct res_common *alloc_xrcdn_tr(int id)
614 {
615 	struct res_xrcdn *ret;
616 
617 	ret = kzalloc(sizeof *ret, GFP_KERNEL);
618 	if (!ret)
619 		return NULL;
620 
621 	ret->com.res_id = id;
622 	ret->com.state = RES_XRCD_ALLOCATED;
623 
624 	return &ret->com;
625 }
626 
627 static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
628 {
629 	struct res_fs_rule *ret;
630 
631 	ret = kzalloc(sizeof *ret, GFP_KERNEL);
632 	if (!ret)
633 		return NULL;
634 
635 	ret->com.res_id = id;
636 	ret->com.state = RES_FS_RULE_ALLOCATED;
637 	ret->qpn = qpn;
638 	return &ret->com;
639 }
640 
641 static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
642 				   int extra)
643 {
644 	struct res_common *ret;
645 
646 	switch (type) {
647 	case RES_QP:
648 		ret = alloc_qp_tr(id);
649 		break;
650 	case RES_MPT:
651 		ret = alloc_mpt_tr(id, extra);
652 		break;
653 	case RES_MTT:
654 		ret = alloc_mtt_tr(id, extra);
655 		break;
656 	case RES_EQ:
657 		ret = alloc_eq_tr(id);
658 		break;
659 	case RES_CQ:
660 		ret = alloc_cq_tr(id);
661 		break;
662 	case RES_SRQ:
663 		ret = alloc_srq_tr(id);
664 		break;
665 	case RES_MAC:
666 		printk(KERN_ERR "implementation missing\n");
667 		return NULL;
668 	case RES_COUNTER:
669 		ret = alloc_counter_tr(id);
670 		break;
671 	case RES_XRCD:
672 		ret = alloc_xrcdn_tr(id);
673 		break;
674 	case RES_FS_RULE:
675 		ret = alloc_fs_rule_tr(id, extra);
676 		break;
677 	default:
678 		return NULL;
679 	}
680 	if (ret)
681 		ret->owner = slave;
682 
683 	return ret;
684 }
685 
686 static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
687 			 enum mlx4_resource type, int extra)
688 {
689 	int i;
690 	int err;
691 	struct mlx4_priv *priv = mlx4_priv(dev);
692 	struct res_common **res_arr;
693 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
694 	struct rb_root *root = &tracker->res_tree[type];
695 
696 	res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
697 	if (!res_arr)
698 		return -ENOMEM;
699 
700 	for (i = 0; i < count; ++i) {
701 		res_arr[i] = alloc_tr(base + i, type, slave, extra);
702 		if (!res_arr[i]) {
703 			for (--i; i >= 0; --i)
704 				kfree(res_arr[i]);
705 
706 			kfree(res_arr);
707 			return -ENOMEM;
708 		}
709 	}
710 
711 	spin_lock_irq(mlx4_tlock(dev));
712 	for (i = 0; i < count; ++i) {
713 		if (find_res(dev, base + i, type)) {
714 			err = -EEXIST;
715 			goto undo;
716 		}
717 		err = res_tracker_insert(root, res_arr[i]);
718 		if (err)
719 			goto undo;
720 		list_add_tail(&res_arr[i]->list,
721 			      &tracker->slave_list[slave].res_list[type]);
722 	}
723 	spin_unlock_irq(mlx4_tlock(dev));
724 	kfree(res_arr);
725 
726 	return 0;
727 
728 undo:
729 	for (--i; i >= base; --i)
730 		rb_erase(&res_arr[i]->node, root);
731 
732 	spin_unlock_irq(mlx4_tlock(dev));
733 
734 	for (i = 0; i < count; ++i)
735 		kfree(res_arr[i]);
736 
737 	kfree(res_arr);
738 
739 	return err;
740 }
741 
742 static int remove_qp_ok(struct res_qp *res)
743 {
744 	if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
745 	    !list_empty(&res->mcg_list)) {
746 		pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
747 		       res->com.state, atomic_read(&res->ref_count));
748 		return -EBUSY;
749 	} else if (res->com.state != RES_QP_RESERVED) {
750 		return -EPERM;
751 	}
752 
753 	return 0;
754 }
755 
756 static int remove_mtt_ok(struct res_mtt *res, int order)
757 {
758 	if (res->com.state == RES_MTT_BUSY ||
759 	    atomic_read(&res->ref_count)) {
760 		printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
761 		       __func__, __LINE__,
762 		       mtt_states_str(res->com.state),
763 		       atomic_read(&res->ref_count));
764 		return -EBUSY;
765 	} else if (res->com.state != RES_MTT_ALLOCATED)
766 		return -EPERM;
767 	else if (res->order != order)
768 		return -EINVAL;
769 
770 	return 0;
771 }
772 
773 static int remove_mpt_ok(struct res_mpt *res)
774 {
775 	if (res->com.state == RES_MPT_BUSY)
776 		return -EBUSY;
777 	else if (res->com.state != RES_MPT_RESERVED)
778 		return -EPERM;
779 
780 	return 0;
781 }
782 
783 static int remove_eq_ok(struct res_eq *res)
784 {
785 	if (res->com.state == RES_MPT_BUSY)
786 		return -EBUSY;
787 	else if (res->com.state != RES_MPT_RESERVED)
788 		return -EPERM;
789 
790 	return 0;
791 }
792 
793 static int remove_counter_ok(struct res_counter *res)
794 {
795 	if (res->com.state == RES_COUNTER_BUSY)
796 		return -EBUSY;
797 	else if (res->com.state != RES_COUNTER_ALLOCATED)
798 		return -EPERM;
799 
800 	return 0;
801 }
802 
803 static int remove_xrcdn_ok(struct res_xrcdn *res)
804 {
805 	if (res->com.state == RES_XRCD_BUSY)
806 		return -EBUSY;
807 	else if (res->com.state != RES_XRCD_ALLOCATED)
808 		return -EPERM;
809 
810 	return 0;
811 }
812 
813 static int remove_fs_rule_ok(struct res_fs_rule *res)
814 {
815 	if (res->com.state == RES_FS_RULE_BUSY)
816 		return -EBUSY;
817 	else if (res->com.state != RES_FS_RULE_ALLOCATED)
818 		return -EPERM;
819 
820 	return 0;
821 }
822 
823 static int remove_cq_ok(struct res_cq *res)
824 {
825 	if (res->com.state == RES_CQ_BUSY)
826 		return -EBUSY;
827 	else if (res->com.state != RES_CQ_ALLOCATED)
828 		return -EPERM;
829 
830 	return 0;
831 }
832 
833 static int remove_srq_ok(struct res_srq *res)
834 {
835 	if (res->com.state == RES_SRQ_BUSY)
836 		return -EBUSY;
837 	else if (res->com.state != RES_SRQ_ALLOCATED)
838 		return -EPERM;
839 
840 	return 0;
841 }
842 
843 static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
844 {
845 	switch (type) {
846 	case RES_QP:
847 		return remove_qp_ok((struct res_qp *)res);
848 	case RES_CQ:
849 		return remove_cq_ok((struct res_cq *)res);
850 	case RES_SRQ:
851 		return remove_srq_ok((struct res_srq *)res);
852 	case RES_MPT:
853 		return remove_mpt_ok((struct res_mpt *)res);
854 	case RES_MTT:
855 		return remove_mtt_ok((struct res_mtt *)res, extra);
856 	case RES_MAC:
857 		return -ENOSYS;
858 	case RES_EQ:
859 		return remove_eq_ok((struct res_eq *)res);
860 	case RES_COUNTER:
861 		return remove_counter_ok((struct res_counter *)res);
862 	case RES_XRCD:
863 		return remove_xrcdn_ok((struct res_xrcdn *)res);
864 	case RES_FS_RULE:
865 		return remove_fs_rule_ok((struct res_fs_rule *)res);
866 	default:
867 		return -EINVAL;
868 	}
869 }
870 
871 static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
872 			 enum mlx4_resource type, int extra)
873 {
874 	u64 i;
875 	int err;
876 	struct mlx4_priv *priv = mlx4_priv(dev);
877 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
878 	struct res_common *r;
879 
880 	spin_lock_irq(mlx4_tlock(dev));
881 	for (i = base; i < base + count; ++i) {
882 		r = res_tracker_lookup(&tracker->res_tree[type], i);
883 		if (!r) {
884 			err = -ENOENT;
885 			goto out;
886 		}
887 		if (r->owner != slave) {
888 			err = -EPERM;
889 			goto out;
890 		}
891 		err = remove_ok(r, type, extra);
892 		if (err)
893 			goto out;
894 	}
895 
896 	for (i = base; i < base + count; ++i) {
897 		r = res_tracker_lookup(&tracker->res_tree[type], i);
898 		rb_erase(&r->node, &tracker->res_tree[type]);
899 		list_del(&r->list);
900 		kfree(r);
901 	}
902 	err = 0;
903 
904 out:
905 	spin_unlock_irq(mlx4_tlock(dev));
906 
907 	return err;
908 }
909 
910 static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
911 				enum res_qp_states state, struct res_qp **qp,
912 				int alloc)
913 {
914 	struct mlx4_priv *priv = mlx4_priv(dev);
915 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
916 	struct res_qp *r;
917 	int err = 0;
918 
919 	spin_lock_irq(mlx4_tlock(dev));
920 	r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
921 	if (!r)
922 		err = -ENOENT;
923 	else if (r->com.owner != slave)
924 		err = -EPERM;
925 	else {
926 		switch (state) {
927 		case RES_QP_BUSY:
928 			mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
929 				 __func__, r->com.res_id);
930 			err = -EBUSY;
931 			break;
932 
933 		case RES_QP_RESERVED:
934 			if (r->com.state == RES_QP_MAPPED && !alloc)
935 				break;
936 
937 			mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
938 			err = -EINVAL;
939 			break;
940 
941 		case RES_QP_MAPPED:
942 			if ((r->com.state == RES_QP_RESERVED && alloc) ||
943 			    r->com.state == RES_QP_HW)
944 				break;
945 			else {
946 				mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
947 					  r->com.res_id);
948 				err = -EINVAL;
949 			}
950 
951 			break;
952 
953 		case RES_QP_HW:
954 			if (r->com.state != RES_QP_MAPPED)
955 				err = -EINVAL;
956 			break;
957 		default:
958 			err = -EINVAL;
959 		}
960 
961 		if (!err) {
962 			r->com.from_state = r->com.state;
963 			r->com.to_state = state;
964 			r->com.state = RES_QP_BUSY;
965 			if (qp)
966 				*qp = r;
967 		}
968 	}
969 
970 	spin_unlock_irq(mlx4_tlock(dev));
971 
972 	return err;
973 }
974 
975 static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
976 				enum res_mpt_states state, struct res_mpt **mpt)
977 {
978 	struct mlx4_priv *priv = mlx4_priv(dev);
979 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
980 	struct res_mpt *r;
981 	int err = 0;
982 
983 	spin_lock_irq(mlx4_tlock(dev));
984 	r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
985 	if (!r)
986 		err = -ENOENT;
987 	else if (r->com.owner != slave)
988 		err = -EPERM;
989 	else {
990 		switch (state) {
991 		case RES_MPT_BUSY:
992 			err = -EINVAL;
993 			break;
994 
995 		case RES_MPT_RESERVED:
996 			if (r->com.state != RES_MPT_MAPPED)
997 				err = -EINVAL;
998 			break;
999 
1000 		case RES_MPT_MAPPED:
1001 			if (r->com.state != RES_MPT_RESERVED &&
1002 			    r->com.state != RES_MPT_HW)
1003 				err = -EINVAL;
1004 			break;
1005 
1006 		case RES_MPT_HW:
1007 			if (r->com.state != RES_MPT_MAPPED)
1008 				err = -EINVAL;
1009 			break;
1010 		default:
1011 			err = -EINVAL;
1012 		}
1013 
1014 		if (!err) {
1015 			r->com.from_state = r->com.state;
1016 			r->com.to_state = state;
1017 			r->com.state = RES_MPT_BUSY;
1018 			if (mpt)
1019 				*mpt = r;
1020 		}
1021 	}
1022 
1023 	spin_unlock_irq(mlx4_tlock(dev));
1024 
1025 	return err;
1026 }
1027 
1028 static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1029 				enum res_eq_states state, struct res_eq **eq)
1030 {
1031 	struct mlx4_priv *priv = mlx4_priv(dev);
1032 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1033 	struct res_eq *r;
1034 	int err = 0;
1035 
1036 	spin_lock_irq(mlx4_tlock(dev));
1037 	r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
1038 	if (!r)
1039 		err = -ENOENT;
1040 	else if (r->com.owner != slave)
1041 		err = -EPERM;
1042 	else {
1043 		switch (state) {
1044 		case RES_EQ_BUSY:
1045 			err = -EINVAL;
1046 			break;
1047 
1048 		case RES_EQ_RESERVED:
1049 			if (r->com.state != RES_EQ_HW)
1050 				err = -EINVAL;
1051 			break;
1052 
1053 		case RES_EQ_HW:
1054 			if (r->com.state != RES_EQ_RESERVED)
1055 				err = -EINVAL;
1056 			break;
1057 
1058 		default:
1059 			err = -EINVAL;
1060 		}
1061 
1062 		if (!err) {
1063 			r->com.from_state = r->com.state;
1064 			r->com.to_state = state;
1065 			r->com.state = RES_EQ_BUSY;
1066 			if (eq)
1067 				*eq = r;
1068 		}
1069 	}
1070 
1071 	spin_unlock_irq(mlx4_tlock(dev));
1072 
1073 	return err;
1074 }
1075 
1076 static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
1077 				enum res_cq_states state, struct res_cq **cq)
1078 {
1079 	struct mlx4_priv *priv = mlx4_priv(dev);
1080 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1081 	struct res_cq *r;
1082 	int err;
1083 
1084 	spin_lock_irq(mlx4_tlock(dev));
1085 	r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
1086 	if (!r)
1087 		err = -ENOENT;
1088 	else if (r->com.owner != slave)
1089 		err = -EPERM;
1090 	else {
1091 		switch (state) {
1092 		case RES_CQ_BUSY:
1093 			err = -EBUSY;
1094 			break;
1095 
1096 		case RES_CQ_ALLOCATED:
1097 			if (r->com.state != RES_CQ_HW)
1098 				err = -EINVAL;
1099 			else if (atomic_read(&r->ref_count))
1100 				err = -EBUSY;
1101 			else
1102 				err = 0;
1103 			break;
1104 
1105 		case RES_CQ_HW:
1106 			if (r->com.state != RES_CQ_ALLOCATED)
1107 				err = -EINVAL;
1108 			else
1109 				err = 0;
1110 			break;
1111 
1112 		default:
1113 			err = -EINVAL;
1114 		}
1115 
1116 		if (!err) {
1117 			r->com.from_state = r->com.state;
1118 			r->com.to_state = state;
1119 			r->com.state = RES_CQ_BUSY;
1120 			if (cq)
1121 				*cq = r;
1122 		}
1123 	}
1124 
1125 	spin_unlock_irq(mlx4_tlock(dev));
1126 
1127 	return err;
1128 }
1129 
1130 static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1131 				 enum res_cq_states state, struct res_srq **srq)
1132 {
1133 	struct mlx4_priv *priv = mlx4_priv(dev);
1134 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1135 	struct res_srq *r;
1136 	int err = 0;
1137 
1138 	spin_lock_irq(mlx4_tlock(dev));
1139 	r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
1140 	if (!r)
1141 		err = -ENOENT;
1142 	else if (r->com.owner != slave)
1143 		err = -EPERM;
1144 	else {
1145 		switch (state) {
1146 		case RES_SRQ_BUSY:
1147 			err = -EINVAL;
1148 			break;
1149 
1150 		case RES_SRQ_ALLOCATED:
1151 			if (r->com.state != RES_SRQ_HW)
1152 				err = -EINVAL;
1153 			else if (atomic_read(&r->ref_count))
1154 				err = -EBUSY;
1155 			break;
1156 
1157 		case RES_SRQ_HW:
1158 			if (r->com.state != RES_SRQ_ALLOCATED)
1159 				err = -EINVAL;
1160 			break;
1161 
1162 		default:
1163 			err = -EINVAL;
1164 		}
1165 
1166 		if (!err) {
1167 			r->com.from_state = r->com.state;
1168 			r->com.to_state = state;
1169 			r->com.state = RES_SRQ_BUSY;
1170 			if (srq)
1171 				*srq = r;
1172 		}
1173 	}
1174 
1175 	spin_unlock_irq(mlx4_tlock(dev));
1176 
1177 	return err;
1178 }
1179 
1180 static void res_abort_move(struct mlx4_dev *dev, int slave,
1181 			   enum mlx4_resource type, int id)
1182 {
1183 	struct mlx4_priv *priv = mlx4_priv(dev);
1184 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1185 	struct res_common *r;
1186 
1187 	spin_lock_irq(mlx4_tlock(dev));
1188 	r = res_tracker_lookup(&tracker->res_tree[type], id);
1189 	if (r && (r->owner == slave))
1190 		r->state = r->from_state;
1191 	spin_unlock_irq(mlx4_tlock(dev));
1192 }
1193 
1194 static void res_end_move(struct mlx4_dev *dev, int slave,
1195 			 enum mlx4_resource type, int id)
1196 {
1197 	struct mlx4_priv *priv = mlx4_priv(dev);
1198 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1199 	struct res_common *r;
1200 
1201 	spin_lock_irq(mlx4_tlock(dev));
1202 	r = res_tracker_lookup(&tracker->res_tree[type], id);
1203 	if (r && (r->owner == slave))
1204 		r->state = r->to_state;
1205 	spin_unlock_irq(mlx4_tlock(dev));
1206 }
1207 
1208 static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1209 {
1210 	return mlx4_is_qp_reserved(dev, qpn) &&
1211 		(mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
1212 }
1213 
1214 static int fw_reserved(struct mlx4_dev *dev, int qpn)
1215 {
1216 	return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
1217 }
1218 
1219 static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1220 			u64 in_param, u64 *out_param)
1221 {
1222 	int err;
1223 	int count;
1224 	int align;
1225 	int base;
1226 	int qpn;
1227 
1228 	switch (op) {
1229 	case RES_OP_RESERVE:
1230 		count = get_param_l(&in_param);
1231 		align = get_param_h(&in_param);
1232 		err = __mlx4_qp_reserve_range(dev, count, align, &base);
1233 		if (err)
1234 			return err;
1235 
1236 		err = add_res_range(dev, slave, base, count, RES_QP, 0);
1237 		if (err) {
1238 			__mlx4_qp_release_range(dev, base, count);
1239 			return err;
1240 		}
1241 		set_param_l(out_param, base);
1242 		break;
1243 	case RES_OP_MAP_ICM:
1244 		qpn = get_param_l(&in_param) & 0x7fffff;
1245 		if (valid_reserved(dev, slave, qpn)) {
1246 			err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1247 			if (err)
1248 				return err;
1249 		}
1250 
1251 		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1252 					   NULL, 1);
1253 		if (err)
1254 			return err;
1255 
1256 		if (!fw_reserved(dev, qpn)) {
1257 			err = __mlx4_qp_alloc_icm(dev, qpn);
1258 			if (err) {
1259 				res_abort_move(dev, slave, RES_QP, qpn);
1260 				return err;
1261 			}
1262 		}
1263 
1264 		res_end_move(dev, slave, RES_QP, qpn);
1265 		break;
1266 
1267 	default:
1268 		err = -EINVAL;
1269 		break;
1270 	}
1271 	return err;
1272 }
1273 
1274 static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1275 			 u64 in_param, u64 *out_param)
1276 {
1277 	int err = -EINVAL;
1278 	int base;
1279 	int order;
1280 
1281 	if (op != RES_OP_RESERVE_AND_MAP)
1282 		return err;
1283 
1284 	order = get_param_l(&in_param);
1285 	base = __mlx4_alloc_mtt_range(dev, order);
1286 	if (base == -1)
1287 		return -ENOMEM;
1288 
1289 	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1290 	if (err)
1291 		__mlx4_free_mtt_range(dev, base, order);
1292 	else
1293 		set_param_l(out_param, base);
1294 
1295 	return err;
1296 }
1297 
1298 static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1299 			 u64 in_param, u64 *out_param)
1300 {
1301 	int err = -EINVAL;
1302 	int index;
1303 	int id;
1304 	struct res_mpt *mpt;
1305 
1306 	switch (op) {
1307 	case RES_OP_RESERVE:
1308 		index = __mlx4_mpt_reserve(dev);
1309 		if (index == -1)
1310 			break;
1311 		id = index & mpt_mask(dev);
1312 
1313 		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1314 		if (err) {
1315 			__mlx4_mpt_release(dev, index);
1316 			break;
1317 		}
1318 		set_param_l(out_param, index);
1319 		break;
1320 	case RES_OP_MAP_ICM:
1321 		index = get_param_l(&in_param);
1322 		id = index & mpt_mask(dev);
1323 		err = mr_res_start_move_to(dev, slave, id,
1324 					   RES_MPT_MAPPED, &mpt);
1325 		if (err)
1326 			return err;
1327 
1328 		err = __mlx4_mpt_alloc_icm(dev, mpt->key);
1329 		if (err) {
1330 			res_abort_move(dev, slave, RES_MPT, id);
1331 			return err;
1332 		}
1333 
1334 		res_end_move(dev, slave, RES_MPT, id);
1335 		break;
1336 	}
1337 	return err;
1338 }
1339 
1340 static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1341 			u64 in_param, u64 *out_param)
1342 {
1343 	int cqn;
1344 	int err;
1345 
1346 	switch (op) {
1347 	case RES_OP_RESERVE_AND_MAP:
1348 		err = __mlx4_cq_alloc_icm(dev, &cqn);
1349 		if (err)
1350 			break;
1351 
1352 		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1353 		if (err) {
1354 			__mlx4_cq_free_icm(dev, cqn);
1355 			break;
1356 		}
1357 
1358 		set_param_l(out_param, cqn);
1359 		break;
1360 
1361 	default:
1362 		err = -EINVAL;
1363 	}
1364 
1365 	return err;
1366 }
1367 
1368 static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1369 			 u64 in_param, u64 *out_param)
1370 {
1371 	int srqn;
1372 	int err;
1373 
1374 	switch (op) {
1375 	case RES_OP_RESERVE_AND_MAP:
1376 		err = __mlx4_srq_alloc_icm(dev, &srqn);
1377 		if (err)
1378 			break;
1379 
1380 		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1381 		if (err) {
1382 			__mlx4_srq_free_icm(dev, srqn);
1383 			break;
1384 		}
1385 
1386 		set_param_l(out_param, srqn);
1387 		break;
1388 
1389 	default:
1390 		err = -EINVAL;
1391 	}
1392 
1393 	return err;
1394 }
1395 
1396 static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
1397 {
1398 	struct mlx4_priv *priv = mlx4_priv(dev);
1399 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1400 	struct mac_res *res;
1401 
1402 	res = kzalloc(sizeof *res, GFP_KERNEL);
1403 	if (!res)
1404 		return -ENOMEM;
1405 	res->mac = mac;
1406 	res->port = (u8) port;
1407 	list_add_tail(&res->list,
1408 		      &tracker->slave_list[slave].res_list[RES_MAC]);
1409 	return 0;
1410 }
1411 
1412 static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1413 			       int port)
1414 {
1415 	struct mlx4_priv *priv = mlx4_priv(dev);
1416 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1417 	struct list_head *mac_list =
1418 		&tracker->slave_list[slave].res_list[RES_MAC];
1419 	struct mac_res *res, *tmp;
1420 
1421 	list_for_each_entry_safe(res, tmp, mac_list, list) {
1422 		if (res->mac == mac && res->port == (u8) port) {
1423 			list_del(&res->list);
1424 			kfree(res);
1425 			break;
1426 		}
1427 	}
1428 }
1429 
1430 static void rem_slave_macs(struct mlx4_dev *dev, int slave)
1431 {
1432 	struct mlx4_priv *priv = mlx4_priv(dev);
1433 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1434 	struct list_head *mac_list =
1435 		&tracker->slave_list[slave].res_list[RES_MAC];
1436 	struct mac_res *res, *tmp;
1437 
1438 	list_for_each_entry_safe(res, tmp, mac_list, list) {
1439 		list_del(&res->list);
1440 		__mlx4_unregister_mac(dev, res->port, res->mac);
1441 		kfree(res);
1442 	}
1443 }
1444 
1445 static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1446 			 u64 in_param, u64 *out_param)
1447 {
1448 	int err = -EINVAL;
1449 	int port;
1450 	u64 mac;
1451 
1452 	if (op != RES_OP_RESERVE_AND_MAP)
1453 		return err;
1454 
1455 	port = get_param_l(out_param);
1456 	mac = in_param;
1457 
1458 	err = __mlx4_register_mac(dev, port, mac);
1459 	if (err >= 0) {
1460 		set_param_l(out_param, err);
1461 		err = 0;
1462 	}
1463 
1464 	if (!err) {
1465 		err = mac_add_to_slave(dev, slave, mac, port);
1466 		if (err)
1467 			__mlx4_unregister_mac(dev, port, mac);
1468 	}
1469 	return err;
1470 }
1471 
1472 static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1473 			 u64 in_param, u64 *out_param)
1474 {
1475 	return 0;
1476 }
1477 
1478 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1479 			     u64 in_param, u64 *out_param)
1480 {
1481 	u32 index;
1482 	int err;
1483 
1484 	if (op != RES_OP_RESERVE)
1485 		return -EINVAL;
1486 
1487 	err = __mlx4_counter_alloc(dev, &index);
1488 	if (err)
1489 		return err;
1490 
1491 	err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
1492 	if (err)
1493 		__mlx4_counter_free(dev, index);
1494 	else
1495 		set_param_l(out_param, index);
1496 
1497 	return err;
1498 }
1499 
1500 static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1501 			   u64 in_param, u64 *out_param)
1502 {
1503 	u32 xrcdn;
1504 	int err;
1505 
1506 	if (op != RES_OP_RESERVE)
1507 		return -EINVAL;
1508 
1509 	err = __mlx4_xrcd_alloc(dev, &xrcdn);
1510 	if (err)
1511 		return err;
1512 
1513 	err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
1514 	if (err)
1515 		__mlx4_xrcd_free(dev, xrcdn);
1516 	else
1517 		set_param_l(out_param, xrcdn);
1518 
1519 	return err;
1520 }
1521 
1522 int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
1523 			   struct mlx4_vhcr *vhcr,
1524 			   struct mlx4_cmd_mailbox *inbox,
1525 			   struct mlx4_cmd_mailbox *outbox,
1526 			   struct mlx4_cmd_info *cmd)
1527 {
1528 	int err;
1529 	int alop = vhcr->op_modifier;
1530 
1531 	switch (vhcr->in_modifier) {
1532 	case RES_QP:
1533 		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
1534 				   vhcr->in_param, &vhcr->out_param);
1535 		break;
1536 
1537 	case RES_MTT:
1538 		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
1539 				    vhcr->in_param, &vhcr->out_param);
1540 		break;
1541 
1542 	case RES_MPT:
1543 		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
1544 				    vhcr->in_param, &vhcr->out_param);
1545 		break;
1546 
1547 	case RES_CQ:
1548 		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
1549 				   vhcr->in_param, &vhcr->out_param);
1550 		break;
1551 
1552 	case RES_SRQ:
1553 		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
1554 				    vhcr->in_param, &vhcr->out_param);
1555 		break;
1556 
1557 	case RES_MAC:
1558 		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
1559 				    vhcr->in_param, &vhcr->out_param);
1560 		break;
1561 
1562 	case RES_VLAN:
1563 		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
1564 				    vhcr->in_param, &vhcr->out_param);
1565 		break;
1566 
1567 	case RES_COUNTER:
1568 		err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
1569 					vhcr->in_param, &vhcr->out_param);
1570 		break;
1571 
1572 	case RES_XRCD:
1573 		err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
1574 				      vhcr->in_param, &vhcr->out_param);
1575 		break;
1576 
1577 	default:
1578 		err = -EINVAL;
1579 		break;
1580 	}
1581 
1582 	return err;
1583 }
1584 
1585 static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1586 		       u64 in_param)
1587 {
1588 	int err;
1589 	int count;
1590 	int base;
1591 	int qpn;
1592 
1593 	switch (op) {
1594 	case RES_OP_RESERVE:
1595 		base = get_param_l(&in_param) & 0x7fffff;
1596 		count = get_param_h(&in_param);
1597 		err = rem_res_range(dev, slave, base, count, RES_QP, 0);
1598 		if (err)
1599 			break;
1600 		__mlx4_qp_release_range(dev, base, count);
1601 		break;
1602 	case RES_OP_MAP_ICM:
1603 		qpn = get_param_l(&in_param) & 0x7fffff;
1604 		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
1605 					   NULL, 0);
1606 		if (err)
1607 			return err;
1608 
1609 		if (!fw_reserved(dev, qpn))
1610 			__mlx4_qp_free_icm(dev, qpn);
1611 
1612 		res_end_move(dev, slave, RES_QP, qpn);
1613 
1614 		if (valid_reserved(dev, slave, qpn))
1615 			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
1616 		break;
1617 	default:
1618 		err = -EINVAL;
1619 		break;
1620 	}
1621 	return err;
1622 }
1623 
1624 static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1625 			u64 in_param, u64 *out_param)
1626 {
1627 	int err = -EINVAL;
1628 	int base;
1629 	int order;
1630 
1631 	if (op != RES_OP_RESERVE_AND_MAP)
1632 		return err;
1633 
1634 	base = get_param_l(&in_param);
1635 	order = get_param_h(&in_param);
1636 	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
1637 	if (!err)
1638 		__mlx4_free_mtt_range(dev, base, order);
1639 	return err;
1640 }
1641 
1642 static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1643 			u64 in_param)
1644 {
1645 	int err = -EINVAL;
1646 	int index;
1647 	int id;
1648 	struct res_mpt *mpt;
1649 
1650 	switch (op) {
1651 	case RES_OP_RESERVE:
1652 		index = get_param_l(&in_param);
1653 		id = index & mpt_mask(dev);
1654 		err = get_res(dev, slave, id, RES_MPT, &mpt);
1655 		if (err)
1656 			break;
1657 		index = mpt->key;
1658 		put_res(dev, slave, id, RES_MPT);
1659 
1660 		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
1661 		if (err)
1662 			break;
1663 		__mlx4_mpt_release(dev, index);
1664 		break;
1665 	case RES_OP_MAP_ICM:
1666 			index = get_param_l(&in_param);
1667 			id = index & mpt_mask(dev);
1668 			err = mr_res_start_move_to(dev, slave, id,
1669 						   RES_MPT_RESERVED, &mpt);
1670 			if (err)
1671 				return err;
1672 
1673 			__mlx4_mpt_free_icm(dev, mpt->key);
1674 			res_end_move(dev, slave, RES_MPT, id);
1675 			return err;
1676 		break;
1677 	default:
1678 		err = -EINVAL;
1679 		break;
1680 	}
1681 	return err;
1682 }
1683 
1684 static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1685 		       u64 in_param, u64 *out_param)
1686 {
1687 	int cqn;
1688 	int err;
1689 
1690 	switch (op) {
1691 	case RES_OP_RESERVE_AND_MAP:
1692 		cqn = get_param_l(&in_param);
1693 		err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1694 		if (err)
1695 			break;
1696 
1697 		__mlx4_cq_free_icm(dev, cqn);
1698 		break;
1699 
1700 	default:
1701 		err = -EINVAL;
1702 		break;
1703 	}
1704 
1705 	return err;
1706 }
1707 
1708 static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1709 			u64 in_param, u64 *out_param)
1710 {
1711 	int srqn;
1712 	int err;
1713 
1714 	switch (op) {
1715 	case RES_OP_RESERVE_AND_MAP:
1716 		srqn = get_param_l(&in_param);
1717 		err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1718 		if (err)
1719 			break;
1720 
1721 		__mlx4_srq_free_icm(dev, srqn);
1722 		break;
1723 
1724 	default:
1725 		err = -EINVAL;
1726 		break;
1727 	}
1728 
1729 	return err;
1730 }
1731 
1732 static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1733 			    u64 in_param, u64 *out_param)
1734 {
1735 	int port;
1736 	int err = 0;
1737 
1738 	switch (op) {
1739 	case RES_OP_RESERVE_AND_MAP:
1740 		port = get_param_l(out_param);
1741 		mac_del_from_slave(dev, slave, in_param, port);
1742 		__mlx4_unregister_mac(dev, port, in_param);
1743 		break;
1744 	default:
1745 		err = -EINVAL;
1746 		break;
1747 	}
1748 
1749 	return err;
1750 
1751 }
1752 
1753 static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1754 			    u64 in_param, u64 *out_param)
1755 {
1756 	return 0;
1757 }
1758 
1759 static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1760 			    u64 in_param, u64 *out_param)
1761 {
1762 	int index;
1763 	int err;
1764 
1765 	if (op != RES_OP_RESERVE)
1766 		return -EINVAL;
1767 
1768 	index = get_param_l(&in_param);
1769 	err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
1770 	if (err)
1771 		return err;
1772 
1773 	__mlx4_counter_free(dev, index);
1774 
1775 	return err;
1776 }
1777 
1778 static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1779 			  u64 in_param, u64 *out_param)
1780 {
1781 	int xrcdn;
1782 	int err;
1783 
1784 	if (op != RES_OP_RESERVE)
1785 		return -EINVAL;
1786 
1787 	xrcdn = get_param_l(&in_param);
1788 	err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
1789 	if (err)
1790 		return err;
1791 
1792 	__mlx4_xrcd_free(dev, xrcdn);
1793 
1794 	return err;
1795 }
1796 
1797 int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
1798 			  struct mlx4_vhcr *vhcr,
1799 			  struct mlx4_cmd_mailbox *inbox,
1800 			  struct mlx4_cmd_mailbox *outbox,
1801 			  struct mlx4_cmd_info *cmd)
1802 {
1803 	int err = -EINVAL;
1804 	int alop = vhcr->op_modifier;
1805 
1806 	switch (vhcr->in_modifier) {
1807 	case RES_QP:
1808 		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
1809 				  vhcr->in_param);
1810 		break;
1811 
1812 	case RES_MTT:
1813 		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
1814 				   vhcr->in_param, &vhcr->out_param);
1815 		break;
1816 
1817 	case RES_MPT:
1818 		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
1819 				   vhcr->in_param);
1820 		break;
1821 
1822 	case RES_CQ:
1823 		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
1824 				  vhcr->in_param, &vhcr->out_param);
1825 		break;
1826 
1827 	case RES_SRQ:
1828 		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
1829 				   vhcr->in_param, &vhcr->out_param);
1830 		break;
1831 
1832 	case RES_MAC:
1833 		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
1834 				   vhcr->in_param, &vhcr->out_param);
1835 		break;
1836 
1837 	case RES_VLAN:
1838 		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
1839 				   vhcr->in_param, &vhcr->out_param);
1840 		break;
1841 
1842 	case RES_COUNTER:
1843 		err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
1844 				       vhcr->in_param, &vhcr->out_param);
1845 		break;
1846 
1847 	case RES_XRCD:
1848 		err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
1849 				     vhcr->in_param, &vhcr->out_param);
1850 
1851 	default:
1852 		break;
1853 	}
1854 	return err;
1855 }
1856 
1857 /* ugly but other choices are uglier */
1858 static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
1859 {
1860 	return (be32_to_cpu(mpt->flags) >> 9) & 1;
1861 }
1862 
1863 static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
1864 {
1865 	return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
1866 }
1867 
1868 static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
1869 {
1870 	return be32_to_cpu(mpt->mtt_sz);
1871 }
1872 
1873 static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
1874 {
1875 	return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
1876 }
1877 
1878 static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
1879 {
1880 	return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
1881 }
1882 
1883 static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
1884 {
1885 	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
1886 }
1887 
1888 static int mr_is_region(struct mlx4_mpt_entry *mpt)
1889 {
1890 	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
1891 }
1892 
1893 static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
1894 {
1895 	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
1896 }
1897 
1898 static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
1899 {
1900 	return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
1901 }
1902 
1903 static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
1904 {
1905 	int page_shift = (qpc->log_page_size & 0x3f) + 12;
1906 	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
1907 	int log_sq_sride = qpc->sq_size_stride & 7;
1908 	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
1909 	int log_rq_stride = qpc->rq_size_stride & 7;
1910 	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
1911 	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
1912 	u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
1913 	int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
1914 	int sq_size;
1915 	int rq_size;
1916 	int total_pages;
1917 	int total_mem;
1918 	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
1919 
1920 	sq_size = 1 << (log_sq_size + log_sq_sride + 4);
1921 	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
1922 	total_mem = sq_size + rq_size;
1923 	total_pages =
1924 		roundup_pow_of_two((total_mem + (page_offset << 6)) >>
1925 				   page_shift);
1926 
1927 	return total_pages;
1928 }
1929 
1930 static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
1931 			   int size, struct res_mtt *mtt)
1932 {
1933 	int res_start = mtt->com.res_id;
1934 	int res_size = (1 << mtt->order);
1935 
1936 	if (start < res_start || start + size > res_start + res_size)
1937 		return -EPERM;
1938 	return 0;
1939 }
1940 
1941 int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
1942 			   struct mlx4_vhcr *vhcr,
1943 			   struct mlx4_cmd_mailbox *inbox,
1944 			   struct mlx4_cmd_mailbox *outbox,
1945 			   struct mlx4_cmd_info *cmd)
1946 {
1947 	int err;
1948 	int index = vhcr->in_modifier;
1949 	struct res_mtt *mtt;
1950 	struct res_mpt *mpt;
1951 	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
1952 	int phys;
1953 	int id;
1954 	u32 pd;
1955 	int pd_slave;
1956 
1957 	id = index & mpt_mask(dev);
1958 	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
1959 	if (err)
1960 		return err;
1961 
1962 	/* Disable memory windows for VFs. */
1963 	if (!mr_is_region(inbox->buf)) {
1964 		err = -EPERM;
1965 		goto ex_abort;
1966 	}
1967 
1968 	/* Make sure that the PD bits related to the slave id are zeros. */
1969 	pd = mr_get_pd(inbox->buf);
1970 	pd_slave = (pd >> 17) & 0x7f;
1971 	if (pd_slave != 0 && pd_slave != slave) {
1972 		err = -EPERM;
1973 		goto ex_abort;
1974 	}
1975 
1976 	if (mr_is_fmr(inbox->buf)) {
1977 		/* FMR and Bind Enable are forbidden in slave devices. */
1978 		if (mr_is_bind_enabled(inbox->buf)) {
1979 			err = -EPERM;
1980 			goto ex_abort;
1981 		}
1982 		/* FMR and Memory Windows are also forbidden. */
1983 		if (!mr_is_region(inbox->buf)) {
1984 			err = -EPERM;
1985 			goto ex_abort;
1986 		}
1987 	}
1988 
1989 	phys = mr_phys_mpt(inbox->buf);
1990 	if (!phys) {
1991 		err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
1992 		if (err)
1993 			goto ex_abort;
1994 
1995 		err = check_mtt_range(dev, slave, mtt_base,
1996 				      mr_get_mtt_size(inbox->buf), mtt);
1997 		if (err)
1998 			goto ex_put;
1999 
2000 		mpt->mtt = mtt;
2001 	}
2002 
2003 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2004 	if (err)
2005 		goto ex_put;
2006 
2007 	if (!phys) {
2008 		atomic_inc(&mtt->ref_count);
2009 		put_res(dev, slave, mtt->com.res_id, RES_MTT);
2010 	}
2011 
2012 	res_end_move(dev, slave, RES_MPT, id);
2013 	return 0;
2014 
2015 ex_put:
2016 	if (!phys)
2017 		put_res(dev, slave, mtt->com.res_id, RES_MTT);
2018 ex_abort:
2019 	res_abort_move(dev, slave, RES_MPT, id);
2020 
2021 	return err;
2022 }
2023 
2024 int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2025 			   struct mlx4_vhcr *vhcr,
2026 			   struct mlx4_cmd_mailbox *inbox,
2027 			   struct mlx4_cmd_mailbox *outbox,
2028 			   struct mlx4_cmd_info *cmd)
2029 {
2030 	int err;
2031 	int index = vhcr->in_modifier;
2032 	struct res_mpt *mpt;
2033 	int id;
2034 
2035 	id = index & mpt_mask(dev);
2036 	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
2037 	if (err)
2038 		return err;
2039 
2040 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2041 	if (err)
2042 		goto ex_abort;
2043 
2044 	if (mpt->mtt)
2045 		atomic_dec(&mpt->mtt->ref_count);
2046 
2047 	res_end_move(dev, slave, RES_MPT, id);
2048 	return 0;
2049 
2050 ex_abort:
2051 	res_abort_move(dev, slave, RES_MPT, id);
2052 
2053 	return err;
2054 }
2055 
2056 int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2057 			   struct mlx4_vhcr *vhcr,
2058 			   struct mlx4_cmd_mailbox *inbox,
2059 			   struct mlx4_cmd_mailbox *outbox,
2060 			   struct mlx4_cmd_info *cmd)
2061 {
2062 	int err;
2063 	int index = vhcr->in_modifier;
2064 	struct res_mpt *mpt;
2065 	int id;
2066 
2067 	id = index & mpt_mask(dev);
2068 	err = get_res(dev, slave, id, RES_MPT, &mpt);
2069 	if (err)
2070 		return err;
2071 
2072 	if (mpt->com.from_state != RES_MPT_HW) {
2073 		err = -EBUSY;
2074 		goto out;
2075 	}
2076 
2077 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2078 
2079 out:
2080 	put_res(dev, slave, id, RES_MPT);
2081 	return err;
2082 }
2083 
2084 static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2085 {
2086 	return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2087 }
2088 
2089 static int qp_get_scqn(struct mlx4_qp_context *qpc)
2090 {
2091 	return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2092 }
2093 
2094 static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2095 {
2096 	return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2097 }
2098 
2099 static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2100 				  struct mlx4_qp_context *context)
2101 {
2102 	u32 qpn = vhcr->in_modifier & 0xffffff;
2103 	u32 qkey = 0;
2104 
2105 	if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2106 		return;
2107 
2108 	/* adjust qkey in qp context */
2109 	context->qkey = cpu_to_be32(qkey);
2110 }
2111 
2112 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2113 			     struct mlx4_vhcr *vhcr,
2114 			     struct mlx4_cmd_mailbox *inbox,
2115 			     struct mlx4_cmd_mailbox *outbox,
2116 			     struct mlx4_cmd_info *cmd)
2117 {
2118 	int err;
2119 	int qpn = vhcr->in_modifier & 0x7fffff;
2120 	struct res_mtt *mtt;
2121 	struct res_qp *qp;
2122 	struct mlx4_qp_context *qpc = inbox->buf + 8;
2123 	int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2124 	int mtt_size = qp_get_mtt_size(qpc);
2125 	struct res_cq *rcq;
2126 	struct res_cq *scq;
2127 	int rcqn = qp_get_rcqn(qpc);
2128 	int scqn = qp_get_scqn(qpc);
2129 	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2130 	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2131 	struct res_srq *srq;
2132 	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2133 
2134 	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2135 	if (err)
2136 		return err;
2137 	qp->local_qpn = local_qpn;
2138 	qp->sched_queue = 0;
2139 	qp->qpc_flags = be32_to_cpu(qpc->flags);
2140 
2141 	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2142 	if (err)
2143 		goto ex_abort;
2144 
2145 	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2146 	if (err)
2147 		goto ex_put_mtt;
2148 
2149 	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2150 	if (err)
2151 		goto ex_put_mtt;
2152 
2153 	if (scqn != rcqn) {
2154 		err = get_res(dev, slave, scqn, RES_CQ, &scq);
2155 		if (err)
2156 			goto ex_put_rcq;
2157 	} else
2158 		scq = rcq;
2159 
2160 	if (use_srq) {
2161 		err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2162 		if (err)
2163 			goto ex_put_scq;
2164 	}
2165 
2166 	adjust_proxy_tun_qkey(dev, vhcr, qpc);
2167 	update_pkey_index(dev, slave, inbox);
2168 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2169 	if (err)
2170 		goto ex_put_srq;
2171 	atomic_inc(&mtt->ref_count);
2172 	qp->mtt = mtt;
2173 	atomic_inc(&rcq->ref_count);
2174 	qp->rcq = rcq;
2175 	atomic_inc(&scq->ref_count);
2176 	qp->scq = scq;
2177 
2178 	if (scqn != rcqn)
2179 		put_res(dev, slave, scqn, RES_CQ);
2180 
2181 	if (use_srq) {
2182 		atomic_inc(&srq->ref_count);
2183 		put_res(dev, slave, srqn, RES_SRQ);
2184 		qp->srq = srq;
2185 	}
2186 	put_res(dev, slave, rcqn, RES_CQ);
2187 	put_res(dev, slave, mtt_base, RES_MTT);
2188 	res_end_move(dev, slave, RES_QP, qpn);
2189 
2190 	return 0;
2191 
2192 ex_put_srq:
2193 	if (use_srq)
2194 		put_res(dev, slave, srqn, RES_SRQ);
2195 ex_put_scq:
2196 	if (scqn != rcqn)
2197 		put_res(dev, slave, scqn, RES_CQ);
2198 ex_put_rcq:
2199 	put_res(dev, slave, rcqn, RES_CQ);
2200 ex_put_mtt:
2201 	put_res(dev, slave, mtt_base, RES_MTT);
2202 ex_abort:
2203 	res_abort_move(dev, slave, RES_QP, qpn);
2204 
2205 	return err;
2206 }
2207 
2208 static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
2209 {
2210 	return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
2211 }
2212 
2213 static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
2214 {
2215 	int log_eq_size = eqc->log_eq_size & 0x1f;
2216 	int page_shift = (eqc->log_page_size & 0x3f) + 12;
2217 
2218 	if (log_eq_size + 5 < page_shift)
2219 		return 1;
2220 
2221 	return 1 << (log_eq_size + 5 - page_shift);
2222 }
2223 
2224 static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
2225 {
2226 	return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
2227 }
2228 
2229 static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
2230 {
2231 	int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
2232 	int page_shift = (cqc->log_page_size & 0x3f) + 12;
2233 
2234 	if (log_cq_size + 5 < page_shift)
2235 		return 1;
2236 
2237 	return 1 << (log_cq_size + 5 - page_shift);
2238 }
2239 
2240 int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2241 			  struct mlx4_vhcr *vhcr,
2242 			  struct mlx4_cmd_mailbox *inbox,
2243 			  struct mlx4_cmd_mailbox *outbox,
2244 			  struct mlx4_cmd_info *cmd)
2245 {
2246 	int err;
2247 	int eqn = vhcr->in_modifier;
2248 	int res_id = (slave << 8) | eqn;
2249 	struct mlx4_eq_context *eqc = inbox->buf;
2250 	int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
2251 	int mtt_size = eq_get_mtt_size(eqc);
2252 	struct res_eq *eq;
2253 	struct res_mtt *mtt;
2254 
2255 	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2256 	if (err)
2257 		return err;
2258 	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
2259 	if (err)
2260 		goto out_add;
2261 
2262 	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2263 	if (err)
2264 		goto out_move;
2265 
2266 	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2267 	if (err)
2268 		goto out_put;
2269 
2270 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2271 	if (err)
2272 		goto out_put;
2273 
2274 	atomic_inc(&mtt->ref_count);
2275 	eq->mtt = mtt;
2276 	put_res(dev, slave, mtt->com.res_id, RES_MTT);
2277 	res_end_move(dev, slave, RES_EQ, res_id);
2278 	return 0;
2279 
2280 out_put:
2281 	put_res(dev, slave, mtt->com.res_id, RES_MTT);
2282 out_move:
2283 	res_abort_move(dev, slave, RES_EQ, res_id);
2284 out_add:
2285 	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2286 	return err;
2287 }
2288 
2289 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
2290 			      int len, struct res_mtt **res)
2291 {
2292 	struct mlx4_priv *priv = mlx4_priv(dev);
2293 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2294 	struct res_mtt *mtt;
2295 	int err = -EINVAL;
2296 
2297 	spin_lock_irq(mlx4_tlock(dev));
2298 	list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
2299 			    com.list) {
2300 		if (!check_mtt_range(dev, slave, start, len, mtt)) {
2301 			*res = mtt;
2302 			mtt->com.from_state = mtt->com.state;
2303 			mtt->com.state = RES_MTT_BUSY;
2304 			err = 0;
2305 			break;
2306 		}
2307 	}
2308 	spin_unlock_irq(mlx4_tlock(dev));
2309 
2310 	return err;
2311 }
2312 
2313 static int verify_qp_parameters(struct mlx4_dev *dev,
2314 				struct mlx4_cmd_mailbox *inbox,
2315 				enum qp_transition transition, u8 slave)
2316 {
2317 	u32			qp_type;
2318 	struct mlx4_qp_context	*qp_ctx;
2319 	enum mlx4_qp_optpar	optpar;
2320 
2321 	qp_ctx  = inbox->buf + 8;
2322 	qp_type	= (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
2323 	optpar	= be32_to_cpu(*(__be32 *) inbox->buf);
2324 
2325 	switch (qp_type) {
2326 	case MLX4_QP_ST_RC:
2327 	case MLX4_QP_ST_UC:
2328 		switch (transition) {
2329 		case QP_TRANS_INIT2RTR:
2330 		case QP_TRANS_RTR2RTS:
2331 		case QP_TRANS_RTS2RTS:
2332 		case QP_TRANS_SQD2SQD:
2333 		case QP_TRANS_SQD2RTS:
2334 			if (slave != mlx4_master_func_num(dev))
2335 				/* slaves have only gid index 0 */
2336 				if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
2337 					if (qp_ctx->pri_path.mgid_index)
2338 						return -EINVAL;
2339 				if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
2340 					if (qp_ctx->alt_path.mgid_index)
2341 						return -EINVAL;
2342 			break;
2343 		default:
2344 			break;
2345 		}
2346 
2347 		break;
2348 	default:
2349 		break;
2350 	}
2351 
2352 	return 0;
2353 }
2354 
2355 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
2356 			   struct mlx4_vhcr *vhcr,
2357 			   struct mlx4_cmd_mailbox *inbox,
2358 			   struct mlx4_cmd_mailbox *outbox,
2359 			   struct mlx4_cmd_info *cmd)
2360 {
2361 	struct mlx4_mtt mtt;
2362 	__be64 *page_list = inbox->buf;
2363 	u64 *pg_list = (u64 *)page_list;
2364 	int i;
2365 	struct res_mtt *rmtt = NULL;
2366 	int start = be64_to_cpu(page_list[0]);
2367 	int npages = vhcr->in_modifier;
2368 	int err;
2369 
2370 	err = get_containing_mtt(dev, slave, start, npages, &rmtt);
2371 	if (err)
2372 		return err;
2373 
2374 	/* Call the SW implementation of write_mtt:
2375 	 * - Prepare a dummy mtt struct
2376 	 * - Translate inbox contents to simple addresses in host endianess */
2377 	mtt.offset = 0;  /* TBD this is broken but I don't handle it since
2378 			    we don't really use it */
2379 	mtt.order = 0;
2380 	mtt.page_shift = 0;
2381 	for (i = 0; i < npages; ++i)
2382 		pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
2383 
2384 	err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
2385 			       ((u64 *)page_list + 2));
2386 
2387 	if (rmtt)
2388 		put_res(dev, slave, rmtt->com.res_id, RES_MTT);
2389 
2390 	return err;
2391 }
2392 
2393 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2394 			  struct mlx4_vhcr *vhcr,
2395 			  struct mlx4_cmd_mailbox *inbox,
2396 			  struct mlx4_cmd_mailbox *outbox,
2397 			  struct mlx4_cmd_info *cmd)
2398 {
2399 	int eqn = vhcr->in_modifier;
2400 	int res_id = eqn | (slave << 8);
2401 	struct res_eq *eq;
2402 	int err;
2403 
2404 	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
2405 	if (err)
2406 		return err;
2407 
2408 	err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
2409 	if (err)
2410 		goto ex_abort;
2411 
2412 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2413 	if (err)
2414 		goto ex_put;
2415 
2416 	atomic_dec(&eq->mtt->ref_count);
2417 	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2418 	res_end_move(dev, slave, RES_EQ, res_id);
2419 	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2420 
2421 	return 0;
2422 
2423 ex_put:
2424 	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2425 ex_abort:
2426 	res_abort_move(dev, slave, RES_EQ, res_id);
2427 
2428 	return err;
2429 }
2430 
2431 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
2432 {
2433 	struct mlx4_priv *priv = mlx4_priv(dev);
2434 	struct mlx4_slave_event_eq_info *event_eq;
2435 	struct mlx4_cmd_mailbox *mailbox;
2436 	u32 in_modifier = 0;
2437 	int err;
2438 	int res_id;
2439 	struct res_eq *req;
2440 
2441 	if (!priv->mfunc.master.slave_state)
2442 		return -EINVAL;
2443 
2444 	event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
2445 
2446 	/* Create the event only if the slave is registered */
2447 	if (event_eq->eqn < 0)
2448 		return 0;
2449 
2450 	mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2451 	res_id = (slave << 8) | event_eq->eqn;
2452 	err = get_res(dev, slave, res_id, RES_EQ, &req);
2453 	if (err)
2454 		goto unlock;
2455 
2456 	if (req->com.from_state != RES_EQ_HW) {
2457 		err = -EINVAL;
2458 		goto put;
2459 	}
2460 
2461 	mailbox = mlx4_alloc_cmd_mailbox(dev);
2462 	if (IS_ERR(mailbox)) {
2463 		err = PTR_ERR(mailbox);
2464 		goto put;
2465 	}
2466 
2467 	if (eqe->type == MLX4_EVENT_TYPE_CMD) {
2468 		++event_eq->token;
2469 		eqe->event.cmd.token = cpu_to_be16(event_eq->token);
2470 	}
2471 
2472 	memcpy(mailbox->buf, (u8 *) eqe, 28);
2473 
2474 	in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
2475 
2476 	err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
2477 		       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
2478 		       MLX4_CMD_NATIVE);
2479 
2480 	put_res(dev, slave, res_id, RES_EQ);
2481 	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2482 	mlx4_free_cmd_mailbox(dev, mailbox);
2483 	return err;
2484 
2485 put:
2486 	put_res(dev, slave, res_id, RES_EQ);
2487 
2488 unlock:
2489 	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2490 	return err;
2491 }
2492 
2493 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
2494 			  struct mlx4_vhcr *vhcr,
2495 			  struct mlx4_cmd_mailbox *inbox,
2496 			  struct mlx4_cmd_mailbox *outbox,
2497 			  struct mlx4_cmd_info *cmd)
2498 {
2499 	int eqn = vhcr->in_modifier;
2500 	int res_id = eqn | (slave << 8);
2501 	struct res_eq *eq;
2502 	int err;
2503 
2504 	err = get_res(dev, slave, res_id, RES_EQ, &eq);
2505 	if (err)
2506 		return err;
2507 
2508 	if (eq->com.from_state != RES_EQ_HW) {
2509 		err = -EINVAL;
2510 		goto ex_put;
2511 	}
2512 
2513 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2514 
2515 ex_put:
2516 	put_res(dev, slave, res_id, RES_EQ);
2517 	return err;
2518 }
2519 
2520 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2521 			  struct mlx4_vhcr *vhcr,
2522 			  struct mlx4_cmd_mailbox *inbox,
2523 			  struct mlx4_cmd_mailbox *outbox,
2524 			  struct mlx4_cmd_info *cmd)
2525 {
2526 	int err;
2527 	int cqn = vhcr->in_modifier;
2528 	struct mlx4_cq_context *cqc = inbox->buf;
2529 	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2530 	struct res_cq *cq;
2531 	struct res_mtt *mtt;
2532 
2533 	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
2534 	if (err)
2535 		return err;
2536 	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2537 	if (err)
2538 		goto out_move;
2539 	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2540 	if (err)
2541 		goto out_put;
2542 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2543 	if (err)
2544 		goto out_put;
2545 	atomic_inc(&mtt->ref_count);
2546 	cq->mtt = mtt;
2547 	put_res(dev, slave, mtt->com.res_id, RES_MTT);
2548 	res_end_move(dev, slave, RES_CQ, cqn);
2549 	return 0;
2550 
2551 out_put:
2552 	put_res(dev, slave, mtt->com.res_id, RES_MTT);
2553 out_move:
2554 	res_abort_move(dev, slave, RES_CQ, cqn);
2555 	return err;
2556 }
2557 
2558 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2559 			  struct mlx4_vhcr *vhcr,
2560 			  struct mlx4_cmd_mailbox *inbox,
2561 			  struct mlx4_cmd_mailbox *outbox,
2562 			  struct mlx4_cmd_info *cmd)
2563 {
2564 	int err;
2565 	int cqn = vhcr->in_modifier;
2566 	struct res_cq *cq;
2567 
2568 	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
2569 	if (err)
2570 		return err;
2571 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2572 	if (err)
2573 		goto out_move;
2574 	atomic_dec(&cq->mtt->ref_count);
2575 	res_end_move(dev, slave, RES_CQ, cqn);
2576 	return 0;
2577 
2578 out_move:
2579 	res_abort_move(dev, slave, RES_CQ, cqn);
2580 	return err;
2581 }
2582 
2583 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2584 			  struct mlx4_vhcr *vhcr,
2585 			  struct mlx4_cmd_mailbox *inbox,
2586 			  struct mlx4_cmd_mailbox *outbox,
2587 			  struct mlx4_cmd_info *cmd)
2588 {
2589 	int cqn = vhcr->in_modifier;
2590 	struct res_cq *cq;
2591 	int err;
2592 
2593 	err = get_res(dev, slave, cqn, RES_CQ, &cq);
2594 	if (err)
2595 		return err;
2596 
2597 	if (cq->com.from_state != RES_CQ_HW)
2598 		goto ex_put;
2599 
2600 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2601 ex_put:
2602 	put_res(dev, slave, cqn, RES_CQ);
2603 
2604 	return err;
2605 }
2606 
2607 static int handle_resize(struct mlx4_dev *dev, int slave,
2608 			 struct mlx4_vhcr *vhcr,
2609 			 struct mlx4_cmd_mailbox *inbox,
2610 			 struct mlx4_cmd_mailbox *outbox,
2611 			 struct mlx4_cmd_info *cmd,
2612 			 struct res_cq *cq)
2613 {
2614 	int err;
2615 	struct res_mtt *orig_mtt;
2616 	struct res_mtt *mtt;
2617 	struct mlx4_cq_context *cqc = inbox->buf;
2618 	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2619 
2620 	err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
2621 	if (err)
2622 		return err;
2623 
2624 	if (orig_mtt != cq->mtt) {
2625 		err = -EINVAL;
2626 		goto ex_put;
2627 	}
2628 
2629 	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2630 	if (err)
2631 		goto ex_put;
2632 
2633 	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2634 	if (err)
2635 		goto ex_put1;
2636 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2637 	if (err)
2638 		goto ex_put1;
2639 	atomic_dec(&orig_mtt->ref_count);
2640 	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2641 	atomic_inc(&mtt->ref_count);
2642 	cq->mtt = mtt;
2643 	put_res(dev, slave, mtt->com.res_id, RES_MTT);
2644 	return 0;
2645 
2646 ex_put1:
2647 	put_res(dev, slave, mtt->com.res_id, RES_MTT);
2648 ex_put:
2649 	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2650 
2651 	return err;
2652 
2653 }
2654 
2655 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2656 			   struct mlx4_vhcr *vhcr,
2657 			   struct mlx4_cmd_mailbox *inbox,
2658 			   struct mlx4_cmd_mailbox *outbox,
2659 			   struct mlx4_cmd_info *cmd)
2660 {
2661 	int cqn = vhcr->in_modifier;
2662 	struct res_cq *cq;
2663 	int err;
2664 
2665 	err = get_res(dev, slave, cqn, RES_CQ, &cq);
2666 	if (err)
2667 		return err;
2668 
2669 	if (cq->com.from_state != RES_CQ_HW)
2670 		goto ex_put;
2671 
2672 	if (vhcr->op_modifier == 0) {
2673 		err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
2674 		goto ex_put;
2675 	}
2676 
2677 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2678 ex_put:
2679 	put_res(dev, slave, cqn, RES_CQ);
2680 
2681 	return err;
2682 }
2683 
2684 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
2685 {
2686 	int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
2687 	int log_rq_stride = srqc->logstride & 7;
2688 	int page_shift = (srqc->log_page_size & 0x3f) + 12;
2689 
2690 	if (log_srq_size + log_rq_stride + 4 < page_shift)
2691 		return 1;
2692 
2693 	return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
2694 }
2695 
2696 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2697 			   struct mlx4_vhcr *vhcr,
2698 			   struct mlx4_cmd_mailbox *inbox,
2699 			   struct mlx4_cmd_mailbox *outbox,
2700 			   struct mlx4_cmd_info *cmd)
2701 {
2702 	int err;
2703 	int srqn = vhcr->in_modifier;
2704 	struct res_mtt *mtt;
2705 	struct res_srq *srq;
2706 	struct mlx4_srq_context *srqc = inbox->buf;
2707 	int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
2708 
2709 	if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
2710 		return -EINVAL;
2711 
2712 	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
2713 	if (err)
2714 		return err;
2715 	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2716 	if (err)
2717 		goto ex_abort;
2718 	err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
2719 			      mtt);
2720 	if (err)
2721 		goto ex_put_mtt;
2722 
2723 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2724 	if (err)
2725 		goto ex_put_mtt;
2726 
2727 	atomic_inc(&mtt->ref_count);
2728 	srq->mtt = mtt;
2729 	put_res(dev, slave, mtt->com.res_id, RES_MTT);
2730 	res_end_move(dev, slave, RES_SRQ, srqn);
2731 	return 0;
2732 
2733 ex_put_mtt:
2734 	put_res(dev, slave, mtt->com.res_id, RES_MTT);
2735 ex_abort:
2736 	res_abort_move(dev, slave, RES_SRQ, srqn);
2737 
2738 	return err;
2739 }
2740 
2741 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2742 			   struct mlx4_vhcr *vhcr,
2743 			   struct mlx4_cmd_mailbox *inbox,
2744 			   struct mlx4_cmd_mailbox *outbox,
2745 			   struct mlx4_cmd_info *cmd)
2746 {
2747 	int err;
2748 	int srqn = vhcr->in_modifier;
2749 	struct res_srq *srq;
2750 
2751 	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
2752 	if (err)
2753 		return err;
2754 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2755 	if (err)
2756 		goto ex_abort;
2757 	atomic_dec(&srq->mtt->ref_count);
2758 	if (srq->cq)
2759 		atomic_dec(&srq->cq->ref_count);
2760 	res_end_move(dev, slave, RES_SRQ, srqn);
2761 
2762 	return 0;
2763 
2764 ex_abort:
2765 	res_abort_move(dev, slave, RES_SRQ, srqn);
2766 
2767 	return err;
2768 }
2769 
2770 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2771 			   struct mlx4_vhcr *vhcr,
2772 			   struct mlx4_cmd_mailbox *inbox,
2773 			   struct mlx4_cmd_mailbox *outbox,
2774 			   struct mlx4_cmd_info *cmd)
2775 {
2776 	int err;
2777 	int srqn = vhcr->in_modifier;
2778 	struct res_srq *srq;
2779 
2780 	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2781 	if (err)
2782 		return err;
2783 	if (srq->com.from_state != RES_SRQ_HW) {
2784 		err = -EBUSY;
2785 		goto out;
2786 	}
2787 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2788 out:
2789 	put_res(dev, slave, srqn, RES_SRQ);
2790 	return err;
2791 }
2792 
2793 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2794 			 struct mlx4_vhcr *vhcr,
2795 			 struct mlx4_cmd_mailbox *inbox,
2796 			 struct mlx4_cmd_mailbox *outbox,
2797 			 struct mlx4_cmd_info *cmd)
2798 {
2799 	int err;
2800 	int srqn = vhcr->in_modifier;
2801 	struct res_srq *srq;
2802 
2803 	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2804 	if (err)
2805 		return err;
2806 
2807 	if (srq->com.from_state != RES_SRQ_HW) {
2808 		err = -EBUSY;
2809 		goto out;
2810 	}
2811 
2812 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2813 out:
2814 	put_res(dev, slave, srqn, RES_SRQ);
2815 	return err;
2816 }
2817 
2818 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
2819 			struct mlx4_vhcr *vhcr,
2820 			struct mlx4_cmd_mailbox *inbox,
2821 			struct mlx4_cmd_mailbox *outbox,
2822 			struct mlx4_cmd_info *cmd)
2823 {
2824 	int err;
2825 	int qpn = vhcr->in_modifier & 0x7fffff;
2826 	struct res_qp *qp;
2827 
2828 	err = get_res(dev, slave, qpn, RES_QP, &qp);
2829 	if (err)
2830 		return err;
2831 	if (qp->com.from_state != RES_QP_HW) {
2832 		err = -EBUSY;
2833 		goto out;
2834 	}
2835 
2836 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2837 out:
2838 	put_res(dev, slave, qpn, RES_QP);
2839 	return err;
2840 }
2841 
2842 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2843 			      struct mlx4_vhcr *vhcr,
2844 			      struct mlx4_cmd_mailbox *inbox,
2845 			      struct mlx4_cmd_mailbox *outbox,
2846 			      struct mlx4_cmd_info *cmd)
2847 {
2848 	struct mlx4_qp_context *context = inbox->buf + 8;
2849 	adjust_proxy_tun_qkey(dev, vhcr, context);
2850 	update_pkey_index(dev, slave, inbox);
2851 	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2852 }
2853 
2854 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
2855 			     struct mlx4_vhcr *vhcr,
2856 			     struct mlx4_cmd_mailbox *inbox,
2857 			     struct mlx4_cmd_mailbox *outbox,
2858 			     struct mlx4_cmd_info *cmd)
2859 {
2860 	int err;
2861 	struct mlx4_qp_context *qpc = inbox->buf + 8;
2862 	int qpn = vhcr->in_modifier & 0x7fffff;
2863 	struct res_qp *qp;
2864 	u8 orig_sched_queue;
2865 
2866 	err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
2867 	if (err)
2868 		return err;
2869 
2870 	update_pkey_index(dev, slave, inbox);
2871 	update_gid(dev, inbox, (u8)slave);
2872 	adjust_proxy_tun_qkey(dev, vhcr, qpc);
2873 	orig_sched_queue = qpc->pri_path.sched_queue;
2874 	err = update_vport_qp_param(dev, inbox, slave, qpn);
2875 	if (err)
2876 		return err;
2877 
2878 	err = get_res(dev, slave, qpn, RES_QP, &qp);
2879 	if (err)
2880 		return err;
2881 	if (qp->com.from_state != RES_QP_HW) {
2882 		err = -EBUSY;
2883 		goto out;
2884 	}
2885 
2886 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2887 out:
2888 	/* if no error, save sched queue value passed in by VF. This is
2889 	 * essentially the QOS value provided by the VF. This will be useful
2890 	 * if we allow dynamic changes from VST back to VGT
2891 	 */
2892 	if (!err)
2893 		qp->sched_queue = orig_sched_queue;
2894 
2895 	put_res(dev, slave, qpn, RES_QP);
2896 	return err;
2897 }
2898 
2899 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2900 			    struct mlx4_vhcr *vhcr,
2901 			    struct mlx4_cmd_mailbox *inbox,
2902 			    struct mlx4_cmd_mailbox *outbox,
2903 			    struct mlx4_cmd_info *cmd)
2904 {
2905 	int err;
2906 	struct mlx4_qp_context *context = inbox->buf + 8;
2907 
2908 	err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);
2909 	if (err)
2910 		return err;
2911 
2912 	update_pkey_index(dev, slave, inbox);
2913 	update_gid(dev, inbox, (u8)slave);
2914 	adjust_proxy_tun_qkey(dev, vhcr, context);
2915 	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2916 }
2917 
2918 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2919 			    struct mlx4_vhcr *vhcr,
2920 			    struct mlx4_cmd_mailbox *inbox,
2921 			    struct mlx4_cmd_mailbox *outbox,
2922 			    struct mlx4_cmd_info *cmd)
2923 {
2924 	int err;
2925 	struct mlx4_qp_context *context = inbox->buf + 8;
2926 
2927 	err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);
2928 	if (err)
2929 		return err;
2930 
2931 	update_pkey_index(dev, slave, inbox);
2932 	update_gid(dev, inbox, (u8)slave);
2933 	adjust_proxy_tun_qkey(dev, vhcr, context);
2934 	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2935 }
2936 
2937 
2938 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2939 			      struct mlx4_vhcr *vhcr,
2940 			      struct mlx4_cmd_mailbox *inbox,
2941 			      struct mlx4_cmd_mailbox *outbox,
2942 			      struct mlx4_cmd_info *cmd)
2943 {
2944 	struct mlx4_qp_context *context = inbox->buf + 8;
2945 	adjust_proxy_tun_qkey(dev, vhcr, context);
2946 	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2947 }
2948 
2949 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
2950 			    struct mlx4_vhcr *vhcr,
2951 			    struct mlx4_cmd_mailbox *inbox,
2952 			    struct mlx4_cmd_mailbox *outbox,
2953 			    struct mlx4_cmd_info *cmd)
2954 {
2955 	int err;
2956 	struct mlx4_qp_context *context = inbox->buf + 8;
2957 
2958 	err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);
2959 	if (err)
2960 		return err;
2961 
2962 	adjust_proxy_tun_qkey(dev, vhcr, context);
2963 	update_gid(dev, inbox, (u8)slave);
2964 	update_pkey_index(dev, slave, inbox);
2965 	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2966 }
2967 
2968 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2969 			    struct mlx4_vhcr *vhcr,
2970 			    struct mlx4_cmd_mailbox *inbox,
2971 			    struct mlx4_cmd_mailbox *outbox,
2972 			    struct mlx4_cmd_info *cmd)
2973 {
2974 	int err;
2975 	struct mlx4_qp_context *context = inbox->buf + 8;
2976 
2977 	err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);
2978 	if (err)
2979 		return err;
2980 
2981 	adjust_proxy_tun_qkey(dev, vhcr, context);
2982 	update_gid(dev, inbox, (u8)slave);
2983 	update_pkey_index(dev, slave, inbox);
2984 	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2985 }
2986 
2987 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
2988 			 struct mlx4_vhcr *vhcr,
2989 			 struct mlx4_cmd_mailbox *inbox,
2990 			 struct mlx4_cmd_mailbox *outbox,
2991 			 struct mlx4_cmd_info *cmd)
2992 {
2993 	int err;
2994 	int qpn = vhcr->in_modifier & 0x7fffff;
2995 	struct res_qp *qp;
2996 
2997 	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
2998 	if (err)
2999 		return err;
3000 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3001 	if (err)
3002 		goto ex_abort;
3003 
3004 	atomic_dec(&qp->mtt->ref_count);
3005 	atomic_dec(&qp->rcq->ref_count);
3006 	atomic_dec(&qp->scq->ref_count);
3007 	if (qp->srq)
3008 		atomic_dec(&qp->srq->ref_count);
3009 	res_end_move(dev, slave, RES_QP, qpn);
3010 	return 0;
3011 
3012 ex_abort:
3013 	res_abort_move(dev, slave, RES_QP, qpn);
3014 
3015 	return err;
3016 }
3017 
3018 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
3019 				struct res_qp *rqp, u8 *gid)
3020 {
3021 	struct res_gid *res;
3022 
3023 	list_for_each_entry(res, &rqp->mcg_list, list) {
3024 		if (!memcmp(res->gid, gid, 16))
3025 			return res;
3026 	}
3027 	return NULL;
3028 }
3029 
3030 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3031 		       u8 *gid, enum mlx4_protocol prot,
3032 		       enum mlx4_steer_type steer, u64 reg_id)
3033 {
3034 	struct res_gid *res;
3035 	int err;
3036 
3037 	res = kzalloc(sizeof *res, GFP_KERNEL);
3038 	if (!res)
3039 		return -ENOMEM;
3040 
3041 	spin_lock_irq(&rqp->mcg_spl);
3042 	if (find_gid(dev, slave, rqp, gid)) {
3043 		kfree(res);
3044 		err = -EEXIST;
3045 	} else {
3046 		memcpy(res->gid, gid, 16);
3047 		res->prot = prot;
3048 		res->steer = steer;
3049 		res->reg_id = reg_id;
3050 		list_add_tail(&res->list, &rqp->mcg_list);
3051 		err = 0;
3052 	}
3053 	spin_unlock_irq(&rqp->mcg_spl);
3054 
3055 	return err;
3056 }
3057 
3058 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3059 		       u8 *gid, enum mlx4_protocol prot,
3060 		       enum mlx4_steer_type steer, u64 *reg_id)
3061 {
3062 	struct res_gid *res;
3063 	int err;
3064 
3065 	spin_lock_irq(&rqp->mcg_spl);
3066 	res = find_gid(dev, slave, rqp, gid);
3067 	if (!res || res->prot != prot || res->steer != steer)
3068 		err = -EINVAL;
3069 	else {
3070 		*reg_id = res->reg_id;
3071 		list_del(&res->list);
3072 		kfree(res);
3073 		err = 0;
3074 	}
3075 	spin_unlock_irq(&rqp->mcg_spl);
3076 
3077 	return err;
3078 }
3079 
3080 static int qp_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
3081 		     int block_loopback, enum mlx4_protocol prot,
3082 		     enum mlx4_steer_type type, u64 *reg_id)
3083 {
3084 	switch (dev->caps.steering_mode) {
3085 	case MLX4_STEERING_MODE_DEVICE_MANAGED:
3086 		return mlx4_trans_to_dmfs_attach(dev, qp, gid, gid[5],
3087 						block_loopback, prot,
3088 						reg_id);
3089 	case MLX4_STEERING_MODE_B0:
3090 		return mlx4_qp_attach_common(dev, qp, gid,
3091 					    block_loopback, prot, type);
3092 	default:
3093 		return -EINVAL;
3094 	}
3095 }
3096 
3097 static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
3098 		     enum mlx4_protocol prot, enum mlx4_steer_type type,
3099 		     u64 reg_id)
3100 {
3101 	switch (dev->caps.steering_mode) {
3102 	case MLX4_STEERING_MODE_DEVICE_MANAGED:
3103 		return mlx4_flow_detach(dev, reg_id);
3104 	case MLX4_STEERING_MODE_B0:
3105 		return mlx4_qp_detach_common(dev, qp, gid, prot, type);
3106 	default:
3107 		return -EINVAL;
3108 	}
3109 }
3110 
3111 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3112 			       struct mlx4_vhcr *vhcr,
3113 			       struct mlx4_cmd_mailbox *inbox,
3114 			       struct mlx4_cmd_mailbox *outbox,
3115 			       struct mlx4_cmd_info *cmd)
3116 {
3117 	struct mlx4_qp qp; /* dummy for calling attach/detach */
3118 	u8 *gid = inbox->buf;
3119 	enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
3120 	int err;
3121 	int qpn;
3122 	struct res_qp *rqp;
3123 	u64 reg_id = 0;
3124 	int attach = vhcr->op_modifier;
3125 	int block_loopback = vhcr->in_modifier >> 31;
3126 	u8 steer_type_mask = 2;
3127 	enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
3128 
3129 	qpn = vhcr->in_modifier & 0xffffff;
3130 	err = get_res(dev, slave, qpn, RES_QP, &rqp);
3131 	if (err)
3132 		return err;
3133 
3134 	qp.qpn = qpn;
3135 	if (attach) {
3136 		err = qp_attach(dev, &qp, gid, block_loopback, prot,
3137 				type, &reg_id);
3138 		if (err) {
3139 			pr_err("Fail to attach rule to qp 0x%x\n", qpn);
3140 			goto ex_put;
3141 		}
3142 		err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
3143 		if (err)
3144 			goto ex_detach;
3145 	} else {
3146 		err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
3147 		if (err)
3148 			goto ex_put;
3149 
3150 		err = qp_detach(dev, &qp, gid, prot, type, reg_id);
3151 		if (err)
3152 			pr_err("Fail to detach rule from qp 0x%x reg_id = 0x%llx\n",
3153 			       qpn, reg_id);
3154 	}
3155 	put_res(dev, slave, qpn, RES_QP);
3156 	return err;
3157 
3158 ex_detach:
3159 	qp_detach(dev, &qp, gid, prot, type, reg_id);
3160 ex_put:
3161 	put_res(dev, slave, qpn, RES_QP);
3162 	return err;
3163 }
3164 
3165 /*
3166  * MAC validation for Flow Steering rules.
3167  * VF can attach rules only with a mac address which is assigned to it.
3168  */
3169 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
3170 				   struct list_head *rlist)
3171 {
3172 	struct mac_res *res, *tmp;
3173 	__be64 be_mac;
3174 
3175 	/* make sure it isn't multicast or broadcast mac*/
3176 	if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
3177 	    !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
3178 		list_for_each_entry_safe(res, tmp, rlist, list) {
3179 			be_mac = cpu_to_be64(res->mac << 16);
3180 			if (!memcmp(&be_mac, eth_header->eth.dst_mac, ETH_ALEN))
3181 				return 0;
3182 		}
3183 		pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
3184 		       eth_header->eth.dst_mac, slave);
3185 		return -EINVAL;
3186 	}
3187 	return 0;
3188 }
3189 
3190 /*
3191  * In case of missing eth header, append eth header with a MAC address
3192  * assigned to the VF.
3193  */
3194 static int add_eth_header(struct mlx4_dev *dev, int slave,
3195 			  struct mlx4_cmd_mailbox *inbox,
3196 			  struct list_head *rlist, int header_id)
3197 {
3198 	struct mac_res *res, *tmp;
3199 	u8 port;
3200 	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3201 	struct mlx4_net_trans_rule_hw_eth *eth_header;
3202 	struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
3203 	struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
3204 	__be64 be_mac = 0;
3205 	__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
3206 
3207 	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3208 	port = ctrl->port;
3209 	eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
3210 
3211 	/* Clear a space in the inbox for eth header */
3212 	switch (header_id) {
3213 	case MLX4_NET_TRANS_RULE_ID_IPV4:
3214 		ip_header =
3215 			(struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
3216 		memmove(ip_header, eth_header,
3217 			sizeof(*ip_header) + sizeof(*l4_header));
3218 		break;
3219 	case MLX4_NET_TRANS_RULE_ID_TCP:
3220 	case MLX4_NET_TRANS_RULE_ID_UDP:
3221 		l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
3222 			    (eth_header + 1);
3223 		memmove(l4_header, eth_header, sizeof(*l4_header));
3224 		break;
3225 	default:
3226 		return -EINVAL;
3227 	}
3228 	list_for_each_entry_safe(res, tmp, rlist, list) {
3229 		if (port == res->port) {
3230 			be_mac = cpu_to_be64(res->mac << 16);
3231 			break;
3232 		}
3233 	}
3234 	if (!be_mac) {
3235 		pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d .\n",
3236 		       port);
3237 		return -EINVAL;
3238 	}
3239 
3240 	memset(eth_header, 0, sizeof(*eth_header));
3241 	eth_header->size = sizeof(*eth_header) >> 2;
3242 	eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
3243 	memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
3244 	memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
3245 
3246 	return 0;
3247 
3248 }
3249 
3250 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3251 					 struct mlx4_vhcr *vhcr,
3252 					 struct mlx4_cmd_mailbox *inbox,
3253 					 struct mlx4_cmd_mailbox *outbox,
3254 					 struct mlx4_cmd_info *cmd)
3255 {
3256 
3257 	struct mlx4_priv *priv = mlx4_priv(dev);
3258 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3259 	struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
3260 	int err;
3261 	int qpn;
3262 	struct res_qp *rqp;
3263 	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3264 	struct _rule_hw  *rule_header;
3265 	int header_id;
3266 
3267 	if (dev->caps.steering_mode !=
3268 	    MLX4_STEERING_MODE_DEVICE_MANAGED)
3269 		return -EOPNOTSUPP;
3270 
3271 	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3272 	qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
3273 	err = get_res(dev, slave, qpn, RES_QP, &rqp);
3274 	if (err) {
3275 		pr_err("Steering rule with qpn 0x%x rejected.\n", qpn);
3276 		return err;
3277 	}
3278 	rule_header = (struct _rule_hw *)(ctrl + 1);
3279 	header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
3280 
3281 	switch (header_id) {
3282 	case MLX4_NET_TRANS_RULE_ID_ETH:
3283 		if (validate_eth_header_mac(slave, rule_header, rlist)) {
3284 			err = -EINVAL;
3285 			goto err_put;
3286 		}
3287 		break;
3288 	case MLX4_NET_TRANS_RULE_ID_IB:
3289 		break;
3290 	case MLX4_NET_TRANS_RULE_ID_IPV4:
3291 	case MLX4_NET_TRANS_RULE_ID_TCP:
3292 	case MLX4_NET_TRANS_RULE_ID_UDP:
3293 		pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n");
3294 		if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
3295 			err = -EINVAL;
3296 			goto err_put;
3297 		}
3298 		vhcr->in_modifier +=
3299 			sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
3300 		break;
3301 	default:
3302 		pr_err("Corrupted mailbox.\n");
3303 		err = -EINVAL;
3304 		goto err_put;
3305 	}
3306 
3307 	err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
3308 			   vhcr->in_modifier, 0,
3309 			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
3310 			   MLX4_CMD_NATIVE);
3311 	if (err)
3312 		goto err_put;
3313 
3314 	err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
3315 	if (err) {
3316 		mlx4_err(dev, "Fail to add flow steering resources.\n ");
3317 		/* detach rule*/
3318 		mlx4_cmd(dev, vhcr->out_param, 0, 0,
3319 			 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3320 			 MLX4_CMD_NATIVE);
3321 		goto err_put;
3322 	}
3323 	atomic_inc(&rqp->ref_count);
3324 err_put:
3325 	put_res(dev, slave, qpn, RES_QP);
3326 	return err;
3327 }
3328 
3329 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
3330 					 struct mlx4_vhcr *vhcr,
3331 					 struct mlx4_cmd_mailbox *inbox,
3332 					 struct mlx4_cmd_mailbox *outbox,
3333 					 struct mlx4_cmd_info *cmd)
3334 {
3335 	int err;
3336 	struct res_qp *rqp;
3337 	struct res_fs_rule *rrule;
3338 
3339 	if (dev->caps.steering_mode !=
3340 	    MLX4_STEERING_MODE_DEVICE_MANAGED)
3341 		return -EOPNOTSUPP;
3342 
3343 	err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
3344 	if (err)
3345 		return err;
3346 	/* Release the rule form busy state before removal */
3347 	put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
3348 	err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
3349 	if (err)
3350 		return err;
3351 
3352 	err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
3353 	if (err) {
3354 		mlx4_err(dev, "Fail to remove flow steering resources.\n ");
3355 		goto out;
3356 	}
3357 
3358 	err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
3359 		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3360 		       MLX4_CMD_NATIVE);
3361 	if (!err)
3362 		atomic_dec(&rqp->ref_count);
3363 out:
3364 	put_res(dev, slave, rrule->qpn, RES_QP);
3365 	return err;
3366 }
3367 
3368 enum {
3369 	BUSY_MAX_RETRIES = 10
3370 };
3371 
3372 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
3373 			       struct mlx4_vhcr *vhcr,
3374 			       struct mlx4_cmd_mailbox *inbox,
3375 			       struct mlx4_cmd_mailbox *outbox,
3376 			       struct mlx4_cmd_info *cmd)
3377 {
3378 	int err;
3379 	int index = vhcr->in_modifier & 0xffff;
3380 
3381 	err = get_res(dev, slave, index, RES_COUNTER, NULL);
3382 	if (err)
3383 		return err;
3384 
3385 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3386 	put_res(dev, slave, index, RES_COUNTER);
3387 	return err;
3388 }
3389 
3390 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
3391 {
3392 	struct res_gid *rgid;
3393 	struct res_gid *tmp;
3394 	struct mlx4_qp qp; /* dummy for calling attach/detach */
3395 
3396 	list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
3397 		switch (dev->caps.steering_mode) {
3398 		case MLX4_STEERING_MODE_DEVICE_MANAGED:
3399 			mlx4_flow_detach(dev, rgid->reg_id);
3400 			break;
3401 		case MLX4_STEERING_MODE_B0:
3402 			qp.qpn = rqp->local_qpn;
3403 			(void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
3404 						     rgid->prot, rgid->steer);
3405 			break;
3406 		}
3407 		list_del(&rgid->list);
3408 		kfree(rgid);
3409 	}
3410 }
3411 
3412 static int _move_all_busy(struct mlx4_dev *dev, int slave,
3413 			  enum mlx4_resource type, int print)
3414 {
3415 	struct mlx4_priv *priv = mlx4_priv(dev);
3416 	struct mlx4_resource_tracker *tracker =
3417 		&priv->mfunc.master.res_tracker;
3418 	struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
3419 	struct res_common *r;
3420 	struct res_common *tmp;
3421 	int busy;
3422 
3423 	busy = 0;
3424 	spin_lock_irq(mlx4_tlock(dev));
3425 	list_for_each_entry_safe(r, tmp, rlist, list) {
3426 		if (r->owner == slave) {
3427 			if (!r->removing) {
3428 				if (r->state == RES_ANY_BUSY) {
3429 					if (print)
3430 						mlx4_dbg(dev,
3431 							 "%s id 0x%llx is busy\n",
3432 							  ResourceType(type),
3433 							  r->res_id);
3434 					++busy;
3435 				} else {
3436 					r->from_state = r->state;
3437 					r->state = RES_ANY_BUSY;
3438 					r->removing = 1;
3439 				}
3440 			}
3441 		}
3442 	}
3443 	spin_unlock_irq(mlx4_tlock(dev));
3444 
3445 	return busy;
3446 }
3447 
3448 static int move_all_busy(struct mlx4_dev *dev, int slave,
3449 			 enum mlx4_resource type)
3450 {
3451 	unsigned long begin;
3452 	int busy;
3453 
3454 	begin = jiffies;
3455 	do {
3456 		busy = _move_all_busy(dev, slave, type, 0);
3457 		if (time_after(jiffies, begin + 5 * HZ))
3458 			break;
3459 		if (busy)
3460 			cond_resched();
3461 	} while (busy);
3462 
3463 	if (busy)
3464 		busy = _move_all_busy(dev, slave, type, 1);
3465 
3466 	return busy;
3467 }
3468 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
3469 {
3470 	struct mlx4_priv *priv = mlx4_priv(dev);
3471 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3472 	struct list_head *qp_list =
3473 		&tracker->slave_list[slave].res_list[RES_QP];
3474 	struct res_qp *qp;
3475 	struct res_qp *tmp;
3476 	int state;
3477 	u64 in_param;
3478 	int qpn;
3479 	int err;
3480 
3481 	err = move_all_busy(dev, slave, RES_QP);
3482 	if (err)
3483 		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy"
3484 			  "for slave %d\n", slave);
3485 
3486 	spin_lock_irq(mlx4_tlock(dev));
3487 	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
3488 		spin_unlock_irq(mlx4_tlock(dev));
3489 		if (qp->com.owner == slave) {
3490 			qpn = qp->com.res_id;
3491 			detach_qp(dev, slave, qp);
3492 			state = qp->com.from_state;
3493 			while (state != 0) {
3494 				switch (state) {
3495 				case RES_QP_RESERVED:
3496 					spin_lock_irq(mlx4_tlock(dev));
3497 					rb_erase(&qp->com.node,
3498 						 &tracker->res_tree[RES_QP]);
3499 					list_del(&qp->com.list);
3500 					spin_unlock_irq(mlx4_tlock(dev));
3501 					kfree(qp);
3502 					state = 0;
3503 					break;
3504 				case RES_QP_MAPPED:
3505 					if (!valid_reserved(dev, slave, qpn))
3506 						__mlx4_qp_free_icm(dev, qpn);
3507 					state = RES_QP_RESERVED;
3508 					break;
3509 				case RES_QP_HW:
3510 					in_param = slave;
3511 					err = mlx4_cmd(dev, in_param,
3512 						       qp->local_qpn, 2,
3513 						       MLX4_CMD_2RST_QP,
3514 						       MLX4_CMD_TIME_CLASS_A,
3515 						       MLX4_CMD_NATIVE);
3516 					if (err)
3517 						mlx4_dbg(dev, "rem_slave_qps: failed"
3518 							 " to move slave %d qpn %d to"
3519 							 " reset\n", slave,
3520 							 qp->local_qpn);
3521 					atomic_dec(&qp->rcq->ref_count);
3522 					atomic_dec(&qp->scq->ref_count);
3523 					atomic_dec(&qp->mtt->ref_count);
3524 					if (qp->srq)
3525 						atomic_dec(&qp->srq->ref_count);
3526 					state = RES_QP_MAPPED;
3527 					break;
3528 				default:
3529 					state = 0;
3530 				}
3531 			}
3532 		}
3533 		spin_lock_irq(mlx4_tlock(dev));
3534 	}
3535 	spin_unlock_irq(mlx4_tlock(dev));
3536 }
3537 
3538 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
3539 {
3540 	struct mlx4_priv *priv = mlx4_priv(dev);
3541 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3542 	struct list_head *srq_list =
3543 		&tracker->slave_list[slave].res_list[RES_SRQ];
3544 	struct res_srq *srq;
3545 	struct res_srq *tmp;
3546 	int state;
3547 	u64 in_param;
3548 	LIST_HEAD(tlist);
3549 	int srqn;
3550 	int err;
3551 
3552 	err = move_all_busy(dev, slave, RES_SRQ);
3553 	if (err)
3554 		mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
3555 			  "busy for slave %d\n", slave);
3556 
3557 	spin_lock_irq(mlx4_tlock(dev));
3558 	list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
3559 		spin_unlock_irq(mlx4_tlock(dev));
3560 		if (srq->com.owner == slave) {
3561 			srqn = srq->com.res_id;
3562 			state = srq->com.from_state;
3563 			while (state != 0) {
3564 				switch (state) {
3565 				case RES_SRQ_ALLOCATED:
3566 					__mlx4_srq_free_icm(dev, srqn);
3567 					spin_lock_irq(mlx4_tlock(dev));
3568 					rb_erase(&srq->com.node,
3569 						 &tracker->res_tree[RES_SRQ]);
3570 					list_del(&srq->com.list);
3571 					spin_unlock_irq(mlx4_tlock(dev));
3572 					kfree(srq);
3573 					state = 0;
3574 					break;
3575 
3576 				case RES_SRQ_HW:
3577 					in_param = slave;
3578 					err = mlx4_cmd(dev, in_param, srqn, 1,
3579 						       MLX4_CMD_HW2SW_SRQ,
3580 						       MLX4_CMD_TIME_CLASS_A,
3581 						       MLX4_CMD_NATIVE);
3582 					if (err)
3583 						mlx4_dbg(dev, "rem_slave_srqs: failed"
3584 							 " to move slave %d srq %d to"
3585 							 " SW ownership\n",
3586 							 slave, srqn);
3587 
3588 					atomic_dec(&srq->mtt->ref_count);
3589 					if (srq->cq)
3590 						atomic_dec(&srq->cq->ref_count);
3591 					state = RES_SRQ_ALLOCATED;
3592 					break;
3593 
3594 				default:
3595 					state = 0;
3596 				}
3597 			}
3598 		}
3599 		spin_lock_irq(mlx4_tlock(dev));
3600 	}
3601 	spin_unlock_irq(mlx4_tlock(dev));
3602 }
3603 
3604 static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
3605 {
3606 	struct mlx4_priv *priv = mlx4_priv(dev);
3607 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3608 	struct list_head *cq_list =
3609 		&tracker->slave_list[slave].res_list[RES_CQ];
3610 	struct res_cq *cq;
3611 	struct res_cq *tmp;
3612 	int state;
3613 	u64 in_param;
3614 	LIST_HEAD(tlist);
3615 	int cqn;
3616 	int err;
3617 
3618 	err = move_all_busy(dev, slave, RES_CQ);
3619 	if (err)
3620 		mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
3621 			  "busy for slave %d\n", slave);
3622 
3623 	spin_lock_irq(mlx4_tlock(dev));
3624 	list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
3625 		spin_unlock_irq(mlx4_tlock(dev));
3626 		if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
3627 			cqn = cq->com.res_id;
3628 			state = cq->com.from_state;
3629 			while (state != 0) {
3630 				switch (state) {
3631 				case RES_CQ_ALLOCATED:
3632 					__mlx4_cq_free_icm(dev, cqn);
3633 					spin_lock_irq(mlx4_tlock(dev));
3634 					rb_erase(&cq->com.node,
3635 						 &tracker->res_tree[RES_CQ]);
3636 					list_del(&cq->com.list);
3637 					spin_unlock_irq(mlx4_tlock(dev));
3638 					kfree(cq);
3639 					state = 0;
3640 					break;
3641 
3642 				case RES_CQ_HW:
3643 					in_param = slave;
3644 					err = mlx4_cmd(dev, in_param, cqn, 1,
3645 						       MLX4_CMD_HW2SW_CQ,
3646 						       MLX4_CMD_TIME_CLASS_A,
3647 						       MLX4_CMD_NATIVE);
3648 					if (err)
3649 						mlx4_dbg(dev, "rem_slave_cqs: failed"
3650 							 " to move slave %d cq %d to"
3651 							 " SW ownership\n",
3652 							 slave, cqn);
3653 					atomic_dec(&cq->mtt->ref_count);
3654 					state = RES_CQ_ALLOCATED;
3655 					break;
3656 
3657 				default:
3658 					state = 0;
3659 				}
3660 			}
3661 		}
3662 		spin_lock_irq(mlx4_tlock(dev));
3663 	}
3664 	spin_unlock_irq(mlx4_tlock(dev));
3665 }
3666 
3667 static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
3668 {
3669 	struct mlx4_priv *priv = mlx4_priv(dev);
3670 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3671 	struct list_head *mpt_list =
3672 		&tracker->slave_list[slave].res_list[RES_MPT];
3673 	struct res_mpt *mpt;
3674 	struct res_mpt *tmp;
3675 	int state;
3676 	u64 in_param;
3677 	LIST_HEAD(tlist);
3678 	int mptn;
3679 	int err;
3680 
3681 	err = move_all_busy(dev, slave, RES_MPT);
3682 	if (err)
3683 		mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
3684 			  "busy for slave %d\n", slave);
3685 
3686 	spin_lock_irq(mlx4_tlock(dev));
3687 	list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
3688 		spin_unlock_irq(mlx4_tlock(dev));
3689 		if (mpt->com.owner == slave) {
3690 			mptn = mpt->com.res_id;
3691 			state = mpt->com.from_state;
3692 			while (state != 0) {
3693 				switch (state) {
3694 				case RES_MPT_RESERVED:
3695 					__mlx4_mpt_release(dev, mpt->key);
3696 					spin_lock_irq(mlx4_tlock(dev));
3697 					rb_erase(&mpt->com.node,
3698 						 &tracker->res_tree[RES_MPT]);
3699 					list_del(&mpt->com.list);
3700 					spin_unlock_irq(mlx4_tlock(dev));
3701 					kfree(mpt);
3702 					state = 0;
3703 					break;
3704 
3705 				case RES_MPT_MAPPED:
3706 					__mlx4_mpt_free_icm(dev, mpt->key);
3707 					state = RES_MPT_RESERVED;
3708 					break;
3709 
3710 				case RES_MPT_HW:
3711 					in_param = slave;
3712 					err = mlx4_cmd(dev, in_param, mptn, 0,
3713 						     MLX4_CMD_HW2SW_MPT,
3714 						     MLX4_CMD_TIME_CLASS_A,
3715 						     MLX4_CMD_NATIVE);
3716 					if (err)
3717 						mlx4_dbg(dev, "rem_slave_mrs: failed"
3718 							 " to move slave %d mpt %d to"
3719 							 " SW ownership\n",
3720 							 slave, mptn);
3721 					if (mpt->mtt)
3722 						atomic_dec(&mpt->mtt->ref_count);
3723 					state = RES_MPT_MAPPED;
3724 					break;
3725 				default:
3726 					state = 0;
3727 				}
3728 			}
3729 		}
3730 		spin_lock_irq(mlx4_tlock(dev));
3731 	}
3732 	spin_unlock_irq(mlx4_tlock(dev));
3733 }
3734 
3735 static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
3736 {
3737 	struct mlx4_priv *priv = mlx4_priv(dev);
3738 	struct mlx4_resource_tracker *tracker =
3739 		&priv->mfunc.master.res_tracker;
3740 	struct list_head *mtt_list =
3741 		&tracker->slave_list[slave].res_list[RES_MTT];
3742 	struct res_mtt *mtt;
3743 	struct res_mtt *tmp;
3744 	int state;
3745 	LIST_HEAD(tlist);
3746 	int base;
3747 	int err;
3748 
3749 	err = move_all_busy(dev, slave, RES_MTT);
3750 	if (err)
3751 		mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
3752 			  "busy for slave %d\n", slave);
3753 
3754 	spin_lock_irq(mlx4_tlock(dev));
3755 	list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
3756 		spin_unlock_irq(mlx4_tlock(dev));
3757 		if (mtt->com.owner == slave) {
3758 			base = mtt->com.res_id;
3759 			state = mtt->com.from_state;
3760 			while (state != 0) {
3761 				switch (state) {
3762 				case RES_MTT_ALLOCATED:
3763 					__mlx4_free_mtt_range(dev, base,
3764 							      mtt->order);
3765 					spin_lock_irq(mlx4_tlock(dev));
3766 					rb_erase(&mtt->com.node,
3767 						 &tracker->res_tree[RES_MTT]);
3768 					list_del(&mtt->com.list);
3769 					spin_unlock_irq(mlx4_tlock(dev));
3770 					kfree(mtt);
3771 					state = 0;
3772 					break;
3773 
3774 				default:
3775 					state = 0;
3776 				}
3777 			}
3778 		}
3779 		spin_lock_irq(mlx4_tlock(dev));
3780 	}
3781 	spin_unlock_irq(mlx4_tlock(dev));
3782 }
3783 
3784 static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
3785 {
3786 	struct mlx4_priv *priv = mlx4_priv(dev);
3787 	struct mlx4_resource_tracker *tracker =
3788 		&priv->mfunc.master.res_tracker;
3789 	struct list_head *fs_rule_list =
3790 		&tracker->slave_list[slave].res_list[RES_FS_RULE];
3791 	struct res_fs_rule *fs_rule;
3792 	struct res_fs_rule *tmp;
3793 	int state;
3794 	u64 base;
3795 	int err;
3796 
3797 	err = move_all_busy(dev, slave, RES_FS_RULE);
3798 	if (err)
3799 		mlx4_warn(dev, "rem_slave_fs_rule: Could not move all mtts to busy for slave %d\n",
3800 			  slave);
3801 
3802 	spin_lock_irq(mlx4_tlock(dev));
3803 	list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
3804 		spin_unlock_irq(mlx4_tlock(dev));
3805 		if (fs_rule->com.owner == slave) {
3806 			base = fs_rule->com.res_id;
3807 			state = fs_rule->com.from_state;
3808 			while (state != 0) {
3809 				switch (state) {
3810 				case RES_FS_RULE_ALLOCATED:
3811 					/* detach rule */
3812 					err = mlx4_cmd(dev, base, 0, 0,
3813 						       MLX4_QP_FLOW_STEERING_DETACH,
3814 						       MLX4_CMD_TIME_CLASS_A,
3815 						       MLX4_CMD_NATIVE);
3816 
3817 					spin_lock_irq(mlx4_tlock(dev));
3818 					rb_erase(&fs_rule->com.node,
3819 						 &tracker->res_tree[RES_FS_RULE]);
3820 					list_del(&fs_rule->com.list);
3821 					spin_unlock_irq(mlx4_tlock(dev));
3822 					kfree(fs_rule);
3823 					state = 0;
3824 					break;
3825 
3826 				default:
3827 					state = 0;
3828 				}
3829 			}
3830 		}
3831 		spin_lock_irq(mlx4_tlock(dev));
3832 	}
3833 	spin_unlock_irq(mlx4_tlock(dev));
3834 }
3835 
3836 static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
3837 {
3838 	struct mlx4_priv *priv = mlx4_priv(dev);
3839 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3840 	struct list_head *eq_list =
3841 		&tracker->slave_list[slave].res_list[RES_EQ];
3842 	struct res_eq *eq;
3843 	struct res_eq *tmp;
3844 	int err;
3845 	int state;
3846 	LIST_HEAD(tlist);
3847 	int eqn;
3848 	struct mlx4_cmd_mailbox *mailbox;
3849 
3850 	err = move_all_busy(dev, slave, RES_EQ);
3851 	if (err)
3852 		mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
3853 			  "busy for slave %d\n", slave);
3854 
3855 	spin_lock_irq(mlx4_tlock(dev));
3856 	list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
3857 		spin_unlock_irq(mlx4_tlock(dev));
3858 		if (eq->com.owner == slave) {
3859 			eqn = eq->com.res_id;
3860 			state = eq->com.from_state;
3861 			while (state != 0) {
3862 				switch (state) {
3863 				case RES_EQ_RESERVED:
3864 					spin_lock_irq(mlx4_tlock(dev));
3865 					rb_erase(&eq->com.node,
3866 						 &tracker->res_tree[RES_EQ]);
3867 					list_del(&eq->com.list);
3868 					spin_unlock_irq(mlx4_tlock(dev));
3869 					kfree(eq);
3870 					state = 0;
3871 					break;
3872 
3873 				case RES_EQ_HW:
3874 					mailbox = mlx4_alloc_cmd_mailbox(dev);
3875 					if (IS_ERR(mailbox)) {
3876 						cond_resched();
3877 						continue;
3878 					}
3879 					err = mlx4_cmd_box(dev, slave, 0,
3880 							   eqn & 0xff, 0,
3881 							   MLX4_CMD_HW2SW_EQ,
3882 							   MLX4_CMD_TIME_CLASS_A,
3883 							   MLX4_CMD_NATIVE);
3884 					if (err)
3885 						mlx4_dbg(dev, "rem_slave_eqs: failed"
3886 							 " to move slave %d eqs %d to"
3887 							 " SW ownership\n", slave, eqn);
3888 					mlx4_free_cmd_mailbox(dev, mailbox);
3889 					atomic_dec(&eq->mtt->ref_count);
3890 					state = RES_EQ_RESERVED;
3891 					break;
3892 
3893 				default:
3894 					state = 0;
3895 				}
3896 			}
3897 		}
3898 		spin_lock_irq(mlx4_tlock(dev));
3899 	}
3900 	spin_unlock_irq(mlx4_tlock(dev));
3901 }
3902 
3903 static void rem_slave_counters(struct mlx4_dev *dev, int slave)
3904 {
3905 	struct mlx4_priv *priv = mlx4_priv(dev);
3906 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3907 	struct list_head *counter_list =
3908 		&tracker->slave_list[slave].res_list[RES_COUNTER];
3909 	struct res_counter *counter;
3910 	struct res_counter *tmp;
3911 	int err;
3912 	int index;
3913 
3914 	err = move_all_busy(dev, slave, RES_COUNTER);
3915 	if (err)
3916 		mlx4_warn(dev, "rem_slave_counters: Could not move all counters to "
3917 			  "busy for slave %d\n", slave);
3918 
3919 	spin_lock_irq(mlx4_tlock(dev));
3920 	list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
3921 		if (counter->com.owner == slave) {
3922 			index = counter->com.res_id;
3923 			rb_erase(&counter->com.node,
3924 				 &tracker->res_tree[RES_COUNTER]);
3925 			list_del(&counter->com.list);
3926 			kfree(counter);
3927 			__mlx4_counter_free(dev, index);
3928 		}
3929 	}
3930 	spin_unlock_irq(mlx4_tlock(dev));
3931 }
3932 
3933 static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
3934 {
3935 	struct mlx4_priv *priv = mlx4_priv(dev);
3936 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3937 	struct list_head *xrcdn_list =
3938 		&tracker->slave_list[slave].res_list[RES_XRCD];
3939 	struct res_xrcdn *xrcd;
3940 	struct res_xrcdn *tmp;
3941 	int err;
3942 	int xrcdn;
3943 
3944 	err = move_all_busy(dev, slave, RES_XRCD);
3945 	if (err)
3946 		mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to "
3947 			  "busy for slave %d\n", slave);
3948 
3949 	spin_lock_irq(mlx4_tlock(dev));
3950 	list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
3951 		if (xrcd->com.owner == slave) {
3952 			xrcdn = xrcd->com.res_id;
3953 			rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
3954 			list_del(&xrcd->com.list);
3955 			kfree(xrcd);
3956 			__mlx4_xrcd_free(dev, xrcdn);
3957 		}
3958 	}
3959 	spin_unlock_irq(mlx4_tlock(dev));
3960 }
3961 
3962 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
3963 {
3964 	struct mlx4_priv *priv = mlx4_priv(dev);
3965 
3966 	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3967 	/*VLAN*/
3968 	rem_slave_macs(dev, slave);
3969 	rem_slave_fs_rule(dev, slave);
3970 	rem_slave_qps(dev, slave);
3971 	rem_slave_srqs(dev, slave);
3972 	rem_slave_cqs(dev, slave);
3973 	rem_slave_mrs(dev, slave);
3974 	rem_slave_eqs(dev, slave);
3975 	rem_slave_mtts(dev, slave);
3976 	rem_slave_counters(dev, slave);
3977 	rem_slave_xrcdns(dev, slave);
3978 	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3979 }
3980 
3981 void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
3982 {
3983 	struct mlx4_vf_immed_vlan_work *work =
3984 		container_of(_work, struct mlx4_vf_immed_vlan_work, work);
3985 	struct mlx4_cmd_mailbox *mailbox;
3986 	struct mlx4_update_qp_context *upd_context;
3987 	struct mlx4_dev *dev = &work->priv->dev;
3988 	struct mlx4_resource_tracker *tracker =
3989 		&work->priv->mfunc.master.res_tracker;
3990 	struct list_head *qp_list =
3991 		&tracker->slave_list[work->slave].res_list[RES_QP];
3992 	struct res_qp *qp;
3993 	struct res_qp *tmp;
3994 	u64 qp_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
3995 		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
3996 		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
3997 		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
3998 		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
3999 		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED) |
4000 		       (1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
4001 		       (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
4002 
4003 	int err;
4004 	int port, errors = 0;
4005 	u8 vlan_control;
4006 
4007 	if (mlx4_is_slave(dev)) {
4008 		mlx4_warn(dev, "Trying to update-qp in slave %d\n",
4009 			  work->slave);
4010 		goto out;
4011 	}
4012 
4013 	mailbox = mlx4_alloc_cmd_mailbox(dev);
4014 	if (IS_ERR(mailbox))
4015 		goto out;
4016 	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
4017 		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4018 			MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
4019 			MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
4020 			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
4021 			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
4022 			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
4023 	else if (!work->vlan_id)
4024 		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4025 			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
4026 	else
4027 		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4028 			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
4029 			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
4030 
4031 	upd_context = mailbox->buf;
4032 	upd_context->primary_addr_path_mask = cpu_to_be64(qp_mask);
4033 	upd_context->qp_context.pri_path.vlan_control = vlan_control;
4034 	upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
4035 
4036 	spin_lock_irq(mlx4_tlock(dev));
4037 	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4038 		spin_unlock_irq(mlx4_tlock(dev));
4039 		if (qp->com.owner == work->slave) {
4040 			if (qp->com.from_state != RES_QP_HW ||
4041 			    !qp->sched_queue ||  /* no INIT2RTR trans yet */
4042 			    mlx4_is_qp_reserved(dev, qp->local_qpn) ||
4043 			    qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
4044 				spin_lock_irq(mlx4_tlock(dev));
4045 				continue;
4046 			}
4047 			port = (qp->sched_queue >> 6 & 1) + 1;
4048 			if (port != work->port) {
4049 				spin_lock_irq(mlx4_tlock(dev));
4050 				continue;
4051 			}
4052 			upd_context->qp_context.pri_path.sched_queue =
4053 				qp->sched_queue & 0xC7;
4054 			upd_context->qp_context.pri_path.sched_queue |=
4055 				((work->qos & 0x7) << 3);
4056 
4057 			err = mlx4_cmd(dev, mailbox->dma,
4058 				       qp->local_qpn & 0xffffff,
4059 				       0, MLX4_CMD_UPDATE_QP,
4060 				       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
4061 			if (err) {
4062 				mlx4_info(dev, "UPDATE_QP failed for slave %d, "
4063 					  "port %d, qpn %d (%d)\n",
4064 					  work->slave, port, qp->local_qpn,
4065 					  err);
4066 				errors++;
4067 			}
4068 		}
4069 		spin_lock_irq(mlx4_tlock(dev));
4070 	}
4071 	spin_unlock_irq(mlx4_tlock(dev));
4072 	mlx4_free_cmd_mailbox(dev, mailbox);
4073 
4074 	if (errors)
4075 		mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
4076 			 errors, work->slave, work->port);
4077 
4078 	/* unregister previous vlan_id if needed and we had no errors
4079 	 * while updating the QPs
4080 	 */
4081 	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
4082 	    NO_INDX != work->orig_vlan_ix)
4083 		__mlx4_unregister_vlan(&work->priv->dev, work->port,
4084 				       work->orig_vlan_ix);
4085 out:
4086 	kfree(work);
4087 	return;
4088 }
4089