1 /*
2  * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
4  * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/export.h>
38 #include <linux/pci.h>
39 #include <linux/errno.h>
40 
41 #include <linux/mlx4/cmd.h>
42 #include <linux/mlx4/device.h>
43 #include <linux/semaphore.h>
44 #include <rdma/ib_smi.h>
45 #include <linux/delay.h>
46 
47 #include <asm/io.h>
48 
49 #include "mlx4.h"
50 #include "fw.h"
51 #include "fw_qos.h"
52 #include "mlx4_stats.h"
53 
54 #define CMD_POLL_TOKEN 0xffff
55 #define INBOX_MASK	0xffffffffffffff00ULL
56 
57 #define CMD_CHAN_VER 1
58 #define CMD_CHAN_IF_REV 1
59 
60 enum {
61 	/* command completed successfully: */
62 	CMD_STAT_OK		= 0x00,
63 	/* Internal error (such as a bus error) occurred while processing command: */
64 	CMD_STAT_INTERNAL_ERR	= 0x01,
65 	/* Operation/command not supported or opcode modifier not supported: */
66 	CMD_STAT_BAD_OP		= 0x02,
67 	/* Parameter not supported or parameter out of range: */
68 	CMD_STAT_BAD_PARAM	= 0x03,
69 	/* System not enabled or bad system state: */
70 	CMD_STAT_BAD_SYS_STATE	= 0x04,
	/* Attempt to access reserved or unallocated resource: */
72 	CMD_STAT_BAD_RESOURCE	= 0x05,
73 	/* Requested resource is currently executing a command, or is otherwise busy: */
74 	CMD_STAT_RESOURCE_BUSY	= 0x06,
75 	/* Required capability exceeds device limits: */
76 	CMD_STAT_EXCEED_LIM	= 0x08,
77 	/* Resource is not in the appropriate state or ownership: */
78 	CMD_STAT_BAD_RES_STATE	= 0x09,
79 	/* Index out of range: */
80 	CMD_STAT_BAD_INDEX	= 0x0a,
81 	/* FW image corrupted: */
82 	CMD_STAT_BAD_NVMEM	= 0x0b,
83 	/* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */
84 	CMD_STAT_ICM_ERROR	= 0x0c,
85 	/* Attempt to modify a QP/EE which is not in the presumed state: */
86 	CMD_STAT_BAD_QP_STATE   = 0x10,
87 	/* Bad segment parameters (Address/Size): */
88 	CMD_STAT_BAD_SEG_PARAM	= 0x20,
	/* Memory Region has Memory Windows bound to it: */
90 	CMD_STAT_REG_BOUND	= 0x21,
91 	/* HCA local attached memory not present: */
92 	CMD_STAT_LAM_NOT_PRE	= 0x22,
93 	/* Bad management packet (silently discarded): */
94 	CMD_STAT_BAD_PKT	= 0x30,
95 	/* More outstanding CQEs in CQ than new CQ size: */
96 	CMD_STAT_BAD_SIZE	= 0x40,
97 	/* Multi Function device support required: */
98 	CMD_STAT_MULTI_FUNC_REQ	= 0x50,
99 };
100 
101 enum {
102 	HCR_IN_PARAM_OFFSET	= 0x00,
103 	HCR_IN_MODIFIER_OFFSET	= 0x08,
104 	HCR_OUT_PARAM_OFFSET	= 0x0c,
105 	HCR_TOKEN_OFFSET	= 0x14,
106 	HCR_STATUS_OFFSET	= 0x18,
107 
108 	HCR_OPMOD_SHIFT		= 12,
109 	HCR_T_BIT		= 21,
110 	HCR_E_BIT		= 22,
111 	HCR_GO_BIT		= 23
112 };
113 
114 enum {
115 	GO_BIT_TIMEOUT_MSECS	= 10000
116 };
117 
118 enum mlx4_vlan_transition {
119 	MLX4_VLAN_TRANSITION_VST_VST = 0,
120 	MLX4_VLAN_TRANSITION_VST_VGT = 1,
121 	MLX4_VLAN_TRANSITION_VGT_VST = 2,
122 	MLX4_VLAN_TRANSITION_VGT_VGT = 3,
123 };
124 
125 
126 struct mlx4_cmd_context {
127 	struct completion	done;
128 	int			result;
129 	int			next;
130 	u64			out_param;
131 	u16			token;
132 	u8			fw_status;
133 };
134 
135 static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
136 				    struct mlx4_vhcr_cmd *in_vhcr);
137 
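/*
 * Translate a firmware command status byte into a negative errno.  Any
 * status outside the table, or a non-OK status that maps to zero, is
 * collapsed to -EIO.
 */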
138 static int mlx4_status_to_errno(u8 status)
139 {
140 	static const int trans_table[] = {
141 		[CMD_STAT_INTERNAL_ERR]	  = -EIO,
142 		[CMD_STAT_BAD_OP]	  = -EPERM,
143 		[CMD_STAT_BAD_PARAM]	  = -EINVAL,
144 		[CMD_STAT_BAD_SYS_STATE]  = -ENXIO,
145 		[CMD_STAT_BAD_RESOURCE]	  = -EBADF,
146 		[CMD_STAT_RESOURCE_BUSY]  = -EBUSY,
147 		[CMD_STAT_EXCEED_LIM]	  = -ENOMEM,
148 		[CMD_STAT_BAD_RES_STATE]  = -EBADF,
149 		[CMD_STAT_BAD_INDEX]	  = -EBADF,
150 		[CMD_STAT_BAD_NVMEM]	  = -EFAULT,
151 		[CMD_STAT_ICM_ERROR]	  = -ENFILE,
152 		[CMD_STAT_BAD_QP_STATE]   = -EINVAL,
153 		[CMD_STAT_BAD_SEG_PARAM]  = -EFAULT,
154 		[CMD_STAT_REG_BOUND]	  = -EBUSY,
155 		[CMD_STAT_LAM_NOT_PRE]	  = -EAGAIN,
156 		[CMD_STAT_BAD_PKT]	  = -EINVAL,
157 		[CMD_STAT_BAD_SIZE]	  = -ENOMEM,
158 		[CMD_STAT_MULTI_FUNC_REQ] = -EACCES,
159 	};
160 
161 	if (status >= ARRAY_SIZE(trans_table) ||
162 	    (status != CMD_STAT_OK && trans_table[status] == 0))
163 		return -EIO;
164 
165 	return trans_table[status];
166 }
167 
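/*
 * Inverse of mlx4_status_to_errno(): used when reporting the result of a
 * wrapped command back to a slave through the VHCR status byte.  Errnos
 * without a dedicated firmware status fall back to CMD_STAT_INTERNAL_ERR.
 */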
168 static u8 mlx4_errno_to_status(int errno)
169 {
170 	switch (errno) {
171 	case -EPERM:
172 		return CMD_STAT_BAD_OP;
173 	case -EINVAL:
174 		return CMD_STAT_BAD_PARAM;
175 	case -ENXIO:
176 		return CMD_STAT_BAD_SYS_STATE;
177 	case -EBUSY:
178 		return CMD_STAT_RESOURCE_BUSY;
179 	case -ENOMEM:
180 		return CMD_STAT_EXCEED_LIM;
181 	case -ENFILE:
182 		return CMD_STAT_ICM_ERROR;
183 	default:
184 		return CMD_STAT_INTERNAL_ERR;
185 	}
186 }
187 
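/*
 * Return value used while the device is in internal-error state: teardown
 * commands (unmap/close/reset-like opcodes and multicast detach) are
 * reported as successful so that cleanup can proceed; everything else
 * fails with the internal-error errno.
 */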
188 static int mlx4_internal_err_ret_value(struct mlx4_dev *dev, u16 op,
189 				       u8 op_modifier)
190 {
191 	switch (op) {
192 	case MLX4_CMD_UNMAP_ICM:
193 	case MLX4_CMD_UNMAP_ICM_AUX:
194 	case MLX4_CMD_UNMAP_FA:
195 	case MLX4_CMD_2RST_QP:
196 	case MLX4_CMD_HW2SW_EQ:
197 	case MLX4_CMD_HW2SW_CQ:
198 	case MLX4_CMD_HW2SW_SRQ:
199 	case MLX4_CMD_HW2SW_MPT:
200 	case MLX4_CMD_CLOSE_HCA:
201 	case MLX4_QP_FLOW_STEERING_DETACH:
202 	case MLX4_CMD_FREE_RES:
203 	case MLX4_CMD_CLOSE_PORT:
204 		return CMD_STAT_OK;
205 
206 	case MLX4_CMD_QP_ATTACH:
		/* In the detach case (op_modifier == 0), return success */
208 		if (op_modifier == 0)
209 			return CMD_STAT_OK;
210 		return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
211 
212 	default:
213 		return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
214 	}
215 }
216 
217 static int mlx4_closing_cmd_fatal_error(u16 op, u8 fw_status)
218 {
219 	/* Any error during the closing commands below is considered fatal */
220 	if (op == MLX4_CMD_CLOSE_HCA ||
221 	    op == MLX4_CMD_HW2SW_EQ ||
222 	    op == MLX4_CMD_HW2SW_CQ ||
223 	    op == MLX4_CMD_2RST_QP ||
224 	    op == MLX4_CMD_HW2SW_SRQ ||
225 	    op == MLX4_CMD_SYNC_TPT ||
226 	    op == MLX4_CMD_UNMAP_ICM ||
227 	    op == MLX4_CMD_UNMAP_ICM_AUX ||
228 	    op == MLX4_CMD_UNMAP_FA)
229 		return 1;
	/* Error on MLX4_CMD_HW2SW_MPT is fatal except when fw status equals
	 * CMD_STAT_REG_BOUND.
	 * This status indicates that the memory region has memory windows
	 * bound to it, which may result from invalid user space usage and
	 * is not fatal.
	 */
235 	if (op == MLX4_CMD_HW2SW_MPT && fw_status != CMD_STAT_REG_BOUND)
236 		return 1;
237 	return 0;
238 }
239 
240 static int mlx4_cmd_reset_flow(struct mlx4_dev *dev, u16 op, u8 op_modifier,
241 			       int err)
242 {
	/* Only if the reset flow is really active is the return code based
	 * on the command; otherwise the current error code is returned.
	 */
246 	if (mlx4_internal_err_reset) {
247 		mlx4_enter_error_state(dev->persist);
248 		err = mlx4_internal_err_ret_value(dev, op, op_modifier);
249 	}
250 
251 	return err;
252 }
253 
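/*
 * A comm-channel command is still pending while the toggle bit (bit 31)
 * echoed by the firmware in slave_read differs from the driver's copy of
 * the toggle.
 */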
254 static int comm_pending(struct mlx4_dev *dev)
255 {
256 	struct mlx4_priv *priv = mlx4_priv(dev);
257 	u32 status = readl(&priv->mfunc.comm->slave_read);
258 
259 	return (swab32(status) >> 31) != priv->cmd.comm_toggle;
260 }
261 
262 static int mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
263 {
264 	struct mlx4_priv *priv = mlx4_priv(dev);
265 	u32 val;
266 
267 	/* To avoid writing to unknown addresses after the device state was
	 * changed to internal error and the function was reset,
269 	 * check the INTERNAL_ERROR flag which is updated under
270 	 * device_state_mutex lock.
271 	 */
272 	mutex_lock(&dev->persist->device_state_mutex);
273 
274 	if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
275 		mutex_unlock(&dev->persist->device_state_mutex);
276 		return -EIO;
277 	}
278 
279 	priv->cmd.comm_toggle ^= 1;
280 	val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
281 	__raw_writel((__force u32) cpu_to_be32(val),
282 		     &priv->mfunc.comm->slave_write);
283 	mmiowb();
284 	mutex_unlock(&dev->persist->device_state_mutex);
285 	return 0;
286 }
287 
288 static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
289 		       unsigned long timeout)
290 {
291 	struct mlx4_priv *priv = mlx4_priv(dev);
292 	unsigned long end;
293 	int err = 0;
294 	int ret_from_pending = 0;
295 
296 	/* First, verify that the master reports correct status */
297 	if (comm_pending(dev)) {
298 		mlx4_warn(dev, "Communication channel is not idle - my toggle is %d (cmd:0x%x)\n",
299 			  priv->cmd.comm_toggle, cmd);
300 		return -EAGAIN;
301 	}
302 
303 	/* Write command */
304 	down(&priv->cmd.poll_sem);
305 	if (mlx4_comm_cmd_post(dev, cmd, param)) {
		/* mlx4_comm_cmd_post returns an error only when the
		 * device state is INTERNAL_ERROR.
		 */
309 		err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
310 		goto out;
311 	}
312 
313 	end = msecs_to_jiffies(timeout) + jiffies;
314 	while (comm_pending(dev) && time_before(jiffies, end))
315 		cond_resched();
316 	ret_from_pending = comm_pending(dev);
317 	if (ret_from_pending) {
		/* Check whether the slave is trying to boot in the middle
		 * of the FLR process. The only non-zero result in the RESET
		 * command is MLX4_DELAY_RESET_SLAVE.
		 */
		if (cmd == MLX4_COMM_CMD_RESET) {
322 			err = MLX4_DELAY_RESET_SLAVE;
323 			goto out;
324 		} else {
325 			mlx4_warn(dev, "Communication channel command 0x%x timed out\n",
326 				  cmd);
327 			err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
328 		}
329 	}
330 
331 	if (err)
332 		mlx4_enter_error_state(dev->persist);
333 out:
334 	up(&priv->cmd.poll_sem);
335 	return err;
336 }
337 
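/*
 * Event-driven comm-channel command: grab a command context with a fresh
 * token and sleep until the master's completion EQE (see mlx4_GEN_EQE in
 * mlx4_master_process_vhcr()) wakes us via mlx4_cmd_event().
 */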
338 static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 vhcr_cmd,
339 			      u16 param, u16 op, unsigned long timeout)
340 {
341 	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
342 	struct mlx4_cmd_context *context;
343 	unsigned long end;
344 	int err = 0;
345 
346 	down(&cmd->event_sem);
347 
348 	spin_lock(&cmd->context_lock);
349 	BUG_ON(cmd->free_head < 0);
350 	context = &cmd->context[cmd->free_head];
351 	context->token += cmd->token_mask + 1;
352 	cmd->free_head = context->next;
353 	spin_unlock(&cmd->context_lock);
354 
355 	reinit_completion(&context->done);
356 
357 	if (mlx4_comm_cmd_post(dev, vhcr_cmd, param)) {
		/* mlx4_comm_cmd_post returns an error only when the
		 * device state is INTERNAL_ERROR.
		 */
361 		err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
362 		goto out;
363 	}
364 
365 	if (!wait_for_completion_timeout(&context->done,
366 					 msecs_to_jiffies(timeout))) {
367 		mlx4_warn(dev, "communication channel command 0x%x (op=0x%x) timed out\n",
368 			  vhcr_cmd, op);
369 		goto out_reset;
370 	}
371 
372 	err = context->result;
373 	if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) {
374 		mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
375 			 vhcr_cmd, context->fw_status);
376 		if (mlx4_closing_cmd_fatal_error(op, context->fw_status))
377 			goto out_reset;
378 	}
379 
	/* Wait for the comm channel to become ready; this is necessary to
	 * prevent a race when switching between event and polling mode.
	 * Skip this section when the device is in FATAL_ERROR state: in
	 * that state no commands are sent via the comm channel until the
	 * device has returned from reset.
	 */
387 	if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
388 		end = msecs_to_jiffies(timeout) + jiffies;
389 		while (comm_pending(dev) && time_before(jiffies, end))
390 			cond_resched();
391 	}
392 	goto out;
393 
394 out_reset:
395 	err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
396 	mlx4_enter_error_state(dev->persist);
397 out:
398 	spin_lock(&cmd->context_lock);
399 	context->next = cmd->free_head;
400 	cmd->free_head = context - cmd->context;
401 	spin_unlock(&cmd->context_lock);
402 
403 	up(&cmd->event_sem);
404 	return err;
405 }
406 
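/* Issue a comm-channel command, using events if enabled, else polling */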
407 int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
408 		  u16 op, unsigned long timeout)
409 {
410 	if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
411 		return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
412 
413 	if (mlx4_priv(dev)->cmd.use_events)
414 		return mlx4_comm_cmd_wait(dev, cmd, param, op, timeout);
415 	return mlx4_comm_cmd_poll(dev, cmd, param, timeout);
416 }
417 
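/*
 * The HCR is still owned by a previous command while the GO bit is set,
 * or while the toggle bit in the HCR status word matches the driver's
 * copy of the toggle.
 */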
418 static int cmd_pending(struct mlx4_dev *dev)
419 {
420 	u32 status;
421 
422 	if (pci_channel_offline(dev->persist->pdev))
423 		return -EIO;
424 
425 	status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
426 
427 	return (status & swab32(1 << HCR_GO_BIT)) ||
428 		(mlx4_priv(dev)->cmd.toggle ==
429 		 !!(status & swab32(1 << HCR_T_BIT)));
430 }
431 
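/*
 * Post a command to the HCR: wait for the GO bit to clear, write the six
 * parameter words (in_param hi/lo, in_modifier, out_param hi/lo, token),
 * then ring the doorbell word carrying the GO, toggle and event bits
 * together with the opcode and opcode modifier.
 */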
432 static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
433 			 u32 in_modifier, u8 op_modifier, u16 op, u16 token,
434 			 int event)
435 {
436 	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
437 	u32 __iomem *hcr = cmd->hcr;
438 	int ret = -EIO;
439 	unsigned long end;
440 
441 	mutex_lock(&dev->persist->device_state_mutex);
	/* To avoid writing to unknown addresses after the device state was
	 * changed to internal error and the chip was reset,
	 * check the INTERNAL_ERROR flag which is updated under
	 * device_state_mutex lock.
	 */
447 	if (pci_channel_offline(dev->persist->pdev) ||
448 	    (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
449 		/*
450 		 * Device is going through error recovery
451 		 * and cannot accept commands.
452 		 */
453 		goto out;
454 	}
455 
456 	end = jiffies;
457 	if (event)
458 		end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);
459 
460 	while (cmd_pending(dev)) {
461 		if (pci_channel_offline(dev->persist->pdev)) {
462 			/*
463 			 * Device is going through error recovery
464 			 * and cannot accept commands.
465 			 */
466 			goto out;
467 		}
468 
469 		if (time_after_eq(jiffies, end)) {
470 			mlx4_err(dev, "%s:cmd_pending failed\n", __func__);
471 			goto out;
472 		}
473 		cond_resched();
474 	}
475 
476 	/*
477 	 * We use writel (instead of something like memcpy_toio)
478 	 * because writes of less than 32 bits to the HCR don't work
479 	 * (and some architectures such as ia64 implement memcpy_toio
480 	 * in terms of writeb).
481 	 */
482 	__raw_writel((__force u32) cpu_to_be32(in_param >> 32),		  hcr + 0);
483 	__raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful),  hcr + 1);
484 	__raw_writel((__force u32) cpu_to_be32(in_modifier),		  hcr + 2);
485 	__raw_writel((__force u32) cpu_to_be32(out_param >> 32),	  hcr + 3);
486 	__raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4);
487 	__raw_writel((__force u32) cpu_to_be32(token << 16),		  hcr + 5);
488 
489 	/* __raw_writel may not order writes. */
490 	wmb();
491 
492 	__raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT)		|
493 					       (cmd->toggle << HCR_T_BIT)	|
494 					       (event ? (1 << HCR_E_BIT) : 0)	|
495 					       (op_modifier << HCR_OPMOD_SHIFT) |
496 					       op), hcr + 6);
497 
498 	/*
499 	 * Make sure that our HCR writes don't get mixed in with
500 	 * writes from another CPU starting a FW command.
501 	 */
502 	mmiowb();
503 
504 	cmd->toggle = cmd->toggle ^ 1;
505 
506 	ret = 0;
507 
508 out:
509 	if (ret)
510 		mlx4_warn(dev, "Could not post command 0x%x: ret=%d, in_param=0x%llx, in_mod=0x%x, op_mod=0x%x\n",
511 			  op, ret, in_param, in_modifier, op_modifier);
512 	mutex_unlock(&dev->persist->device_state_mutex);
513 
514 	return ret;
515 }
516 
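/*
 * Issue a command through the virtual HCR.  A master processes its own
 * VHCR directly via mlx4_master_process_vhcr(); a slave posts
 * MLX4_COMM_CMD_VHCR_POST on the comm channel and waits for the master
 * to execute the command on its behalf.
 */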
517 static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
518 			  int out_is_imm, u32 in_modifier, u8 op_modifier,
519 			  u16 op, unsigned long timeout)
520 {
521 	struct mlx4_priv *priv = mlx4_priv(dev);
522 	struct mlx4_vhcr_cmd *vhcr = priv->mfunc.vhcr;
523 	int ret;
524 
525 	mutex_lock(&priv->cmd.slave_cmd_mutex);
526 
527 	vhcr->in_param = cpu_to_be64(in_param);
528 	vhcr->out_param = out_param ? cpu_to_be64(*out_param) : 0;
529 	vhcr->in_modifier = cpu_to_be32(in_modifier);
530 	vhcr->opcode = cpu_to_be16((((u16) op_modifier) << 12) | (op & 0xfff));
531 	vhcr->token = cpu_to_be16(CMD_POLL_TOKEN);
532 	vhcr->status = 0;
533 	vhcr->flags = !!(priv->cmd.use_events) << 6;
534 
535 	if (mlx4_is_master(dev)) {
536 		ret = mlx4_master_process_vhcr(dev, dev->caps.function, vhcr);
537 		if (!ret) {
538 			if (out_is_imm) {
539 				if (out_param)
540 					*out_param =
541 						be64_to_cpu(vhcr->out_param);
542 				else {
543 					mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
544 						 op);
545 					vhcr->status = CMD_STAT_BAD_PARAM;
546 				}
547 			}
548 			ret = mlx4_status_to_errno(vhcr->status);
549 		}
550 		if (ret &&
551 		    dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
552 			ret = mlx4_internal_err_ret_value(dev, op, op_modifier);
553 	} else {
554 		ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0, op,
555 				    MLX4_COMM_TIME + timeout);
556 		if (!ret) {
557 			if (out_is_imm) {
558 				if (out_param)
559 					*out_param =
560 						be64_to_cpu(vhcr->out_param);
561 				else {
562 					mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
563 						 op);
564 					vhcr->status = CMD_STAT_BAD_PARAM;
565 				}
566 			}
567 			ret = mlx4_status_to_errno(vhcr->status);
568 		} else {
569 			if (dev->persist->state &
570 			    MLX4_DEVICE_STATE_INTERNAL_ERROR)
571 				ret = mlx4_internal_err_ret_value(dev, op,
572 								  op_modifier);
573 			else
574 				mlx4_err(dev, "failed execution of VHCR_POST command opcode 0x%x\n", op);
575 		}
576 	}
577 
578 	mutex_unlock(&priv->cmd.slave_cmd_mutex);
579 	return ret;
580 }
581 
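/*
 * Polling path: post the command with CMD_POLL_TOKEN and busy-wait on the
 * GO/toggle bits.  Used while events are disabled (e.g. during init,
 * before the EQs are set up).
 */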
582 static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
583 			 int out_is_imm, u32 in_modifier, u8 op_modifier,
584 			 u16 op, unsigned long timeout)
585 {
586 	struct mlx4_priv *priv = mlx4_priv(dev);
587 	void __iomem *hcr = priv->cmd.hcr;
588 	int err = 0;
589 	unsigned long end;
590 	u32 stat;
591 
592 	down(&priv->cmd.poll_sem);
593 
594 	if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
595 		/*
596 		 * Device is going through error recovery
597 		 * and cannot accept commands.
598 		 */
599 		err = mlx4_internal_err_ret_value(dev, op, op_modifier);
600 		goto out;
601 	}
602 
603 	if (out_is_imm && !out_param) {
604 		mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
605 			 op);
606 		err = -EINVAL;
607 		goto out;
608 	}
609 
610 	err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
611 			    in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
612 	if (err)
613 		goto out_reset;
614 
615 	end = msecs_to_jiffies(timeout) + jiffies;
616 	while (cmd_pending(dev) && time_before(jiffies, end)) {
617 		if (pci_channel_offline(dev->persist->pdev)) {
618 			/*
619 			 * Device is going through error recovery
620 			 * and cannot accept commands.
621 			 */
622 			err = -EIO;
623 			goto out_reset;
624 		}
625 
626 		if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
627 			err = mlx4_internal_err_ret_value(dev, op, op_modifier);
628 			goto out;
629 		}
630 
631 		cond_resched();
632 	}
633 
634 	if (cmd_pending(dev)) {
635 		mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
636 			  op);
637 		err = -EIO;
638 		goto out_reset;
639 	}
640 
641 	if (out_is_imm)
642 		*out_param =
643 			(u64) be32_to_cpu((__force __be32)
644 					  __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
645 			(u64) be32_to_cpu((__force __be32)
646 					  __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));
647 	stat = be32_to_cpu((__force __be32)
648 			   __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24;
649 	err = mlx4_status_to_errno(stat);
650 	if (err) {
651 		mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
652 			 op, stat);
653 		if (mlx4_closing_cmd_fatal_error(op, stat))
654 			goto out_reset;
655 		goto out;
656 	}
657 
658 out_reset:
659 	if (err)
660 		err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);
661 out:
662 	up(&priv->cmd.poll_sem);
663 	return err;
664 }
665 
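/* Completion callback, invoked from the EQ handler for command EQEs */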
666 void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
667 {
668 	struct mlx4_priv *priv = mlx4_priv(dev);
669 	struct mlx4_cmd_context *context =
670 		&priv->cmd.context[token & priv->cmd.token_mask];
671 
672 	/* previously timed out command completing at long last */
673 	if (token != context->token)
674 		return;
675 
676 	context->fw_status = status;
677 	context->result    = mlx4_status_to_errno(status);
678 	context->out_param = out_param;
679 
680 	complete(&context->done);
681 }
682 
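/*
 * Event path: take a command context (bounded by event_sem), post with
 * the event bit set and sleep until mlx4_cmd_event() completes the
 * context or the timeout expires.
 */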
683 static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
684 			 int out_is_imm, u32 in_modifier, u8 op_modifier,
685 			 u16 op, unsigned long timeout)
686 {
687 	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
688 	struct mlx4_cmd_context *context;
689 	long ret_wait;
690 	int err = 0;
691 
692 	down(&cmd->event_sem);
693 
694 	spin_lock(&cmd->context_lock);
695 	BUG_ON(cmd->free_head < 0);
696 	context = &cmd->context[cmd->free_head];
697 	context->token += cmd->token_mask + 1;
698 	cmd->free_head = context->next;
699 	spin_unlock(&cmd->context_lock);
700 
701 	if (out_is_imm && !out_param) {
702 		mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
703 			 op);
704 		err = -EINVAL;
705 		goto out;
706 	}
707 
708 	reinit_completion(&context->done);
709 
710 	err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
711 			    in_modifier, op_modifier, op, context->token, 1);
712 	if (err)
713 		goto out_reset;
714 
715 	if (op == MLX4_CMD_SENSE_PORT) {
716 		ret_wait =
717 			wait_for_completion_interruptible_timeout(&context->done,
718 								  msecs_to_jiffies(timeout));
719 		if (ret_wait < 0) {
720 			context->fw_status = 0;
721 			context->out_param = 0;
722 			context->result = 0;
723 		}
724 	} else {
725 		ret_wait = (long)wait_for_completion_timeout(&context->done,
726 							     msecs_to_jiffies(timeout));
727 	}
728 	if (!ret_wait) {
729 		mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
730 			  op);
731 		if (op == MLX4_CMD_NOP) {
732 			err = -EBUSY;
733 			goto out;
734 		} else {
735 			err = -EIO;
736 			goto out_reset;
737 		}
738 	}
739 
740 	err = context->result;
741 	if (err) {
		/* Since we do not want this error message always displayed
		 * at driver start when there are ConnectX-2 HCAs on the
		 * host, we demote the message for this specific
		 * command/input_mod/opcode_mod/fw-status to debug level.
		 */
747 		if (op == MLX4_CMD_SET_PORT &&
748 		    (in_modifier == 1 || in_modifier == 2) &&
749 		    op_modifier == MLX4_SET_PORT_IB_OPCODE &&
750 		    context->fw_status == CMD_STAT_BAD_SIZE)
751 			mlx4_dbg(dev, "command 0x%x failed: fw status = 0x%x\n",
752 				 op, context->fw_status);
753 		else
754 			mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
755 				 op, context->fw_status);
756 		if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
757 			err = mlx4_internal_err_ret_value(dev, op, op_modifier);
758 		else if (mlx4_closing_cmd_fatal_error(op, context->fw_status))
759 			goto out_reset;
760 
761 		goto out;
762 	}
763 
764 	if (out_is_imm)
765 		*out_param = context->out_param;
766 
767 out_reset:
768 	if (err)
769 		err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);
770 out:
771 	spin_lock(&cmd->context_lock);
772 	context->next = cmd->free_head;
773 	cmd->free_head = context - cmd->context;
774 	spin_unlock(&cmd->context_lock);
775 
776 	up(&cmd->event_sem);
777 	return err;
778 }
779 
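/*
 * Common entry point for all firmware commands.  Native commands on the
 * PF (or a non-multifunction device) go straight to the HCR, using the
 * event path when events are enabled and the polling path otherwise;
 * wrapped commands on a multifunction device go through the VHCR slave
 * path.  Callers normally use the mlx4_cmd()/mlx4_cmd_box()/
 * mlx4_cmd_imm() helpers from <linux/mlx4/cmd.h>, e.g. (illustrative):
 *
 *	err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
 *		       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 */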
780 int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
781 	       int out_is_imm, u32 in_modifier, u8 op_modifier,
782 	       u16 op, unsigned long timeout, int native)
783 {
784 	if (pci_channel_offline(dev->persist->pdev))
785 		return mlx4_cmd_reset_flow(dev, op, op_modifier, -EIO);
786 
787 	if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
788 		int ret;
789 
790 		if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
791 			return mlx4_internal_err_ret_value(dev, op,
792 							  op_modifier);
793 		down_read(&mlx4_priv(dev)->cmd.switch_sem);
794 		if (mlx4_priv(dev)->cmd.use_events)
795 			ret = mlx4_cmd_wait(dev, in_param, out_param,
796 					    out_is_imm, in_modifier,
797 					    op_modifier, op, timeout);
798 		else
799 			ret = mlx4_cmd_poll(dev, in_param, out_param,
800 					    out_is_imm, in_modifier,
801 					    op_modifier, op, timeout);
802 
803 		up_read(&mlx4_priv(dev)->cmd.switch_sem);
804 		return ret;
805 	}
806 	return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm,
807 			      in_modifier, op_modifier, op, timeout);
808 }
809 EXPORT_SYMBOL_GPL(__mlx4_cmd);
810 
811 
812 int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev)
813 {
814 	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL,
815 			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
816 }
817 
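/*
 * DMA a buffer between the master's and a slave's address space using the
 * ACCESS_MEM firmware command.  Both addresses must be 4KB aligned, the
 * slave id must fit in 7 bits and the size must be a multiple of 256
 * bytes.
 */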
818 static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
819 			   int slave, u64 slave_addr,
820 			   int size, int is_read)
821 {
822 	u64 in_param;
823 	u64 out_param;
824 
825 	if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
826 	    (slave & ~0x7f) | (size & 0xff)) {
827 		mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx master_addr:0x%llx slave_id:%d size:%d\n",
828 			 slave_addr, master_addr, slave, size);
829 		return -EINVAL;
830 	}
831 
832 	if (is_read) {
833 		in_param = (u64) slave | slave_addr;
834 		out_param = (u64) dev->caps.function | master_addr;
835 	} else {
836 		in_param = (u64) dev->caps.function | master_addr;
837 		out_param = (u64) slave | slave_addr;
838 	}
839 
840 	return mlx4_cmd_imm(dev, in_param, &out_param, size, 0,
841 			    MLX4_CMD_ACCESS_MEM,
842 			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
843 }
844 
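/*
 * Read one 32-entry P_Key block from the port's P_Key table with a
 * MAD_IFC (subnet management) query; @index must be block aligned.
 */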
845 static int query_pkey_block(struct mlx4_dev *dev, u8 port, u16 index, u16 *pkey,
846 			       struct mlx4_cmd_mailbox *inbox,
847 			       struct mlx4_cmd_mailbox *outbox)
848 {
849 	struct ib_smp *in_mad = (struct ib_smp *)(inbox->buf);
850 	struct ib_smp *out_mad = (struct ib_smp *)(outbox->buf);
851 	int err;
852 	int i;
853 
854 	if (index & 0x1f)
855 		return -EINVAL;
856 
857 	in_mad->attr_mod = cpu_to_be32(index / 32);
858 
859 	err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
860 			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
861 			   MLX4_CMD_NATIVE);
862 	if (err)
863 		return err;
864 
865 	for (i = 0; i < 32; ++i)
866 		pkey[i] = be16_to_cpu(((__be16 *) out_mad->data)[i]);
867 
868 	return err;
869 }
870 
871 static int get_full_pkey_table(struct mlx4_dev *dev, u8 port, u16 *table,
872 			       struct mlx4_cmd_mailbox *inbox,
873 			       struct mlx4_cmd_mailbox *outbox)
874 {
875 	int i;
876 	int err;
877 
878 	for (i = 0; i < dev->caps.pkey_table_len[port]; i += 32) {
879 		err = query_pkey_block(dev, port, i, table + i, inbox, outbox);
880 		if (err)
881 			return err;
882 	}
883 
884 	return 0;
885 }
886 #define PORT_CAPABILITY_LOCATION_IN_SMP 20
887 #define PORT_STATE_OFFSET 32
888 
889 static enum ib_port_state vf_port_state(struct mlx4_dev *dev, int port, int vf)
890 {
891 	if (mlx4_get_slave_port_state(dev, vf, port) == SLAVE_PORT_UP)
892 		return IB_PORT_ACTIVE;
893 	else
894 		return IB_PORT_DOWN;
895 }
896 
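/*
 * Paravirtualize MAD_IFC for slaves: host-view SM 'Get' queries for the
 * P_Key table, PortInfo, GUIDInfo and NodeInfo are rewritten with the
 * slave's virtualized values; other LID-routed 'Get' MADs pass through,
 * and anything else is rejected for VFs without SMI privileges.
 */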
897 static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
898 				struct mlx4_vhcr *vhcr,
899 				struct mlx4_cmd_mailbox *inbox,
900 				struct mlx4_cmd_mailbox *outbox,
901 				struct mlx4_cmd_info *cmd)
902 {
903 	struct ib_smp *smp = inbox->buf;
904 	u32 index;
905 	u8 port, slave_port;
906 	u8 opcode_modifier;
907 	u16 *table;
908 	int err;
909 	int vidx, pidx;
910 	int network_view;
911 	struct mlx4_priv *priv = mlx4_priv(dev);
912 	struct ib_smp *outsmp = outbox->buf;
913 	__be16 *outtab = (__be16 *)(outsmp->data);
914 	__be32 slave_cap_mask;
915 	__be64 slave_node_guid;
916 
917 	slave_port = vhcr->in_modifier;
918 	port = mlx4_slave_convert_port(dev, slave, slave_port);
919 
920 	/* network-view bit is for driver use only, and should not be passed to FW */
921 	opcode_modifier = vhcr->op_modifier & ~0x8; /* clear netw view bit */
922 	network_view = !!(vhcr->op_modifier & 0x8);
923 
924 	if (smp->base_version == 1 &&
925 	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
926 	    smp->class_version == 1) {
927 		/* host view is paravirtualized */
928 		if (!network_view && smp->method == IB_MGMT_METHOD_GET) {
929 			if (smp->attr_id == IB_SMP_ATTR_PKEY_TABLE) {
930 				index = be32_to_cpu(smp->attr_mod);
931 				if (port < 1 || port > dev->caps.num_ports)
932 					return -EINVAL;
933 				table = kcalloc((dev->caps.pkey_table_len[port] / 32) + 1,
934 						sizeof(*table) * 32, GFP_KERNEL);
935 
936 				if (!table)
937 					return -ENOMEM;
938 				/* need to get the full pkey table because the paravirtualized
939 				 * pkeys may be scattered among several pkey blocks.
940 				 */
941 				err = get_full_pkey_table(dev, port, table, inbox, outbox);
942 				if (!err) {
943 					for (vidx = index * 32; vidx < (index + 1) * 32; ++vidx) {
944 						pidx = priv->virt2phys_pkey[slave][port - 1][vidx];
945 						outtab[vidx % 32] = cpu_to_be16(table[pidx]);
946 					}
947 				}
948 				kfree(table);
949 				return err;
950 			}
951 			if (smp->attr_id == IB_SMP_ATTR_PORT_INFO) {
				/* get the slave specific caps: do the command */
954 				smp->attr_mod = cpu_to_be32(port);
955 				err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
956 					    port, opcode_modifier,
957 					    vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
958 				/* modify the response for slaves */
959 				if (!err && slave != mlx4_master_func_num(dev)) {
960 					u8 *state = outsmp->data + PORT_STATE_OFFSET;
961 
962 					*state = (*state & 0xf0) | vf_port_state(dev, port, slave);
963 					slave_cap_mask = priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
964 					memcpy(outsmp->data + PORT_CAPABILITY_LOCATION_IN_SMP, &slave_cap_mask, 4);
965 				}
966 				return err;
967 			}
968 			if (smp->attr_id == IB_SMP_ATTR_GUID_INFO) {
969 				__be64 guid = mlx4_get_admin_guid(dev, slave,
970 								  port);
971 
972 				/* set the PF admin guid to the FW/HW burned
973 				 * GUID, if it wasn't yet set
974 				 */
975 				if (slave == 0 && guid == 0) {
976 					smp->attr_mod = 0;
977 					err = mlx4_cmd_box(dev,
978 							   inbox->dma,
979 							   outbox->dma,
980 							   vhcr->in_modifier,
981 							   opcode_modifier,
982 							   vhcr->op,
983 							   MLX4_CMD_TIME_CLASS_C,
984 							   MLX4_CMD_NATIVE);
985 					if (err)
986 						return err;
987 					mlx4_set_admin_guid(dev,
988 							    *(__be64 *)outsmp->
989 							    data, slave, port);
990 				} else {
991 					memcpy(outsmp->data, &guid, 8);
992 				}
993 
994 				/* clean all other gids */
995 				memset(outsmp->data + 8, 0, 56);
996 				return 0;
997 			}
998 			if (smp->attr_id == IB_SMP_ATTR_NODE_INFO) {
999 				err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
1000 					     port, opcode_modifier,
1001 					     vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
1002 				if (!err) {
1003 					slave_node_guid =  mlx4_get_slave_node_guid(dev, slave);
1004 					memcpy(outsmp->data + 12, &slave_node_guid, 8);
1005 				}
1006 				return err;
1007 			}
1008 		}
1009 	}
1010 
1011 	/* Non-privileged VFs are only allowed "host" view LID-routed 'Get' MADs.
1012 	 * These are the MADs used by ib verbs (such as ib_query_gids).
1013 	 */
1014 	if (slave != mlx4_master_func_num(dev) &&
1015 	    !mlx4_vf_smi_enabled(dev, slave, port)) {
1016 		if (!(smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
1017 		      smp->method == IB_MGMT_METHOD_GET) || network_view) {
1018 			mlx4_err(dev, "Unprivileged slave %d is trying to execute a Subnet MGMT MAD, class 0x%x, method 0x%x, view=%s for attr 0x%x. Rejecting\n",
1019 				 slave, smp->mgmt_class, smp->method,
1020 				 network_view ? "Network" : "Host",
1021 				 be16_to_cpu(smp->attr_id));
1022 			return -EPERM;
1023 		}
1024 	}
1025 
1026 	return mlx4_cmd_box(dev, inbox->dma, outbox->dma,
1027 				    vhcr->in_modifier, opcode_modifier,
1028 				    vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
1029 }
1030 
1031 static int mlx4_CMD_EPERM_wrapper(struct mlx4_dev *dev, int slave,
1032 		     struct mlx4_vhcr *vhcr,
1033 		     struct mlx4_cmd_mailbox *inbox,
1034 		     struct mlx4_cmd_mailbox *outbox,
1035 		     struct mlx4_cmd_info *cmd)
1036 {
1037 	return -EPERM;
1038 }
1039 
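/*
 * Generic wrapper: forward a slave's command to firmware, substituting
 * the master-side mailbox DMA addresses for the slave's parameters and,
 * when requested, encoding the slave id into the low byte of the input
 * parameter.
 */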
1040 int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
1041 		     struct mlx4_vhcr *vhcr,
1042 		     struct mlx4_cmd_mailbox *inbox,
1043 		     struct mlx4_cmd_mailbox *outbox,
1044 		     struct mlx4_cmd_info *cmd)
1045 {
1046 	u64 in_param;
1047 	u64 out_param;
1048 	int err;
1049 
1050 	in_param = cmd->has_inbox ? (u64) inbox->dma : vhcr->in_param;
1051 	out_param = cmd->has_outbox ? (u64) outbox->dma : vhcr->out_param;
1052 	if (cmd->encode_slave_id) {
1053 		in_param &= 0xffffffffffffff00ll;
1054 		in_param |= slave;
1055 	}
1056 
1057 	err = __mlx4_cmd(dev, in_param, &out_param, cmd->out_is_imm,
1058 			 vhcr->in_modifier, vhcr->op_modifier, vhcr->op,
1059 			 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1060 
1061 	if (cmd->out_is_imm)
1062 		vhcr->out_param = out_param;
1063 
1064 	return err;
1065 }
1066 
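/*
 * Dispatch table consulted by mlx4_master_process_vhcr().  A NULL
 * .wrapper passes the command through to firmware unchanged; .verify,
 * when set, applies permission/bounds checks before execution.
 */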
1067 static struct mlx4_cmd_info cmd_info[] = {
1068 	{
1069 		.opcode = MLX4_CMD_QUERY_FW,
1070 		.has_inbox = false,
1071 		.has_outbox = true,
1072 		.out_is_imm = false,
1073 		.encode_slave_id = false,
1074 		.verify = NULL,
1075 		.wrapper = mlx4_QUERY_FW_wrapper
1076 	},
1077 	{
1078 		.opcode = MLX4_CMD_QUERY_HCA,
1079 		.has_inbox = false,
1080 		.has_outbox = true,
1081 		.out_is_imm = false,
1082 		.encode_slave_id = false,
1083 		.verify = NULL,
1084 		.wrapper = NULL
1085 	},
1086 	{
1087 		.opcode = MLX4_CMD_QUERY_DEV_CAP,
1088 		.has_inbox = false,
1089 		.has_outbox = true,
1090 		.out_is_imm = false,
1091 		.encode_slave_id = false,
1092 		.verify = NULL,
1093 		.wrapper = mlx4_QUERY_DEV_CAP_wrapper
1094 	},
1095 	{
1096 		.opcode = MLX4_CMD_QUERY_FUNC_CAP,
1097 		.has_inbox = false,
1098 		.has_outbox = true,
1099 		.out_is_imm = false,
1100 		.encode_slave_id = false,
1101 		.verify = NULL,
1102 		.wrapper = mlx4_QUERY_FUNC_CAP_wrapper
1103 	},
1104 	{
1105 		.opcode = MLX4_CMD_QUERY_ADAPTER,
1106 		.has_inbox = false,
1107 		.has_outbox = true,
1108 		.out_is_imm = false,
1109 		.encode_slave_id = false,
1110 		.verify = NULL,
1111 		.wrapper = NULL
1112 	},
1113 	{
1114 		.opcode = MLX4_CMD_INIT_PORT,
1115 		.has_inbox = false,
1116 		.has_outbox = false,
1117 		.out_is_imm = false,
1118 		.encode_slave_id = false,
1119 		.verify = NULL,
1120 		.wrapper = mlx4_INIT_PORT_wrapper
1121 	},
1122 	{
1123 		.opcode = MLX4_CMD_CLOSE_PORT,
1124 		.has_inbox = false,
1125 		.has_outbox = false,
1126 		.out_is_imm  = false,
1127 		.encode_slave_id = false,
1128 		.verify = NULL,
1129 		.wrapper = mlx4_CLOSE_PORT_wrapper
1130 	},
1131 	{
1132 		.opcode = MLX4_CMD_QUERY_PORT,
1133 		.has_inbox = false,
1134 		.has_outbox = true,
1135 		.out_is_imm = false,
1136 		.encode_slave_id = false,
1137 		.verify = NULL,
1138 		.wrapper = mlx4_QUERY_PORT_wrapper
1139 	},
1140 	{
1141 		.opcode = MLX4_CMD_SET_PORT,
1142 		.has_inbox = true,
1143 		.has_outbox = false,
1144 		.out_is_imm = false,
1145 		.encode_slave_id = false,
1146 		.verify = NULL,
1147 		.wrapper = mlx4_SET_PORT_wrapper
1148 	},
1149 	{
1150 		.opcode = MLX4_CMD_MAP_EQ,
1151 		.has_inbox = false,
1152 		.has_outbox = false,
1153 		.out_is_imm = false,
1154 		.encode_slave_id = false,
1155 		.verify = NULL,
1156 		.wrapper = mlx4_MAP_EQ_wrapper
1157 	},
1158 	{
1159 		.opcode = MLX4_CMD_SW2HW_EQ,
1160 		.has_inbox = true,
1161 		.has_outbox = false,
1162 		.out_is_imm = false,
1163 		.encode_slave_id = true,
1164 		.verify = NULL,
1165 		.wrapper = mlx4_SW2HW_EQ_wrapper
1166 	},
1167 	{
1168 		.opcode = MLX4_CMD_HW_HEALTH_CHECK,
1169 		.has_inbox = false,
1170 		.has_outbox = false,
1171 		.out_is_imm = false,
1172 		.encode_slave_id = false,
1173 		.verify = NULL,
1174 		.wrapper = NULL
1175 	},
1176 	{
1177 		.opcode = MLX4_CMD_NOP,
1178 		.has_inbox = false,
1179 		.has_outbox = false,
1180 		.out_is_imm = false,
1181 		.encode_slave_id = false,
1182 		.verify = NULL,
1183 		.wrapper = NULL
1184 	},
1185 	{
1186 		.opcode = MLX4_CMD_CONFIG_DEV,
1187 		.has_inbox = false,
1188 		.has_outbox = true,
1189 		.out_is_imm = false,
1190 		.encode_slave_id = false,
1191 		.verify = NULL,
1192 		.wrapper = mlx4_CONFIG_DEV_wrapper
1193 	},
1194 	{
1195 		.opcode = MLX4_CMD_ALLOC_RES,
1196 		.has_inbox = false,
1197 		.has_outbox = false,
1198 		.out_is_imm = true,
1199 		.encode_slave_id = false,
1200 		.verify = NULL,
1201 		.wrapper = mlx4_ALLOC_RES_wrapper
1202 	},
1203 	{
1204 		.opcode = MLX4_CMD_FREE_RES,
1205 		.has_inbox = false,
1206 		.has_outbox = false,
1207 		.out_is_imm = false,
1208 		.encode_slave_id = false,
1209 		.verify = NULL,
1210 		.wrapper = mlx4_FREE_RES_wrapper
1211 	},
1212 	{
1213 		.opcode = MLX4_CMD_SW2HW_MPT,
1214 		.has_inbox = true,
1215 		.has_outbox = false,
1216 		.out_is_imm = false,
1217 		.encode_slave_id = true,
1218 		.verify = NULL,
1219 		.wrapper = mlx4_SW2HW_MPT_wrapper
1220 	},
1221 	{
1222 		.opcode = MLX4_CMD_QUERY_MPT,
1223 		.has_inbox = false,
1224 		.has_outbox = true,
1225 		.out_is_imm = false,
1226 		.encode_slave_id = false,
1227 		.verify = NULL,
1228 		.wrapper = mlx4_QUERY_MPT_wrapper
1229 	},
1230 	{
1231 		.opcode = MLX4_CMD_HW2SW_MPT,
1232 		.has_inbox = false,
1233 		.has_outbox = false,
1234 		.out_is_imm = false,
1235 		.encode_slave_id = false,
1236 		.verify = NULL,
1237 		.wrapper = mlx4_HW2SW_MPT_wrapper
1238 	},
1239 	{
1240 		.opcode = MLX4_CMD_READ_MTT,
1241 		.has_inbox = false,
1242 		.has_outbox = true,
1243 		.out_is_imm = false,
1244 		.encode_slave_id = false,
1245 		.verify = NULL,
1246 		.wrapper = NULL
1247 	},
1248 	{
1249 		.opcode = MLX4_CMD_WRITE_MTT,
1250 		.has_inbox = true,
1251 		.has_outbox = false,
1252 		.out_is_imm = false,
1253 		.encode_slave_id = false,
1254 		.verify = NULL,
1255 		.wrapper = mlx4_WRITE_MTT_wrapper
1256 	},
1257 	{
1258 		.opcode = MLX4_CMD_SYNC_TPT,
1259 		.has_inbox = true,
1260 		.has_outbox = false,
1261 		.out_is_imm = false,
1262 		.encode_slave_id = false,
1263 		.verify = NULL,
1264 		.wrapper = NULL
1265 	},
1266 	{
1267 		.opcode = MLX4_CMD_HW2SW_EQ,
1268 		.has_inbox = false,
1269 		.has_outbox = false,
1270 		.out_is_imm = false,
1271 		.encode_slave_id = true,
1272 		.verify = NULL,
1273 		.wrapper = mlx4_HW2SW_EQ_wrapper
1274 	},
1275 	{
1276 		.opcode = MLX4_CMD_QUERY_EQ,
1277 		.has_inbox = false,
1278 		.has_outbox = true,
1279 		.out_is_imm = false,
1280 		.encode_slave_id = true,
1281 		.verify = NULL,
1282 		.wrapper = mlx4_QUERY_EQ_wrapper
1283 	},
1284 	{
1285 		.opcode = MLX4_CMD_SW2HW_CQ,
1286 		.has_inbox = true,
1287 		.has_outbox = false,
1288 		.out_is_imm = false,
1289 		.encode_slave_id = true,
1290 		.verify = NULL,
1291 		.wrapper = mlx4_SW2HW_CQ_wrapper
1292 	},
1293 	{
1294 		.opcode = MLX4_CMD_HW2SW_CQ,
1295 		.has_inbox = false,
1296 		.has_outbox = false,
1297 		.out_is_imm = false,
1298 		.encode_slave_id = false,
1299 		.verify = NULL,
1300 		.wrapper = mlx4_HW2SW_CQ_wrapper
1301 	},
1302 	{
1303 		.opcode = MLX4_CMD_QUERY_CQ,
1304 		.has_inbox = false,
1305 		.has_outbox = true,
1306 		.out_is_imm = false,
1307 		.encode_slave_id = false,
1308 		.verify = NULL,
1309 		.wrapper = mlx4_QUERY_CQ_wrapper
1310 	},
1311 	{
1312 		.opcode = MLX4_CMD_MODIFY_CQ,
1313 		.has_inbox = true,
1314 		.has_outbox = false,
1315 		.out_is_imm = true,
1316 		.encode_slave_id = false,
1317 		.verify = NULL,
1318 		.wrapper = mlx4_MODIFY_CQ_wrapper
1319 	},
1320 	{
1321 		.opcode = MLX4_CMD_SW2HW_SRQ,
1322 		.has_inbox = true,
1323 		.has_outbox = false,
1324 		.out_is_imm = false,
1325 		.encode_slave_id = true,
1326 		.verify = NULL,
1327 		.wrapper = mlx4_SW2HW_SRQ_wrapper
1328 	},
1329 	{
1330 		.opcode = MLX4_CMD_HW2SW_SRQ,
1331 		.has_inbox = false,
1332 		.has_outbox = false,
1333 		.out_is_imm = false,
1334 		.encode_slave_id = false,
1335 		.verify = NULL,
1336 		.wrapper = mlx4_HW2SW_SRQ_wrapper
1337 	},
1338 	{
1339 		.opcode = MLX4_CMD_QUERY_SRQ,
1340 		.has_inbox = false,
1341 		.has_outbox = true,
1342 		.out_is_imm = false,
1343 		.encode_slave_id = false,
1344 		.verify = NULL,
1345 		.wrapper = mlx4_QUERY_SRQ_wrapper
1346 	},
1347 	{
1348 		.opcode = MLX4_CMD_ARM_SRQ,
1349 		.has_inbox = false,
1350 		.has_outbox = false,
1351 		.out_is_imm = false,
1352 		.encode_slave_id = false,
1353 		.verify = NULL,
1354 		.wrapper = mlx4_ARM_SRQ_wrapper
1355 	},
1356 	{
1357 		.opcode = MLX4_CMD_RST2INIT_QP,
1358 		.has_inbox = true,
1359 		.has_outbox = false,
1360 		.out_is_imm = false,
1361 		.encode_slave_id = true,
1362 		.verify = NULL,
1363 		.wrapper = mlx4_RST2INIT_QP_wrapper
1364 	},
1365 	{
1366 		.opcode = MLX4_CMD_INIT2INIT_QP,
1367 		.has_inbox = true,
1368 		.has_outbox = false,
1369 		.out_is_imm = false,
1370 		.encode_slave_id = false,
1371 		.verify = NULL,
1372 		.wrapper = mlx4_INIT2INIT_QP_wrapper
1373 	},
1374 	{
1375 		.opcode = MLX4_CMD_INIT2RTR_QP,
1376 		.has_inbox = true,
1377 		.has_outbox = false,
1378 		.out_is_imm = false,
1379 		.encode_slave_id = false,
1380 		.verify = NULL,
1381 		.wrapper = mlx4_INIT2RTR_QP_wrapper
1382 	},
1383 	{
1384 		.opcode = MLX4_CMD_RTR2RTS_QP,
1385 		.has_inbox = true,
1386 		.has_outbox = false,
1387 		.out_is_imm = false,
1388 		.encode_slave_id = false,
1389 		.verify = NULL,
1390 		.wrapper = mlx4_RTR2RTS_QP_wrapper
1391 	},
1392 	{
1393 		.opcode = MLX4_CMD_RTS2RTS_QP,
1394 		.has_inbox = true,
1395 		.has_outbox = false,
1396 		.out_is_imm = false,
1397 		.encode_slave_id = false,
1398 		.verify = NULL,
1399 		.wrapper = mlx4_RTS2RTS_QP_wrapper
1400 	},
1401 	{
1402 		.opcode = MLX4_CMD_SQERR2RTS_QP,
1403 		.has_inbox = true,
1404 		.has_outbox = false,
1405 		.out_is_imm = false,
1406 		.encode_slave_id = false,
1407 		.verify = NULL,
1408 		.wrapper = mlx4_SQERR2RTS_QP_wrapper
1409 	},
1410 	{
1411 		.opcode = MLX4_CMD_2ERR_QP,
1412 		.has_inbox = false,
1413 		.has_outbox = false,
1414 		.out_is_imm = false,
1415 		.encode_slave_id = false,
1416 		.verify = NULL,
1417 		.wrapper = mlx4_GEN_QP_wrapper
1418 	},
1419 	{
1420 		.opcode = MLX4_CMD_RTS2SQD_QP,
1421 		.has_inbox = false,
1422 		.has_outbox = false,
1423 		.out_is_imm = false,
1424 		.encode_slave_id = false,
1425 		.verify = NULL,
1426 		.wrapper = mlx4_GEN_QP_wrapper
1427 	},
1428 	{
1429 		.opcode = MLX4_CMD_SQD2SQD_QP,
1430 		.has_inbox = true,
1431 		.has_outbox = false,
1432 		.out_is_imm = false,
1433 		.encode_slave_id = false,
1434 		.verify = NULL,
1435 		.wrapper = mlx4_SQD2SQD_QP_wrapper
1436 	},
1437 	{
1438 		.opcode = MLX4_CMD_SQD2RTS_QP,
1439 		.has_inbox = true,
1440 		.has_outbox = false,
1441 		.out_is_imm = false,
1442 		.encode_slave_id = false,
1443 		.verify = NULL,
1444 		.wrapper = mlx4_SQD2RTS_QP_wrapper
1445 	},
1446 	{
1447 		.opcode = MLX4_CMD_2RST_QP,
1448 		.has_inbox = false,
1449 		.has_outbox = false,
1450 		.out_is_imm = false,
1451 		.encode_slave_id = false,
1452 		.verify = NULL,
1453 		.wrapper = mlx4_2RST_QP_wrapper
1454 	},
1455 	{
1456 		.opcode = MLX4_CMD_QUERY_QP,
1457 		.has_inbox = false,
1458 		.has_outbox = true,
1459 		.out_is_imm = false,
1460 		.encode_slave_id = false,
1461 		.verify = NULL,
1462 		.wrapper = mlx4_GEN_QP_wrapper
1463 	},
1464 	{
1465 		.opcode = MLX4_CMD_SUSPEND_QP,
1466 		.has_inbox = false,
1467 		.has_outbox = false,
1468 		.out_is_imm = false,
1469 		.encode_slave_id = false,
1470 		.verify = NULL,
1471 		.wrapper = mlx4_GEN_QP_wrapper
1472 	},
1473 	{
1474 		.opcode = MLX4_CMD_UNSUSPEND_QP,
1475 		.has_inbox = false,
1476 		.has_outbox = false,
1477 		.out_is_imm = false,
1478 		.encode_slave_id = false,
1479 		.verify = NULL,
1480 		.wrapper = mlx4_GEN_QP_wrapper
1481 	},
1482 	{
1483 		.opcode = MLX4_CMD_UPDATE_QP,
1484 		.has_inbox = true,
1485 		.has_outbox = false,
1486 		.out_is_imm = false,
1487 		.encode_slave_id = false,
1488 		.verify = NULL,
1489 		.wrapper = mlx4_UPDATE_QP_wrapper
1490 	},
1491 	{
1492 		.opcode = MLX4_CMD_GET_OP_REQ,
1493 		.has_inbox = false,
1494 		.has_outbox = false,
1495 		.out_is_imm = false,
1496 		.encode_slave_id = false,
1497 		.verify = NULL,
1498 		.wrapper = mlx4_CMD_EPERM_wrapper,
1499 	},
1500 	{
1501 		.opcode = MLX4_CMD_ALLOCATE_VPP,
1502 		.has_inbox = false,
1503 		.has_outbox = true,
1504 		.out_is_imm = false,
1505 		.encode_slave_id = false,
1506 		.verify = NULL,
1507 		.wrapper = mlx4_CMD_EPERM_wrapper,
1508 	},
1509 	{
1510 		.opcode = MLX4_CMD_SET_VPORT_QOS,
1511 		.has_inbox = false,
1512 		.has_outbox = true,
1513 		.out_is_imm = false,
1514 		.encode_slave_id = false,
1515 		.verify = NULL,
1516 		.wrapper = mlx4_CMD_EPERM_wrapper,
1517 	},
1518 	{
1519 		.opcode = MLX4_CMD_CONF_SPECIAL_QP,
1520 		.has_inbox = false,
1521 		.has_outbox = false,
1522 		.out_is_imm = false,
1523 		.encode_slave_id = false,
1524 		.verify = NULL, /* XXX verify: only demux can do this */
1525 		.wrapper = NULL
1526 	},
1527 	{
1528 		.opcode = MLX4_CMD_MAD_IFC,
1529 		.has_inbox = true,
1530 		.has_outbox = true,
1531 		.out_is_imm = false,
1532 		.encode_slave_id = false,
1533 		.verify = NULL,
1534 		.wrapper = mlx4_MAD_IFC_wrapper
1535 	},
1536 	{
1537 		.opcode = MLX4_CMD_MAD_DEMUX,
1538 		.has_inbox = false,
1539 		.has_outbox = false,
1540 		.out_is_imm = false,
1541 		.encode_slave_id = false,
1542 		.verify = NULL,
1543 		.wrapper = mlx4_CMD_EPERM_wrapper
1544 	},
1545 	{
1546 		.opcode = MLX4_CMD_QUERY_IF_STAT,
1547 		.has_inbox = false,
1548 		.has_outbox = true,
1549 		.out_is_imm = false,
1550 		.encode_slave_id = false,
1551 		.verify = NULL,
1552 		.wrapper = mlx4_QUERY_IF_STAT_wrapper
1553 	},
1554 	{
1555 		.opcode = MLX4_CMD_ACCESS_REG,
1556 		.has_inbox = true,
1557 		.has_outbox = true,
1558 		.out_is_imm = false,
1559 		.encode_slave_id = false,
1560 		.verify = NULL,
1561 		.wrapper = mlx4_ACCESS_REG_wrapper,
1562 	},
1563 	{
1564 		.opcode = MLX4_CMD_CONGESTION_CTRL_OPCODE,
1565 		.has_inbox = false,
1566 		.has_outbox = false,
1567 		.out_is_imm = false,
1568 		.encode_slave_id = false,
1569 		.verify = NULL,
1570 		.wrapper = mlx4_CMD_EPERM_wrapper,
1571 	},
1572 	/* Native multicast commands are not available for guests */
1573 	{
1574 		.opcode = MLX4_CMD_QP_ATTACH,
1575 		.has_inbox = true,
1576 		.has_outbox = false,
1577 		.out_is_imm = false,
1578 		.encode_slave_id = false,
1579 		.verify = NULL,
1580 		.wrapper = mlx4_QP_ATTACH_wrapper
1581 	},
1582 	{
1583 		.opcode = MLX4_CMD_PROMISC,
1584 		.has_inbox = false,
1585 		.has_outbox = false,
1586 		.out_is_imm = false,
1587 		.encode_slave_id = false,
1588 		.verify = NULL,
1589 		.wrapper = mlx4_PROMISC_wrapper
1590 	},
1591 	/* Ethernet specific commands */
1592 	{
1593 		.opcode = MLX4_CMD_SET_VLAN_FLTR,
1594 		.has_inbox = true,
1595 		.has_outbox = false,
1596 		.out_is_imm = false,
1597 		.encode_slave_id = false,
1598 		.verify = NULL,
1599 		.wrapper = mlx4_SET_VLAN_FLTR_wrapper
1600 	},
1601 	{
1602 		.opcode = MLX4_CMD_SET_MCAST_FLTR,
1603 		.has_inbox = false,
1604 		.has_outbox = false,
1605 		.out_is_imm = false,
1606 		.encode_slave_id = false,
1607 		.verify = NULL,
1608 		.wrapper = mlx4_SET_MCAST_FLTR_wrapper
1609 	},
1610 	{
1611 		.opcode = MLX4_CMD_DUMP_ETH_STATS,
1612 		.has_inbox = false,
1613 		.has_outbox = true,
1614 		.out_is_imm = false,
1615 		.encode_slave_id = false,
1616 		.verify = NULL,
1617 		.wrapper = mlx4_DUMP_ETH_STATS_wrapper
1618 	},
1619 	{
1620 		.opcode = MLX4_CMD_INFORM_FLR_DONE,
1621 		.has_inbox = false,
1622 		.has_outbox = false,
1623 		.out_is_imm = false,
1624 		.encode_slave_id = false,
1625 		.verify = NULL,
1626 		.wrapper = NULL
1627 	},
1628 	/* flow steering commands */
1629 	{
1630 		.opcode = MLX4_QP_FLOW_STEERING_ATTACH,
1631 		.has_inbox = true,
1632 		.has_outbox = false,
1633 		.out_is_imm = true,
1634 		.encode_slave_id = false,
1635 		.verify = NULL,
1636 		.wrapper = mlx4_QP_FLOW_STEERING_ATTACH_wrapper
1637 	},
1638 	{
1639 		.opcode = MLX4_QP_FLOW_STEERING_DETACH,
1640 		.has_inbox = false,
1641 		.has_outbox = false,
1642 		.out_is_imm = false,
1643 		.encode_slave_id = false,
1644 		.verify = NULL,
1645 		.wrapper = mlx4_QP_FLOW_STEERING_DETACH_wrapper
1646 	},
1647 	{
1648 		.opcode = MLX4_FLOW_STEERING_IB_UC_QP_RANGE,
1649 		.has_inbox = false,
1650 		.has_outbox = false,
1651 		.out_is_imm = false,
1652 		.encode_slave_id = false,
1653 		.verify = NULL,
1654 		.wrapper = mlx4_CMD_EPERM_wrapper
1655 	},
1656 	{
1657 		.opcode = MLX4_CMD_VIRT_PORT_MAP,
1658 		.has_inbox = false,
1659 		.has_outbox = false,
1660 		.out_is_imm = false,
1661 		.encode_slave_id = false,
1662 		.verify = NULL,
1663 		.wrapper = mlx4_CMD_EPERM_wrapper
1664 	},
1665 };
1666 
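/*
 * Execute one slave command on its behalf: DMA in the slave's VHCR
 * (unless one was passed in), look up the opcode in cmd_info[], DMA in
 * the inbox if any, run the verify hook, execute via the wrapper or
 * directly via __mlx4_cmd(), DMA back the outbox and the VHCR status,
 * and finally generate a completion EQE if the slave asked for one.
 */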
1667 static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
1668 				    struct mlx4_vhcr_cmd *in_vhcr)
1669 {
1670 	struct mlx4_priv *priv = mlx4_priv(dev);
1671 	struct mlx4_cmd_info *cmd = NULL;
1672 	struct mlx4_vhcr_cmd *vhcr_cmd = in_vhcr ? in_vhcr : priv->mfunc.vhcr;
1673 	struct mlx4_vhcr *vhcr;
1674 	struct mlx4_cmd_mailbox *inbox = NULL;
1675 	struct mlx4_cmd_mailbox *outbox = NULL;
1676 	u64 in_param;
1677 	u64 out_param;
1678 	int ret = 0;
1679 	int i;
1680 	int err = 0;
1681 
1682 	/* Create sw representation of Virtual HCR */
1683 	vhcr = kzalloc(sizeof(struct mlx4_vhcr), GFP_KERNEL);
1684 	if (!vhcr)
1685 		return -ENOMEM;
1686 
1687 	/* DMA in the vHCR */
1688 	if (!in_vhcr) {
1689 		ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
1690 				      priv->mfunc.master.slave_state[slave].vhcr_dma,
1691 				      ALIGN(sizeof(struct mlx4_vhcr_cmd),
1692 					    MLX4_ACCESS_MEM_ALIGN), 1);
1693 		if (ret) {
1694 			if (!(dev->persist->state &
1695 			    MLX4_DEVICE_STATE_INTERNAL_ERROR))
1696 				mlx4_err(dev, "%s: Failed reading vhcr ret: 0x%x\n",
1697 					 __func__, ret);
1698 			kfree(vhcr);
1699 			return ret;
1700 		}
1701 	}
1702 
1703 	/* Fill SW VHCR fields */
1704 	vhcr->in_param = be64_to_cpu(vhcr_cmd->in_param);
1705 	vhcr->out_param = be64_to_cpu(vhcr_cmd->out_param);
1706 	vhcr->in_modifier = be32_to_cpu(vhcr_cmd->in_modifier);
1707 	vhcr->token = be16_to_cpu(vhcr_cmd->token);
1708 	vhcr->op = be16_to_cpu(vhcr_cmd->opcode) & 0xfff;
1709 	vhcr->op_modifier = (u8) (be16_to_cpu(vhcr_cmd->opcode) >> 12);
1710 	vhcr->e_bit = vhcr_cmd->flags & (1 << 6);
1711 
1712 	/* Lookup command */
1713 	for (i = 0; i < ARRAY_SIZE(cmd_info); ++i) {
1714 		if (vhcr->op == cmd_info[i].opcode) {
1715 			cmd = &cmd_info[i];
1716 			break;
1717 		}
1718 	}
1719 	if (!cmd) {
1720 		mlx4_err(dev, "Unknown command:0x%x accepted from slave:%d\n",
1721 			 vhcr->op, slave);
1722 		vhcr_cmd->status = CMD_STAT_BAD_PARAM;
1723 		goto out_status;
1724 	}
1725 
1726 	/* Read inbox */
1727 	if (cmd->has_inbox) {
1728 		vhcr->in_param &= INBOX_MASK;
1729 		inbox = mlx4_alloc_cmd_mailbox(dev);
1730 		if (IS_ERR(inbox)) {
1731 			vhcr_cmd->status = CMD_STAT_BAD_SIZE;
1732 			inbox = NULL;
1733 			goto out_status;
1734 		}
1735 
1736 		ret = mlx4_ACCESS_MEM(dev, inbox->dma, slave,
1737 				      vhcr->in_param,
1738 				      MLX4_MAILBOX_SIZE, 1);
1739 		if (ret) {
1740 			if (!(dev->persist->state &
1741 			    MLX4_DEVICE_STATE_INTERNAL_ERROR))
1742 				mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n",
1743 					 __func__, cmd->opcode);
1744 			vhcr_cmd->status = CMD_STAT_INTERNAL_ERR;
1745 			goto out_status;
1746 		}
1747 	}
1748 
	/* Apply permission and bounds checks if applicable */
1750 	if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
1751 		mlx4_warn(dev, "Command:0x%x from slave: %d failed protection checks for resource_id:%d\n",
1752 			  vhcr->op, slave, vhcr->in_modifier);
1753 		vhcr_cmd->status = CMD_STAT_BAD_OP;
1754 		goto out_status;
1755 	}
1756 
1757 	/* Allocate outbox */
1758 	if (cmd->has_outbox) {
1759 		outbox = mlx4_alloc_cmd_mailbox(dev);
1760 		if (IS_ERR(outbox)) {
1761 			vhcr_cmd->status = CMD_STAT_BAD_SIZE;
1762 			outbox = NULL;
1763 			goto out_status;
1764 		}
1765 	}
1766 
1767 	/* Execute the command! */
1768 	if (cmd->wrapper) {
1769 		err = cmd->wrapper(dev, slave, vhcr, inbox, outbox,
1770 				   cmd);
1771 		if (cmd->out_is_imm)
1772 			vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
1773 	} else {
1774 		in_param = cmd->has_inbox ? (u64) inbox->dma :
1775 			vhcr->in_param;
1776 		out_param = cmd->has_outbox ? (u64) outbox->dma :
1777 			vhcr->out_param;
1778 		err = __mlx4_cmd(dev, in_param, &out_param,
1779 				 cmd->out_is_imm, vhcr->in_modifier,
1780 				 vhcr->op_modifier, vhcr->op,
1781 				 MLX4_CMD_TIME_CLASS_A,
1782 				 MLX4_CMD_NATIVE);
1783 
1784 		if (cmd->out_is_imm) {
1785 			vhcr->out_param = out_param;
1786 			vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
1787 		}
1788 	}
1789 
1790 	if (err) {
1791 		if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR))
1792 			mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n",
1793 				  vhcr->op, slave, vhcr->errno, err);
1794 		vhcr_cmd->status = mlx4_errno_to_status(err);
1795 		goto out_status;
1796 	}
1797 
1798 
1799 	/* Write outbox if command completed successfully */
1800 	if (cmd->has_outbox && !vhcr_cmd->status) {
1801 		ret = mlx4_ACCESS_MEM(dev, outbox->dma, slave,
1802 				      vhcr->out_param,
1803 				      MLX4_MAILBOX_SIZE, MLX4_CMD_WRAPPED);
1804 		if (ret) {
			/* If we failed to write back the outbox after the
			 * command was successfully executed, we must fail
			 * this slave, as it is now in an undefined state.
			 */
1808 			if (!(dev->persist->state &
1809 			    MLX4_DEVICE_STATE_INTERNAL_ERROR))
1810 				mlx4_err(dev, "%s:Failed writing outbox\n", __func__);
1811 			goto out;
1812 		}
1813 	}
1814 
1815 out_status:
1816 	/* DMA back vhcr result */
1817 	if (!in_vhcr) {
1818 		ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
1819 				      priv->mfunc.master.slave_state[slave].vhcr_dma,
1820 				      ALIGN(sizeof(struct mlx4_vhcr),
1821 					    MLX4_ACCESS_MEM_ALIGN),
1822 				      MLX4_CMD_WRAPPED);
1823 		if (ret)
1824 			mlx4_err(dev, "%s:Failed writing vhcr result\n",
1825 				 __func__);
1826 		else if (vhcr->e_bit &&
1827 			 mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
1828 				mlx4_warn(dev, "Failed to generate command completion eqe for slave %d\n",
1829 					  slave);
1830 	}
1831 
1832 out:
1833 	kfree(vhcr);
1834 	mlx4_free_cmd_mailbox(dev, inbox);
1835 	mlx4_free_cmd_mailbox(dev, outbox);
1836 	return ret;
1837 }
1838 
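/*
 * If the admin vport state (VLAN/QoS/link/qos_vport) differs from the
 * operational one, register the new VLAN if needed and queue a work item
 * that walks the slave's QPs with UPDATE_QP to apply the change
 * immediately.
 */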
1839 static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
1840 					    int slave, int port)
1841 {
1842 	struct mlx4_vport_oper_state *vp_oper;
1843 	struct mlx4_vport_state *vp_admin;
1844 	struct mlx4_vf_immed_vlan_work *work;
1845 	struct mlx4_dev *dev = &(priv->dev);
1846 	int err;
1847 	int admin_vlan_ix = NO_INDX;
1848 
1849 	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
1850 	vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
1851 
1852 	if (vp_oper->state.default_vlan == vp_admin->default_vlan &&
1853 	    vp_oper->state.default_qos == vp_admin->default_qos &&
1854 	    vp_oper->state.vlan_proto == vp_admin->vlan_proto &&
1855 	    vp_oper->state.link_state == vp_admin->link_state &&
1856 	    vp_oper->state.qos_vport == vp_admin->qos_vport)
1857 		return 0;
1858 
1859 	if (!(priv->mfunc.master.slave_state[slave].active &&
1860 	      dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)) {
1861 		/* even if the UPDATE_QP command isn't supported, we still want
1862 		 * to set this VF link according to the admin directive
1863 		 */
1864 		vp_oper->state.link_state = vp_admin->link_state;
1865 		return -1;
1866 	}
1867 
1868 	mlx4_dbg(dev, "updating immediately admin params slave %d port %d\n",
1869 		 slave, port);
1870 	mlx4_dbg(dev, "vlan %d QoS %d link down %d\n",
1871 		 vp_admin->default_vlan, vp_admin->default_qos,
1872 		 vp_admin->link_state);
1873 
1874 	work = kzalloc(sizeof(*work), GFP_KERNEL);
1875 	if (!work)
1876 		return -ENOMEM;
1877 
1878 	if (vp_oper->state.default_vlan != vp_admin->default_vlan) {
1879 		if (MLX4_VGT != vp_admin->default_vlan) {
1880 			err = __mlx4_register_vlan(&priv->dev, port,
1881 						   vp_admin->default_vlan,
1882 						   &admin_vlan_ix);
1883 			if (err) {
1884 				kfree(work);
1885 				mlx4_warn(&priv->dev,
1886 					  "No vlan resources slave %d, port %d\n",
1887 					  slave, port);
1888 				return err;
1889 			}
1890 		} else {
1891 			admin_vlan_ix = NO_INDX;
1892 		}
1893 		work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
1894 		mlx4_dbg(&priv->dev,
1895 			 "alloc vlan %d idx  %d slave %d port %d\n",
1896 			 (int)(vp_admin->default_vlan),
1897 			 admin_vlan_ix, slave, port);
1898 	}
1899 
1900 	/* save original vlan ix and vlan id */
1901 	work->orig_vlan_id = vp_oper->state.default_vlan;
1902 	work->orig_vlan_ix = vp_oper->vlan_idx;
1903 
1904 	/* handle new qos */
1905 	if (vp_oper->state.default_qos != vp_admin->default_qos)
1906 		work->flags |= MLX4_VF_IMMED_VLAN_FLAG_QOS;
1907 
1908 	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN)
1909 		vp_oper->vlan_idx = admin_vlan_ix;
1910 
1911 	vp_oper->state.default_vlan = vp_admin->default_vlan;
1912 	vp_oper->state.default_qos = vp_admin->default_qos;
1913 	vp_oper->state.vlan_proto = vp_admin->vlan_proto;
1914 	vp_oper->state.link_state = vp_admin->link_state;
1915 	vp_oper->state.qos_vport = vp_admin->qos_vport;
1916 
1917 	if (vp_admin->link_state == IFLA_VF_LINK_STATE_DISABLE)
1918 		work->flags |= MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE;
1919 
1920 	/* iterate over QPs owned by this slave, using UPDATE_QP */
1921 	work->port = port;
1922 	work->slave = slave;
1923 	work->qos = vp_oper->state.default_qos;
1924 	work->qos_vport = vp_oper->state.qos_vport;
1925 	work->vlan_id = vp_oper->state.default_vlan;
1926 	work->vlan_ix = vp_oper->vlan_idx;
1927 	work->vlan_proto = vp_oper->state.vlan_proto;
1928 	work->priv = priv;
1929 	INIT_WORK(&work->work, mlx4_vf_immed_vlan_work_handler);
1930 	queue_work(priv->mfunc.master.comm_wq, &work->work);
1931 
1932 	return 0;
1933 }
1934 
1935 static void mlx4_set_default_port_qos(struct mlx4_dev *dev, int port)
1936 {
1937 	struct mlx4_qos_manager *port_qos_ctl;
1938 	struct mlx4_priv *priv = mlx4_priv(dev);
1939 
1940 	port_qos_ctl = &priv->mfunc.master.qos_ctl[port];
1941 	bitmap_zero(port_qos_ctl->priority_bm, MLX4_NUM_UP);
1942 
1943 	/* Enable only default prio at PF init routine */
1944 	set_bit(MLX4_DEFAULT_QOS_PRIO, port_qos_ctl->priority_bm);
1945 }
1946 
1947 static void mlx4_allocate_port_vpps(struct mlx4_dev *dev, int port)
1948 {
1949 	int i;
1950 	int err;
1951 	int num_vfs;
	u16 available_vpp;
1953 	u8 vpp_param[MLX4_NUM_UP];
1954 	struct mlx4_qos_manager *port_qos;
1955 	struct mlx4_priv *priv = mlx4_priv(dev);
1956 
	err = mlx4_ALLOCATE_VPP_get(dev, port, &available_vpp, vpp_param);
	if (err) {
		mlx4_info(dev, "Failed to query available VPPs\n");
1960 		return;
1961 	}
1962 
1963 	port_qos = &priv->mfunc.master.qos_ctl[port];
	num_vfs = (available_vpp /
1965 		   bitmap_weight(port_qos->priority_bm, MLX4_NUM_UP));
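	/* Split the available VPPs evenly among the enabled priorities.
	 * Illustrative arithmetic only: 64 available VPPs with a single
	 * enabled priority give that priority all 64 (allowing up to 64
	 * QoS VFs); with two enabled priorities each would get 32.
	 */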
1966 
1967 	for (i = 0; i < MLX4_NUM_UP; i++) {
1968 		if (test_bit(i, port_qos->priority_bm))
1969 			vpp_param[i] = num_vfs;
1970 	}
1971 
1972 	err = mlx4_ALLOCATE_VPP_set(dev, port, vpp_param);
1973 	if (err) {
1974 		mlx4_info(dev, "Failed allocating VPPs\n");
1975 		return;
1976 	}
1977 
1978 	/* Query actual allocated VPP, just to make sure */
	err = mlx4_ALLOCATE_VPP_get(dev, port, &available_vpp, vpp_param);
	if (err) {
		mlx4_info(dev, "Failed to query available VPPs\n");
1982 		return;
1983 	}
1984 
1985 	port_qos->num_of_qos_vfs = num_vfs;
1986 	mlx4_dbg(dev, "Port %d Availible VPPs %d\n", port, availible_vpp);
1987 
1988 	for (i = 0; i < MLX4_NUM_UP; i++)
1989 		mlx4_dbg(dev, "Port %d UP %d Allocated %d VPPs\n", port, i,
1990 			 vpp_param[i]);
1991 }
1992 
1993 static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
1994 {
1995 	int port, err;
1996 	struct mlx4_vport_state *vp_admin;
1997 	struct mlx4_vport_oper_state *vp_oper;
1998 	struct mlx4_slave_state *slave_state =
1999 		&priv->mfunc.master.slave_state[slave];
2000 	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
2001 			&priv->dev, slave);
2002 	int min_port = find_first_bit(actv_ports.ports,
2003 				      priv->dev.caps.num_ports) + 1;
2004 	int max_port = min_port - 1 +
2005 		bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
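	/* actv_ports.ports is a physical-port bitmap. As an illustrative
	 * example, a VF restricted to port 2 of a 2-port HCA has bitmap
	 * 0b10, giving min_port = 2 and max_port = 2.
	 */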
2006 
2007 	for (port = min_port; port <= max_port; port++) {
2008 		if (!test_bit(port - 1, actv_ports.ports))
2009 			continue;
2010 		priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
2011 			priv->mfunc.master.vf_admin[slave].enable_smi[port];
2012 		vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
2013 		vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
2014 		if (vp_admin->vlan_proto != htons(ETH_P_8021AD) ||
2015 		    slave_state->vst_qinq_supported) {
2016 			vp_oper->state.vlan_proto   = vp_admin->vlan_proto;
2017 			vp_oper->state.default_vlan = vp_admin->default_vlan;
2018 			vp_oper->state.default_qos  = vp_admin->default_qos;
2019 		}
2020 		vp_oper->state.link_state = vp_admin->link_state;
2021 		vp_oper->state.mac        = vp_admin->mac;
2022 		vp_oper->state.spoofchk   = vp_admin->spoofchk;
2023 		vp_oper->state.tx_rate    = vp_admin->tx_rate;
2024 		vp_oper->state.qos_vport  = vp_admin->qos_vport;
2025 		vp_oper->state.guid       = vp_admin->guid;
2026 
2027 		if (MLX4_VGT != vp_admin->default_vlan) {
2028 			err = __mlx4_register_vlan(&priv->dev, port,
2029 						   vp_admin->default_vlan, &(vp_oper->vlan_idx));
2030 			if (err) {
2031 				vp_oper->vlan_idx = NO_INDX;
2032 				vp_oper->state.default_vlan = MLX4_VGT;
2033 				vp_oper->state.vlan_proto = htons(ETH_P_8021Q);
2034 				mlx4_warn(&priv->dev,
2035 					  "No vlan resources slave %d, port %d\n",
2036 					  slave, port);
2037 				return err;
2038 			}
2039 			mlx4_dbg(&priv->dev, "alloc vlan %d idx  %d slave %d port %d\n",
2040 				 (int)(vp_oper->state.default_vlan),
2041 				 vp_oper->vlan_idx, slave, port);
2042 		}
2043 		if (vp_admin->spoofchk) {
2044 			vp_oper->mac_idx = __mlx4_register_mac(&priv->dev,
2045 							       port,
2046 							       vp_admin->mac);
2047 			if (0 > vp_oper->mac_idx) {
2048 				err = vp_oper->mac_idx;
2049 				vp_oper->mac_idx = NO_INDX;
2050 				mlx4_warn(&priv->dev,
2051 					  "No mac resources slave %d, port %d\n",
2052 					  slave, port);
2053 				return err;
2054 			}
2055 			mlx4_dbg(&priv->dev, "alloc mac %llx idx  %d slave %d port %d\n",
2056 				 vp_oper->state.mac, vp_oper->mac_idx, slave, port);
2057 		}
2058 	}
2059 	return 0;
2060 }
2061 
2062 static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave)
2063 {
2064 	int port;
2065 	struct mlx4_vport_oper_state *vp_oper;
2066 	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
2067 			&priv->dev, slave);
2068 	int min_port = find_first_bit(actv_ports.ports,
2069 				      priv->dev.caps.num_ports) + 1;
2070 	int max_port = min_port - 1 +
2071 		bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
2072 
2074 	for (port = min_port; port <= max_port; port++) {
2075 		if (!test_bit(port - 1, actv_ports.ports))
2076 			continue;
2077 		priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
2078 			MLX4_VF_SMI_DISABLED;
2079 		vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
2080 		if (NO_INDX != vp_oper->vlan_idx) {
2081 			__mlx4_unregister_vlan(&priv->dev,
2082 					       port, vp_oper->state.default_vlan);
2083 			vp_oper->vlan_idx = NO_INDX;
2084 		}
2085 		if (NO_INDX != vp_oper->mac_idx) {
2086 			__mlx4_unregister_mac(&priv->dev, port, vp_oper->state.mac);
2087 			vp_oper->mac_idx = NO_INDX;
2088 		}
2089 	}
2090 	return;
2091 }
2092 
2093 static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
2094 			       u16 param, u8 toggle)
2095 {
2096 	struct mlx4_priv *priv = mlx4_priv(dev);
2097 	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2098 	u32 reply;
2099 	u8 is_going_down = 0;
2100 	int i;
2101 	unsigned long flags;
2102 
2103 	slave_state[slave].comm_toggle ^= 1;
2104 	reply = (u32) slave_state[slave].comm_toggle << 31;
2105 	if (toggle != slave_state[slave].comm_toggle) {
2106 		mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER STATE COMPROMISED ***\n",
2107 			  toggle, slave);
2108 		goto reset_slave;
2109 	}
2110 	if (cmd == MLX4_COMM_CMD_RESET) {
2111 		mlx4_warn(dev, "Received reset from slave:%d\n", slave);
2112 		slave_state[slave].active = false;
2113 		slave_state[slave].old_vlan_api = false;
2114 		slave_state[slave].vst_qinq_supported = false;
2115 		mlx4_master_deactivate_admin_state(priv, slave);
2116 		for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
			slave_state[slave].event_eq[i].eqn = -1;
			slave_state[slave].event_eq[i].token = 0;
2119 		}
		/* Check if we are in the middle of the FLR process;
		 * if so, return "retry" status to the slave.
		 */
2122 		if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd)
2123 			goto inform_slave_state;
2124 
2125 		mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN, slave);
2126 
2127 		/* write the version in the event field */
2128 		reply |= mlx4_comm_get_version();
2129 
2130 		goto reset_slave;
2131 	}
	/* Command from slave in the middle of FLR */
2133 	if (cmd != MLX4_COMM_CMD_RESET &&
2134 	    MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
2135 		mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) in the middle of FLR\n",
2136 			  slave, cmd);
2137 		return;
2138 	}
2139 
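	/* The slave hands the master its 64-bit VHCR DMA address in four
	 * 16-bit chunks, most significant first. A hypothetical sketch of
	 * the reassembly performed by the VHCR0..VHCR_EN cases below,
	 * where param_vhcr0..param_vhcr_en stand for the 16-bit 'param'
	 * carried by each comm command (names illustrative only):
	 *
	 *	u64 dma;
	 *	dma  = (u64)param_vhcr0 << 48;	bits 63..48
	 *	dma |= (u64)param_vhcr1 << 32;	bits 47..32
	 *	dma |= (u64)param_vhcr2 << 16;	bits 31..16
	 *	dma |= param_vhcr_en;		bits 15..0
	 */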
2140 	switch (cmd) {
2141 	case MLX4_COMM_CMD_VHCR0:
2142 		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_RESET)
2143 			goto reset_slave;
2144 		slave_state[slave].vhcr_dma = ((u64) param) << 48;
2145 		priv->mfunc.master.slave_state[slave].cookie = 0;
2146 		break;
2147 	case MLX4_COMM_CMD_VHCR1:
2148 		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
2149 			goto reset_slave;
2150 		slave_state[slave].vhcr_dma |= ((u64) param) << 32;
2151 		break;
2152 	case MLX4_COMM_CMD_VHCR2:
2153 		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR1)
2154 			goto reset_slave;
2155 		slave_state[slave].vhcr_dma |= ((u64) param) << 16;
2156 		break;
2157 	case MLX4_COMM_CMD_VHCR_EN:
2158 		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2)
2159 			goto reset_slave;
2160 		slave_state[slave].vhcr_dma |= param;
2161 		if (mlx4_master_activate_admin_state(priv, slave))
			goto reset_slave;
2163 		slave_state[slave].active = true;
2164 		mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_INIT, slave);
2165 		break;
2166 	case MLX4_COMM_CMD_VHCR_POST:
2167 		if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
2168 		    (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST)) {
2169 			mlx4_warn(dev, "slave:%d is out of sync, cmd=0x%x, last command=0x%x, reset is needed\n",
2170 				  slave, cmd, slave_state[slave].last_cmd);
2171 			goto reset_slave;
2172 		}
2173 
2174 		mutex_lock(&priv->cmd.slave_cmd_mutex);
2175 		if (mlx4_master_process_vhcr(dev, slave, NULL)) {
2176 			mlx4_err(dev, "Failed processing vhcr for slave:%d, resetting slave\n",
2177 				 slave);
2178 			mutex_unlock(&priv->cmd.slave_cmd_mutex);
2179 			goto reset_slave;
2180 		}
2181 		mutex_unlock(&priv->cmd.slave_cmd_mutex);
2182 		break;
2183 	default:
2184 		mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave);
2185 		goto reset_slave;
2186 	}
2187 	spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
2188 	if (!slave_state[slave].is_slave_going_down)
2189 		slave_state[slave].last_cmd = cmd;
2190 	else
2191 		is_going_down = 1;
2192 	spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
2193 	if (is_going_down) {
2194 		mlx4_warn(dev, "Slave is going down aborting command(%d) executing from slave:%d\n",
2195 			  cmd, slave);
2196 		return;
2197 	}
2198 	__raw_writel((__force u32) cpu_to_be32(reply),
2199 		     &priv->mfunc.comm[slave].slave_read);
2200 	mmiowb();
2201 
2202 	return;
2203 
2204 reset_slave:
2205 	/* cleanup any slave resources */
2206 	if (dev->persist->interface_state & MLX4_INTERFACE_STATE_UP)
2207 		mlx4_delete_all_resources_for_slave(dev, slave);
2208 
2209 	if (cmd != MLX4_COMM_CMD_RESET) {
2210 		mlx4_warn(dev, "Turn on internal error to force reset, slave=%d, cmd=0x%x\n",
2211 			  slave, cmd);
		/* Turn on internal error letting the slave reset itself
		 * immediately; otherwise it might wait until the command
		 * times out.
		 */
2215 		reply |= ((u32)COMM_CHAN_EVENT_INTERNAL_ERR);
2216 	}
2217 
2218 	spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
2219 	if (!slave_state[slave].is_slave_going_down)
2220 		slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
2221 	spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
	/* With the slave in the middle of FLR, no need to clean resources again. */
2223 inform_slave_state:
2224 	memset(&slave_state[slave].event_eq, 0,
2225 	       sizeof(struct mlx4_slave_event_eq_info));
2226 	__raw_writel((__force u32) cpu_to_be32(reply),
2227 		     &priv->mfunc.comm[slave].slave_read);
2228 	wmb();
2229 }
2230 
2231 /* master command processing */
2232 void mlx4_master_comm_channel(struct work_struct *work)
2233 {
2234 	struct mlx4_mfunc_master_ctx *master =
2235 		container_of(work,
2236 			     struct mlx4_mfunc_master_ctx,
2237 			     comm_work);
2238 	struct mlx4_mfunc *mfunc =
2239 		container_of(master, struct mlx4_mfunc, master);
2240 	struct mlx4_priv *priv =
2241 		container_of(mfunc, struct mlx4_priv, mfunc);
2242 	struct mlx4_dev *dev = &priv->dev;
2243 	__be32 *bit_vec;
2244 	u32 comm_cmd;
2245 	u32 vec;
2246 	int i, j, slave;
2247 	int toggle;
2248 	int served = 0;
2249 	int reported = 0;
2250 	u32 slt;
2251 
2252 	bit_vec = master->comm_arm_bit_vector;
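	/* Each set bit j of 32-bit word i in the arm bit vector marks
	 * slave (i * 32 + j) as having posted a comm command; as an
	 * illustrative example, bit 3 of word 1 corresponds to slave 35.
	 */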
2253 	for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++) {
2254 		vec = be32_to_cpu(bit_vec[i]);
2255 		for (j = 0; j < 32; j++) {
2256 			if (!(vec & (1 << j)))
2257 				continue;
2258 			++reported;
2259 			slave = (i * 32) + j;
2260 			comm_cmd = swab32(readl(
2261 					  &mfunc->comm[slave].slave_write));
2262 			slt = swab32(readl(&mfunc->comm[slave].slave_read))
2263 				     >> 31;
2264 			toggle = comm_cmd >> 31;
2265 			if (toggle != slt) {
2266 				if (master->slave_state[slave].comm_toggle
2267 				    != slt) {
2268 					pr_info("slave %d out of sync. read toggle %d, state toggle %d. Resynching.\n",
2269 						slave, slt,
2270 						master->slave_state[slave].comm_toggle);
2271 					master->slave_state[slave].comm_toggle =
2272 						slt;
2273 				}
2274 				mlx4_master_do_cmd(dev, slave,
2275 						   comm_cmd >> 16 & 0xff,
2276 						   comm_cmd & 0xffff, toggle);
2277 				++served;
2278 			}
2279 		}
2280 	}
2281 
2282 	if (reported && reported != served)
2283 		mlx4_warn(dev, "Got command event with bitmask from %d slaves but %d were served\n",
2284 			  reported, served);
2285 
2286 	if (mlx4_ARM_COMM_CHANNEL(dev))
2287 		mlx4_warn(dev, "Failed to arm comm channel events\n");
2288 }
2289 
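/*
 * The comm channel toggle is the top bit of the slave_write/slave_read
 * words: the slave flips it in slave_write when posting a command, and
 * the master echoes it back in slave_read on completion. A hypothetical
 * in-sync snapshot, for illustration only:
 *
 *	slave_write = 0x80000005	(toggle = 1)
 *	slave_read  = 0x80000000	(toggle = 1, matches - synced)
 */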
2290 static int sync_toggles(struct mlx4_dev *dev)
2291 {
2292 	struct mlx4_priv *priv = mlx4_priv(dev);
2293 	u32 wr_toggle;
2294 	u32 rd_toggle;
2295 	unsigned long end;
2296 
2297 	wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write));
2298 	if (wr_toggle == 0xffffffff)
2299 		end = jiffies + msecs_to_jiffies(30000);
2300 	else
2301 		end = jiffies + msecs_to_jiffies(5000);
2302 
2303 	while (time_before(jiffies, end)) {
2304 		rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read));
2305 		if (wr_toggle == 0xffffffff || rd_toggle == 0xffffffff) {
2306 			/* PCI might be offline */
2307 			msleep(100);
2308 			wr_toggle = swab32(readl(&priv->mfunc.comm->
2309 					   slave_write));
2310 			continue;
2311 		}
2312 
2313 		if (rd_toggle >> 31 == wr_toggle >> 31) {
2314 			priv->cmd.comm_toggle = rd_toggle >> 31;
2315 			return 0;
2316 		}
2317 
2318 		cond_resched();
2319 	}
2320 
2321 	/*
2322 	 * we could reach here if for example the previous VM using this
2323 	 * function misbehaved and left the channel with unsynced state. We
2324 	 * should fix this here and give this VM a chance to use a properly
2325 	 * synced channel
2326 	 */
2327 	mlx4_warn(dev, "recovering from previously mis-behaved VM\n");
2328 	__raw_writel((__force u32) 0, &priv->mfunc.comm->slave_read);
2329 	__raw_writel((__force u32) 0, &priv->mfunc.comm->slave_write);
2330 	priv->cmd.comm_toggle = 0;
2331 
2332 	return 0;
2333 }
2334 
2335 int mlx4_multi_func_init(struct mlx4_dev *dev)
2336 {
2337 	struct mlx4_priv *priv = mlx4_priv(dev);
2338 	struct mlx4_slave_state *s_state;
2339 	int i, j, err, port;
2340 
2341 	if (mlx4_is_master(dev))
2342 		priv->mfunc.comm =
2343 		ioremap(pci_resource_start(dev->persist->pdev,
2344 					   priv->fw.comm_bar) +
2345 			priv->fw.comm_base, MLX4_COMM_PAGESIZE);
2346 	else
2347 		priv->mfunc.comm =
2348 		ioremap(pci_resource_start(dev->persist->pdev, 2) +
2349 			MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
2350 	if (!priv->mfunc.comm) {
2351 		mlx4_err(dev, "Couldn't map communication vector\n");
2352 		goto err_vhcr;
2353 	}
2354 
2355 	if (mlx4_is_master(dev)) {
2356 		struct mlx4_vf_oper_state *vf_oper;
2357 		struct mlx4_vf_admin_state *vf_admin;
2358 
2359 		priv->mfunc.master.slave_state =
2360 			kzalloc(dev->num_slaves *
2361 				sizeof(struct mlx4_slave_state), GFP_KERNEL);
2362 		if (!priv->mfunc.master.slave_state)
2363 			goto err_comm;
2364 
2365 		priv->mfunc.master.vf_admin =
2366 			kzalloc(dev->num_slaves *
2367 				sizeof(struct mlx4_vf_admin_state), GFP_KERNEL);
2368 		if (!priv->mfunc.master.vf_admin)
2369 			goto err_comm_admin;
2370 
2371 		priv->mfunc.master.vf_oper =
2372 			kzalloc(dev->num_slaves *
2373 				sizeof(struct mlx4_vf_oper_state), GFP_KERNEL);
2374 		if (!priv->mfunc.master.vf_oper)
2375 			goto err_comm_oper;
2376 
2377 		for (i = 0; i < dev->num_slaves; ++i) {
2378 			vf_admin = &priv->mfunc.master.vf_admin[i];
2379 			vf_oper = &priv->mfunc.master.vf_oper[i];
2380 			s_state = &priv->mfunc.master.slave_state[i];
2381 			s_state->last_cmd = MLX4_COMM_CMD_RESET;
2382 			s_state->vst_qinq_supported = false;
2383 			mutex_init(&priv->mfunc.master.gen_eqe_mutex[i]);
2384 			for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j)
2385 				s_state->event_eq[j].eqn = -1;
2386 			__raw_writel((__force u32) 0,
2387 				     &priv->mfunc.comm[i].slave_write);
2388 			__raw_writel((__force u32) 0,
2389 				     &priv->mfunc.comm[i].slave_read);
2390 			mmiowb();
2391 			for (port = 1; port <= MLX4_MAX_PORTS; port++) {
2392 				struct mlx4_vport_state *admin_vport;
2393 				struct mlx4_vport_state *oper_vport;
2394 
2395 				s_state->vlan_filter[port] =
2396 					kzalloc(sizeof(struct mlx4_vlan_fltr),
2397 						GFP_KERNEL);
				if (!s_state->vlan_filter[port]) {
					/* Free this slave's previously
					 * allocated port filters.
					 */
					while (--port)
						kfree(s_state->vlan_filter[port]);
					goto err_slaves;
				}
2403 
2404 				admin_vport = &vf_admin->vport[port];
2405 				oper_vport = &vf_oper->vport[port].state;
2406 				INIT_LIST_HEAD(&s_state->mcast_filters[port]);
2407 				admin_vport->default_vlan = MLX4_VGT;
2408 				oper_vport->default_vlan = MLX4_VGT;
2409 				admin_vport->qos_vport =
2410 						MLX4_VPP_DEFAULT_VPORT;
2411 				oper_vport->qos_vport = MLX4_VPP_DEFAULT_VPORT;
2412 				admin_vport->vlan_proto = htons(ETH_P_8021Q);
2413 				oper_vport->vlan_proto = htons(ETH_P_8021Q);
2414 				vf_oper->vport[port].vlan_idx = NO_INDX;
2415 				vf_oper->vport[port].mac_idx = NO_INDX;
2416 				mlx4_set_random_admin_guid(dev, i, port);
2417 			}
2418 			spin_lock_init(&s_state->lock);
2419 		}
2420 
2421 		if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP) {
2422 			for (port = 1; port <= dev->caps.num_ports; port++) {
2423 				if (mlx4_is_eth(dev, port)) {
2424 					mlx4_set_default_port_qos(dev, port);
2425 					mlx4_allocate_port_vpps(dev, port);
2426 				}
2427 			}
2428 		}
2429 
2430 		memset(&priv->mfunc.master.cmd_eqe, 0, sizeof(struct mlx4_eqe));
2431 		priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
2432 		INIT_WORK(&priv->mfunc.master.comm_work,
2433 			  mlx4_master_comm_channel);
2434 		INIT_WORK(&priv->mfunc.master.slave_event_work,
2435 			  mlx4_gen_slave_eqe);
2436 		INIT_WORK(&priv->mfunc.master.slave_flr_event_work,
2437 			  mlx4_master_handle_slave_flr);
2438 		spin_lock_init(&priv->mfunc.master.slave_state_lock);
2439 		spin_lock_init(&priv->mfunc.master.slave_eq.event_lock);
2440 		priv->mfunc.master.comm_wq =
2441 			create_singlethread_workqueue("mlx4_comm");
2442 		if (!priv->mfunc.master.comm_wq)
2443 			goto err_slaves;
2444 
2445 		if (mlx4_init_resource_tracker(dev))
2446 			goto err_thread;
2447 
2448 	} else {
2449 		err = sync_toggles(dev);
2450 		if (err) {
2451 			mlx4_err(dev, "Couldn't sync toggles\n");
2452 			goto err_comm;
2453 		}
2454 	}
2455 	return 0;
2456 
2457 err_thread:
2458 	flush_workqueue(priv->mfunc.master.comm_wq);
2459 	destroy_workqueue(priv->mfunc.master.comm_wq);
2460 err_slaves:
2461 	while (i--) {
2462 		for (port = 1; port <= MLX4_MAX_PORTS; port++)
2463 			kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
2464 	}
2465 	kfree(priv->mfunc.master.vf_oper);
2466 err_comm_oper:
2467 	kfree(priv->mfunc.master.vf_admin);
2468 err_comm_admin:
2469 	kfree(priv->mfunc.master.slave_state);
2470 err_comm:
2471 	iounmap(priv->mfunc.comm);
2472 	priv->mfunc.comm = NULL;
2473 err_vhcr:
2474 	dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
2475 			  priv->mfunc.vhcr,
2476 			  priv->mfunc.vhcr_dma);
2477 	priv->mfunc.vhcr = NULL;
2478 	return -ENOMEM;
2479 }
2480 
2481 int mlx4_cmd_init(struct mlx4_dev *dev)
2482 {
2483 	struct mlx4_priv *priv = mlx4_priv(dev);
2484 	int flags = 0;
2485 
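	/* Accumulate an MLX4_CMD_CLEANUP_* flag for each resource this
	 * call creates, so the error path can hand exactly that set to
	 * mlx4_cmd_cleanup() and tear down only what was set up here.
	 */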
2486 	if (!priv->cmd.initialized) {
2487 		init_rwsem(&priv->cmd.switch_sem);
2488 		mutex_init(&priv->cmd.slave_cmd_mutex);
2489 		sema_init(&priv->cmd.poll_sem, 1);
2490 		priv->cmd.use_events = 0;
2491 		priv->cmd.toggle     = 1;
2492 		priv->cmd.initialized = 1;
2493 		flags |= MLX4_CMD_CLEANUP_STRUCT;
2494 	}
2495 
2496 	if (!mlx4_is_slave(dev) && !priv->cmd.hcr) {
2497 		priv->cmd.hcr = ioremap(pci_resource_start(dev->persist->pdev,
2498 					0) + MLX4_HCR_BASE, MLX4_HCR_SIZE);
2499 		if (!priv->cmd.hcr) {
2500 			mlx4_err(dev, "Couldn't map command register\n");
2501 			goto err;
2502 		}
2503 		flags |= MLX4_CMD_CLEANUP_HCR;
2504 	}
2505 
2506 	if (mlx4_is_mfunc(dev) && !priv->mfunc.vhcr) {
2507 		priv->mfunc.vhcr = dma_alloc_coherent(&dev->persist->pdev->dev,
2508 						      PAGE_SIZE,
2509 						      &priv->mfunc.vhcr_dma,
2510 						      GFP_KERNEL);
2511 		if (!priv->mfunc.vhcr)
2512 			goto err;
2513 
2514 		flags |= MLX4_CMD_CLEANUP_VHCR;
2515 	}
2516 
2517 	if (!priv->cmd.pool) {
2518 		priv->cmd.pool = pci_pool_create("mlx4_cmd",
2519 						 dev->persist->pdev,
2520 						 MLX4_MAILBOX_SIZE,
2521 						 MLX4_MAILBOX_SIZE, 0);
2522 		if (!priv->cmd.pool)
2523 			goto err;
2524 
2525 		flags |= MLX4_CMD_CLEANUP_POOL;
2526 	}
2527 
2528 	return 0;
2529 
2530 err:
2531 	mlx4_cmd_cleanup(dev, flags);
2532 	return -ENOMEM;
2533 }
2534 
2535 void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev)
2536 {
2537 	struct mlx4_priv *priv = mlx4_priv(dev);
2538 	int slave;
2539 	u32 slave_read;
2540 
2541 	/* If the comm channel has not yet been initialized,
2542 	 * skip reporting the internal error event to all
2543 	 * the communication channels.
2544 	 */
2545 	if (!priv->mfunc.comm)
2546 		return;
2547 
2548 	/* Report an internal error event to all
2549 	 * communication channels.
2550 	 */
2551 	for (slave = 0; slave < dev->num_slaves; slave++) {
2552 		slave_read = swab32(readl(&priv->mfunc.comm[slave].slave_read));
2553 		slave_read |= (u32)COMM_CHAN_EVENT_INTERNAL_ERR;
2554 		__raw_writel((__force u32)cpu_to_be32(slave_read),
2555 			     &priv->mfunc.comm[slave].slave_read);
2556 		/* Make sure that our comm channel write doesn't
2557 		 * get mixed in with writes from another CPU.
2558 		 */
2559 		mmiowb();
2560 	}
2561 }
2562 
2563 void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
2564 {
2565 	struct mlx4_priv *priv = mlx4_priv(dev);
2566 	int i, port;
2567 
2568 	if (mlx4_is_master(dev)) {
2569 		flush_workqueue(priv->mfunc.master.comm_wq);
2570 		destroy_workqueue(priv->mfunc.master.comm_wq);
2571 		for (i = 0; i < dev->num_slaves; i++) {
2572 			for (port = 1; port <= MLX4_MAX_PORTS; port++)
2573 				kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
2574 		}
2575 		kfree(priv->mfunc.master.slave_state);
2576 		kfree(priv->mfunc.master.vf_admin);
2577 		kfree(priv->mfunc.master.vf_oper);
2578 		dev->num_slaves = 0;
2579 	}
2580 
2581 	iounmap(priv->mfunc.comm);
2582 	priv->mfunc.comm = NULL;
2583 }
2584 
2585 void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask)
2586 {
2587 	struct mlx4_priv *priv = mlx4_priv(dev);
2588 
2589 	if (priv->cmd.pool && (cleanup_mask & MLX4_CMD_CLEANUP_POOL)) {
2590 		pci_pool_destroy(priv->cmd.pool);
2591 		priv->cmd.pool = NULL;
2592 	}
2593 
2594 	if (!mlx4_is_slave(dev) && priv->cmd.hcr &&
2595 	    (cleanup_mask & MLX4_CMD_CLEANUP_HCR)) {
2596 		iounmap(priv->cmd.hcr);
2597 		priv->cmd.hcr = NULL;
2598 	}
2599 	if (mlx4_is_mfunc(dev) && priv->mfunc.vhcr &&
2600 	    (cleanup_mask & MLX4_CMD_CLEANUP_VHCR)) {
2601 		dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
2602 				  priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
2603 		priv->mfunc.vhcr = NULL;
2604 	}
2605 	if (priv->cmd.initialized && (cleanup_mask & MLX4_CMD_CLEANUP_STRUCT))
2606 		priv->cmd.initialized = 0;
2607 }
2608 
2609 /*
2610  * Switch to using events to issue FW commands (can only be called
2611  * after event queue for command events has been initialized).
2612  */
2613 int mlx4_cmd_use_events(struct mlx4_dev *dev)
2614 {
2615 	struct mlx4_priv *priv = mlx4_priv(dev);
2616 	int i;
2617 	int err = 0;
2618 
	priv->cmd.context = kmalloc(priv->cmd.max_cmds *
				    sizeof(struct mlx4_cmd_context),
				    GFP_KERNEL);
2622 	if (!priv->cmd.context)
2623 		return -ENOMEM;
2624 
2625 	down_write(&priv->cmd.switch_sem);
2626 	for (i = 0; i < priv->cmd.max_cmds; ++i) {
2627 		priv->cmd.context[i].token = i;
2628 		priv->cmd.context[i].next  = i + 1;
2629 		/* To support fatal error flow, initialize all
2630 		 * cmd contexts to allow simulating completions
2631 		 * with complete() at any time.
2632 		 */
2633 		init_completion(&priv->cmd.context[i].done);
2634 	}
2635 
2636 	priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
2637 	priv->cmd.free_head = 0;
2638 
2639 	sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
2640 
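	/* Round token_mask up to the smallest (2^n - 1) covering max_cmds;
	 * as an illustrative example, max_cmds == 10 yields
	 * token_mask == 0xf.
	 */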
2641 	for (priv->cmd.token_mask = 1;
2642 	     priv->cmd.token_mask < priv->cmd.max_cmds;
2643 	     priv->cmd.token_mask <<= 1)
2644 		; /* nothing */
2645 	--priv->cmd.token_mask;
2646 
2647 	down(&priv->cmd.poll_sem);
2648 	priv->cmd.use_events = 1;
2649 	up_write(&priv->cmd.switch_sem);
2650 
2651 	return err;
2652 }
2653 
2654 /*
2655  * Switch back to polling (used when shutting down the device)
2656  */
2657 void mlx4_cmd_use_polling(struct mlx4_dev *dev)
2658 {
2659 	struct mlx4_priv *priv = mlx4_priv(dev);
2660 	int i;
2661 
2662 	down_write(&priv->cmd.switch_sem);
2663 	priv->cmd.use_events = 0;
2664 
2665 	for (i = 0; i < priv->cmd.max_cmds; ++i)
2666 		down(&priv->cmd.event_sem);
2667 
2668 	kfree(priv->cmd.context);
2669 
2670 	up(&priv->cmd.poll_sem);
2671 	up_write(&priv->cmd.switch_sem);
2672 }
2673 
2674 struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
2675 {
2676 	struct mlx4_cmd_mailbox *mailbox;
2677 
2678 	mailbox = kmalloc(sizeof *mailbox, GFP_KERNEL);
2679 	if (!mailbox)
2680 		return ERR_PTR(-ENOMEM);
2681 
2682 	mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
2683 				      &mailbox->dma);
2684 	if (!mailbox->buf) {
2685 		kfree(mailbox);
2686 		return ERR_PTR(-ENOMEM);
2687 	}
2688 
2689 	memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
2690 
2691 	return mailbox;
2692 }
2693 EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
2694 
2695 void mlx4_free_cmd_mailbox(struct mlx4_dev *dev,
2696 			   struct mlx4_cmd_mailbox *mailbox)
2697 {
2698 	if (!mailbox)
2699 		return;
2700 
2701 	pci_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma);
2702 	kfree(mailbox);
2703 }
2704 EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);
2705 
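/* Pack the channel interface revision into bits 15..8 and the channel
 * version into bits 7..0; e.g. an interface revision of 1 and a channel
 * version of 1 pack to 0x0101.
 */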
2706 u32 mlx4_comm_get_version(void)
2707 {
	return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER;
2709 }
2710 
2711 static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf)
2712 {
2713 	if ((vf < 0) || (vf >= dev->persist->num_vfs)) {
2714 		mlx4_err(dev, "Bad vf number:%d (number of activated vf: %d)\n",
2715 			 vf, dev->persist->num_vfs);
2716 		return -EINVAL;
2717 	}
2718 
	return vf + 1;
2720 }
2721 
2722 int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave)
2723 {
2724 	if (slave < 1 || slave > dev->persist->num_vfs) {
2725 		mlx4_err(dev,
2726 			 "Bad slave number:%d (number of activated slaves: %lu)\n",
2727 			 slave, dev->num_slaves);
2728 		return -EINVAL;
2729 	}
2730 	return slave - 1;
2731 }
2732 
2733 void mlx4_cmd_wake_completions(struct mlx4_dev *dev)
2734 {
2735 	struct mlx4_priv *priv = mlx4_priv(dev);
2736 	struct mlx4_cmd_context *context;
2737 	int i;
2738 
2739 	spin_lock(&priv->cmd.context_lock);
2740 	if (priv->cmd.context) {
2741 		for (i = 0; i < priv->cmd.max_cmds; ++i) {
2742 			context = &priv->cmd.context[i];
2743 			context->fw_status = CMD_STAT_INTERNAL_ERR;
2744 			context->result    =
2745 				mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
2746 			complete(&context->done);
2747 		}
2748 	}
2749 	spin_unlock(&priv->cmd.context_lock);
2750 }
2751 
2752 struct mlx4_active_ports mlx4_get_active_ports(struct mlx4_dev *dev, int slave)
2753 {
2754 	struct mlx4_active_ports actv_ports;
2755 	int vf;
2756 
2757 	bitmap_zero(actv_ports.ports, MLX4_MAX_PORTS);
2758 
2759 	if (slave == 0) {
2760 		bitmap_fill(actv_ports.ports, dev->caps.num_ports);
2761 		return actv_ports;
2762 	}
2763 
2764 	vf = mlx4_get_vf_indx(dev, slave);
2765 	if (vf < 0)
2766 		return actv_ports;
2767 
	bitmap_set(actv_ports.ports, dev->dev_vfs[vf].min_port - 1,
		   min((int)dev->dev_vfs[vf].n_ports,
		       dev->caps.num_ports));
2771 
2772 	return actv_ports;
2773 }
2774 EXPORT_SYMBOL_GPL(mlx4_get_active_ports);
2775 
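/*
 * Convert a slave-relative port number to a physical port number. As an
 * illustrative example, a VF whose only active physical port is port 2
 * has m == 1 and n == 1, so its slave port 1 converts to physical
 * port 2.
 */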
2776 int mlx4_slave_convert_port(struct mlx4_dev *dev, int slave, int port)
2777 {
2778 	unsigned n;
2779 	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
2780 	unsigned m = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
2781 
2782 	if (port <= 0 || port > m)
2783 		return -EINVAL;
2784 
2785 	n = find_first_bit(actv_ports.ports, dev->caps.num_ports);
2786 	if (port <= n)
2787 		port = n + 1;
2788 
2789 	return port;
2790 }
2791 EXPORT_SYMBOL_GPL(mlx4_slave_convert_port);
2792 
2793 int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port)
2794 {
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);

	if (test_bit(port - 1, actv_ports.ports))
2797 		return port -
2798 			find_first_bit(actv_ports.ports, dev->caps.num_ports);
2799 
2800 	return -1;
2801 }
2802 EXPORT_SYMBOL_GPL(mlx4_phys_to_slave_port);
2803 
2804 struct mlx4_slaves_pport mlx4_phys_to_slaves_pport(struct mlx4_dev *dev,
2805 						   int port)
2806 {
2807 	unsigned i;
2808 	struct mlx4_slaves_pport slaves_pport;
2809 
2810 	bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);
2811 
2812 	if (port <= 0 || port > dev->caps.num_ports)
2813 		return slaves_pport;
2814 
2815 	for (i = 0; i < dev->persist->num_vfs + 1; i++) {
2816 		struct mlx4_active_ports actv_ports =
2817 			mlx4_get_active_ports(dev, i);
2818 		if (test_bit(port - 1, actv_ports.ports))
2819 			set_bit(i, slaves_pport.slaves);
2820 	}
2821 
2822 	return slaves_pport;
2823 }
2824 EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport);
2825 
2826 struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
2827 		struct mlx4_dev *dev,
2828 		const struct mlx4_active_ports *crit_ports)
2829 {
2830 	unsigned i;
2831 	struct mlx4_slaves_pport slaves_pport;
2832 
2833 	bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);
2834 
2835 	for (i = 0; i < dev->persist->num_vfs + 1; i++) {
2836 		struct mlx4_active_ports actv_ports =
2837 			mlx4_get_active_ports(dev, i);
2838 		if (bitmap_equal(crit_ports->ports, actv_ports.ports,
2839 				 dev->caps.num_ports))
2840 			set_bit(i, slaves_pport.slaves);
2841 	}
2842 
2843 	return slaves_pport;
2844 }
2845 EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv);
2846 
2847 static int mlx4_slaves_closest_port(struct mlx4_dev *dev, int slave, int port)
2848 {
2849 	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
2850 	int min_port = find_first_bit(actv_ports.ports, dev->caps.num_ports)
2851 			+ 1;
2852 	int max_port = min_port +
2853 		bitmap_weight(actv_ports.ports, dev->caps.num_ports);
2854 
2855 	if (port < min_port)
2856 		port = min_port;
2857 	else if (port >= max_port)
2858 		port = max_port - 1;
2859 
2860 	return port;
2861 }
2862 
2863 static int mlx4_set_vport_qos(struct mlx4_priv *priv, int slave, int port,
2864 			      int max_tx_rate)
2865 {
2866 	int i;
2867 	int err;
2868 	struct mlx4_qos_manager *port_qos;
2869 	struct mlx4_dev *dev = &priv->dev;
2870 	struct mlx4_vport_qos_param vpp_qos[MLX4_NUM_UP];
2871 
2872 	port_qos = &priv->mfunc.master.qos_ctl[port];
2873 	memset(vpp_qos, 0, sizeof(struct mlx4_vport_qos_param) * MLX4_NUM_UP);
2874 
2875 	if (slave > port_qos->num_of_qos_vfs) {
2876 		mlx4_info(dev, "No availible VPP resources for this VF\n");
2877 		return -EINVAL;
2878 	}
2879 
	/* Query the default QoS values from Vport 0 first */
2881 	err = mlx4_SET_VPORT_QOS_get(dev, port, 0, vpp_qos);
2882 	if (err) {
2883 		mlx4_info(dev, "Failed to query Vport 0 QoS values\n");
2884 		return err;
2885 	}
2886 
2887 	for (i = 0; i < MLX4_NUM_UP; i++) {
2888 		if (test_bit(i, port_qos->priority_bm) && max_tx_rate) {
2889 			vpp_qos[i].max_avg_bw = max_tx_rate;
2890 			vpp_qos[i].enable = 1;
2891 		} else {
			/* The user supplied tx_rate == 0, meaning no rate
			 * limit configuration is required, so we leave
			 * max_avg_bw at the value queried from Vport 0.
			 */
2896 			vpp_qos[i].enable = 0;
2897 		}
2898 	}
2899 
2900 	err = mlx4_SET_VPORT_QOS_set(dev, port, slave, vpp_qos);
2901 	if (err) {
2902 		mlx4_info(dev, "Failed to set Vport %d QoS values\n", slave);
2903 		return err;
2904 	}
2905 
2906 	return 0;
2907 }
2908 
2909 static bool mlx4_is_vf_vst_and_prio_qos(struct mlx4_dev *dev, int port,
2910 					struct mlx4_vport_state *vf_admin)
2911 {
2912 	struct mlx4_qos_manager *info;
2913 	struct mlx4_priv *priv = mlx4_priv(dev);
2914 
2915 	if (!mlx4_is_master(dev) ||
2916 	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP))
2917 		return false;
2918 
2919 	info = &priv->mfunc.master.qos_ctl[port];
2920 
2921 	if (vf_admin->default_vlan != MLX4_VGT &&
2922 	    test_bit(vf_admin->default_qos, info->priority_bm))
2923 		return true;
2924 
2925 	return false;
2926 }
2927 
2928 static bool mlx4_valid_vf_state_change(struct mlx4_dev *dev, int port,
2929 				       struct mlx4_vport_state *vf_admin,
2930 				       int vlan, int qos)
2931 {
2932 	struct mlx4_vport_state dummy_admin = {0};
2933 
2934 	if (!mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin) ||
2935 	    !vf_admin->tx_rate)
2936 		return true;
2937 
2938 	dummy_admin.default_qos = qos;
2939 	dummy_admin.default_vlan = vlan;
2940 
	/* The VF wants to move to another VST state which is valid with the
	 * current rate limit: either a different default vlan in VST or
	 * another supported QoS priority. Otherwise we don't allow the
	 * change while the TX rate is still configured.
	 */
2946 	if (mlx4_is_vf_vst_and_prio_qos(dev, port, &dummy_admin))
2947 		return true;
2948 
2949 	mlx4_info(dev, "Cannot change VF state to %s while rate is set\n",
2950 		  (vlan == MLX4_VGT) ? "VGT" : "VST");
2951 
2952 	if (vlan != MLX4_VGT)
2953 		mlx4_info(dev, "VST priority %d not supported for QoS\n", qos);
2954 
2955 	mlx4_info(dev, "Please set rate to 0 prior to this VF state change\n");
2956 
2957 	return false;
2958 }
2959 
2960 int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
2961 {
2962 	struct mlx4_priv *priv = mlx4_priv(dev);
2963 	struct mlx4_vport_state *s_info;
2964 	int slave;
2965 
2966 	if (!mlx4_is_master(dev))
2967 		return -EPROTONOSUPPORT;
2968 
2969 	slave = mlx4_get_slave_indx(dev, vf);
2970 	if (slave < 0)
2971 		return -EINVAL;
2972 
2973 	port = mlx4_slaves_closest_port(dev, slave, port);
2974 	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
2975 	s_info->mac = mac;
2976 	mlx4_info(dev, "default mac on vf %d port %d to %llX will take effect only after vf restart\n",
2977 		  vf, port, s_info->mac);
2978 	return 0;
2979 }
2980 EXPORT_SYMBOL_GPL(mlx4_set_vf_mac);
2981 
2982 
2983 int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos,
2984 		     __be16 proto)
2985 {
2986 	struct mlx4_priv *priv = mlx4_priv(dev);
2987 	struct mlx4_vport_state *vf_admin;
2988 	struct mlx4_slave_state *slave_state;
2989 	struct mlx4_vport_oper_state *vf_oper;
2990 	int slave;
2991 
2992 	if ((!mlx4_is_master(dev)) ||
2993 	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VLAN_CONTROL))
2994 		return -EPROTONOSUPPORT;
2995 
2996 	if ((vlan > 4095) || (qos > 7))
2997 		return -EINVAL;
2998 
2999 	if (proto == htons(ETH_P_8021AD) &&
3000 	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SVLAN_BY_QP))
3001 		return -EPROTONOSUPPORT;
3002 
3003 	if (proto != htons(ETH_P_8021Q) &&
3004 	    proto != htons(ETH_P_8021AD))
3005 		return -EINVAL;
3006 
3007 	if ((proto == htons(ETH_P_8021AD)) &&
3008 	    ((vlan == 0) || (vlan == MLX4_VGT)))
3009 		return -EINVAL;
3010 
3011 	slave = mlx4_get_slave_indx(dev, vf);
3012 	if (slave < 0)
3013 		return -EINVAL;
3014 
3015 	slave_state = &priv->mfunc.master.slave_state[slave];
3016 	if ((proto == htons(ETH_P_8021AD)) && (slave_state->active) &&
3017 	    (!slave_state->vst_qinq_supported)) {
3018 		mlx4_err(dev, "vf %d does not support VST QinQ mode\n", vf);
3019 		return -EPROTONOSUPPORT;
3020 	}
3021 	port = mlx4_slaves_closest_port(dev, slave, port);
3022 	vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
3023 	vf_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
3024 
3025 	if (!mlx4_valid_vf_state_change(dev, port, vf_admin, vlan, qos))
3026 		return -EPERM;
3027 
3028 	if ((0 == vlan) && (0 == qos))
3029 		vf_admin->default_vlan = MLX4_VGT;
3030 	else
3031 		vf_admin->default_vlan = vlan;
3032 	vf_admin->default_qos = qos;
3033 	vf_admin->vlan_proto = proto;
3034 
	/* If a rate was configured prior to VST, we saved it in
	 * vf_admin->tx_rate; now, if the priority is supported, we enforce
	 * the QoS.
	 */
3038 	if (mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin) &&
3039 	    vf_admin->tx_rate)
3040 		vf_admin->qos_vport = slave;
3041 
	/* Try to activate the new VF state without a restart;
	 * this option is not supported when moving to VST QinQ mode.
	 */
3045 	if ((proto == htons(ETH_P_8021AD) &&
3046 	     vf_oper->state.vlan_proto != proto) ||
3047 	    mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
3048 		mlx4_info(dev,
3049 			  "updating vf %d port %d config will take effect on next VF restart\n",
3050 			  vf, port);
3051 	return 0;
3052 }
3053 EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan);
3054 
3055 int mlx4_set_vf_rate(struct mlx4_dev *dev, int port, int vf, int min_tx_rate,
3056 		     int max_tx_rate)
3057 {
3058 	int err;
3059 	int slave;
3060 	struct mlx4_vport_state *vf_admin;
3061 	struct mlx4_priv *priv = mlx4_priv(dev);
3062 
3063 	if (!mlx4_is_master(dev) ||
3064 	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP))
3065 		return -EPROTONOSUPPORT;
3066 
3067 	if (min_tx_rate) {
3068 		mlx4_info(dev, "Minimum BW share not supported\n");
3069 		return -EPROTONOSUPPORT;
3070 	}
3071 
3072 	slave = mlx4_get_slave_indx(dev, vf);
3073 	if (slave < 0)
3074 		return -EINVAL;
3075 
3076 	port = mlx4_slaves_closest_port(dev, slave, port);
3077 	vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
3078 
3079 	err = mlx4_set_vport_qos(priv, slave, port, max_tx_rate);
3080 	if (err) {
3081 		mlx4_info(dev, "vf %d failed to set rate %d\n", vf,
3082 			  max_tx_rate);
3083 		return err;
3084 	}
3085 
3086 	vf_admin->tx_rate = max_tx_rate;
	/* If the VF is not in a supported mode (VST with a supported
	 * priority), we do not change the vport configuration for its QPs,
	 * but save the rate so it will be enforced when it next moves to a
	 * supported mode.
	 */
3092 	if (!mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin)) {
3093 		mlx4_info(dev,
3094 			  "rate set for VF %d when not in valid state\n", vf);
3095 
3096 		if (vf_admin->default_vlan != MLX4_VGT)
3097 			mlx4_info(dev, "VST priority not supported by QoS\n");
3098 		else
3099 			mlx4_info(dev, "VF in VGT mode (needed VST)\n");
3100 
3101 		mlx4_info(dev,
3102 			  "rate %d take affect when VF moves to valid state\n",
3103 			  max_tx_rate);
3104 		return 0;
3105 	}
3106 
	/* If the user sets rate 0, assign the default vport to its QPs */
3108 	vf_admin->qos_vport = max_tx_rate ? slave : MLX4_VPP_DEFAULT_VPORT;
3109 
3110 	if (priv->mfunc.master.slave_state[slave].active &&
3111 	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)
3112 		mlx4_master_immediate_activate_vlan_qos(priv, slave, port);
3113 
3114 	return 0;
3115 }
3116 EXPORT_SYMBOL_GPL(mlx4_set_vf_rate);
3117 
/* mlx4_get_slave_default_vlan -
 * return true if the slave is in VST mode (i.e. has a default vlan);
 * if so, also return the vlan & qos (if not NULL)
 */
3122 bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave,
3123 				 u16 *vlan, u8 *qos)
3124 {
3125 	struct mlx4_vport_oper_state *vp_oper;
3126 	struct mlx4_priv *priv;
3127 
3128 	priv = mlx4_priv(dev);
3129 	port = mlx4_slaves_closest_port(dev, slave, port);
3130 	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
3131 
3132 	if (MLX4_VGT != vp_oper->state.default_vlan) {
3133 		if (vlan)
3134 			*vlan = vp_oper->state.default_vlan;
3135 		if (qos)
3136 			*qos = vp_oper->state.default_qos;
3137 		return true;
3138 	}
3139 	return false;
3140 }
3141 EXPORT_SYMBOL_GPL(mlx4_get_slave_default_vlan);
3142 
3143 int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
3144 {
3145 	struct mlx4_priv *priv = mlx4_priv(dev);
3146 	struct mlx4_vport_state *s_info;
3147 	int slave;
3148 
3149 	if ((!mlx4_is_master(dev)) ||
3150 	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FSM))
3151 		return -EPROTONOSUPPORT;
3152 
3153 	slave = mlx4_get_slave_indx(dev, vf);
3154 	if (slave < 0)
3155 		return -EINVAL;
3156 
3157 	port = mlx4_slaves_closest_port(dev, slave, port);
3158 	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
3159 	s_info->spoofchk = setting;
3160 
3161 	return 0;
3162 }
3163 EXPORT_SYMBOL_GPL(mlx4_set_vf_spoofchk);
3164 
3165 int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf)
3166 {
3167 	struct mlx4_priv *priv = mlx4_priv(dev);
3168 	struct mlx4_vport_state *s_info;
3169 	int slave;
3170 
3171 	if (!mlx4_is_master(dev))
3172 		return -EPROTONOSUPPORT;
3173 
3174 	slave = mlx4_get_slave_indx(dev, vf);
3175 	if (slave < 0)
3176 		return -EINVAL;
3177 
3178 	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
3179 	ivf->vf = vf;
3180 
	/* Unpack the u64 mac into the byte array, most significant byte
	 * first (illustrative value: 0x0002c9123456 becomes
	 * 00:02:c9:12:34:56); this could be factored out into a helper.
	 */
3182 	ivf->mac[0] = ((s_info->mac >> (5*8)) & 0xff);
3183 	ivf->mac[1] = ((s_info->mac >> (4*8)) & 0xff);
3184 	ivf->mac[2] = ((s_info->mac >> (3*8)) & 0xff);
3185 	ivf->mac[3] = ((s_info->mac >> (2*8)) & 0xff);
3186 	ivf->mac[4] = ((s_info->mac >> (1*8)) & 0xff);
3187 	ivf->mac[5] = ((s_info->mac)  & 0xff);
3188 
3189 	ivf->vlan		= s_info->default_vlan;
3190 	ivf->qos		= s_info->default_qos;
3191 	ivf->vlan_proto		= s_info->vlan_proto;
3192 
3193 	if (mlx4_is_vf_vst_and_prio_qos(dev, port, s_info))
3194 		ivf->max_tx_rate = s_info->tx_rate;
3195 	else
3196 		ivf->max_tx_rate = 0;
3197 
3198 	ivf->min_tx_rate	= 0;
3199 	ivf->spoofchk		= s_info->spoofchk;
3200 	ivf->linkstate		= s_info->link_state;
3201 
3202 	return 0;
3203 }
3204 EXPORT_SYMBOL_GPL(mlx4_get_vf_config);
3205 
3206 int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state)
3207 {
3208 	struct mlx4_priv *priv = mlx4_priv(dev);
3209 	struct mlx4_vport_state *s_info;
3210 	int slave;
3211 	u8 link_stat_event;
3212 
3213 	slave = mlx4_get_slave_indx(dev, vf);
3214 	if (slave < 0)
3215 		return -EINVAL;
3216 
3217 	port = mlx4_slaves_closest_port(dev, slave, port);
3218 	switch (link_state) {
3219 	case IFLA_VF_LINK_STATE_AUTO:
3220 		/* get current link state */
3221 		if (!priv->sense.do_sense_port[port])
3222 			link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
3223 		else
3224 			link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
		break;
3226 
3227 	case IFLA_VF_LINK_STATE_ENABLE:
3228 		link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
		break;
3230 
3231 	case IFLA_VF_LINK_STATE_DISABLE:
3232 		link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
		break;
3234 
3235 	default:
3236 		mlx4_warn(dev, "unknown value for link_state %02x on slave %d port %d\n",
3237 			  link_state, slave, port);
3238 		return -EINVAL;
3239 	};
3240 	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
3241 	s_info->link_state = link_state;
3242 
3243 	/* send event */
3244 	mlx4_gen_port_state_change_eqe(dev, slave, port, link_stat_event);
3245 
3246 	if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
3247 		mlx4_dbg(dev,
3248 			 "updating vf %d port %d no link state HW enforcment\n",
3249 			 vf, port);
3250 	return 0;
3251 }
3252 EXPORT_SYMBOL_GPL(mlx4_set_vf_link_state);
3253 
3254 int mlx4_get_counter_stats(struct mlx4_dev *dev, int counter_index,
3255 			   struct mlx4_counter *counter_stats, int reset)
3256 {
3257 	struct mlx4_cmd_mailbox *mailbox = NULL;
3258 	struct mlx4_counter *tmp_counter;
3259 	int err;
3260 	u32 if_stat_in_mod;
3261 
3262 	if (!counter_stats)
3263 		return -EINVAL;
3264 
3265 	if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
3266 		return 0;
3267 
3268 	mailbox = mlx4_alloc_cmd_mailbox(dev);
3269 	if (IS_ERR(mailbox))
3270 		return PTR_ERR(mailbox);
3271 
3272 	memset(mailbox->buf, 0, sizeof(struct mlx4_counter));
3273 	if_stat_in_mod = counter_index;
3274 	if (reset)
3275 		if_stat_in_mod |= MLX4_QUERY_IF_STAT_RESET;
3276 	err = mlx4_cmd_box(dev, 0, mailbox->dma,
3277 			   if_stat_in_mod, 0,
3278 			   MLX4_CMD_QUERY_IF_STAT,
3279 			   MLX4_CMD_TIME_CLASS_C,
3280 			   MLX4_CMD_NATIVE);
3281 	if (err) {
3282 		mlx4_dbg(dev, "%s: failed to read statistics for counter index %d\n",
3283 			 __func__, counter_index);
3284 		goto if_stat_out;
3285 	}
3286 	tmp_counter = (struct mlx4_counter *)mailbox->buf;
3287 	counter_stats->counter_mode = tmp_counter->counter_mode;
3288 	if (counter_stats->counter_mode == 0) {
3289 		counter_stats->rx_frames =
3290 			cpu_to_be64(be64_to_cpu(counter_stats->rx_frames) +
3291 				    be64_to_cpu(tmp_counter->rx_frames));
3292 		counter_stats->tx_frames =
3293 			cpu_to_be64(be64_to_cpu(counter_stats->tx_frames) +
3294 				    be64_to_cpu(tmp_counter->tx_frames));
3295 		counter_stats->rx_bytes =
3296 			cpu_to_be64(be64_to_cpu(counter_stats->rx_bytes) +
3297 				    be64_to_cpu(tmp_counter->rx_bytes));
3298 		counter_stats->tx_bytes =
3299 			cpu_to_be64(be64_to_cpu(counter_stats->tx_bytes) +
3300 				    be64_to_cpu(tmp_counter->tx_bytes));
3301 	}
3302 
3303 if_stat_out:
3304 	mlx4_free_cmd_mailbox(dev, mailbox);
3305 
3306 	return err;
3307 }
3308 EXPORT_SYMBOL_GPL(mlx4_get_counter_stats);
3309 
3310 int mlx4_get_vf_stats(struct mlx4_dev *dev, int port, int vf_idx,
3311 		      struct ifla_vf_stats *vf_stats)
3312 {
3313 	struct mlx4_counter tmp_vf_stats;
3314 	int slave;
3315 	int err = 0;
3316 
3317 	if (!vf_stats)
3318 		return -EINVAL;
3319 
3320 	if (!mlx4_is_master(dev))
3321 		return -EPROTONOSUPPORT;
3322 
3323 	slave = mlx4_get_slave_indx(dev, vf_idx);
3324 	if (slave < 0)
3325 		return -EINVAL;
3326 
3327 	port = mlx4_slaves_closest_port(dev, slave, port);
3328 	err = mlx4_calc_vf_counters(dev, slave, port, &tmp_vf_stats);
3329 	if (!err && tmp_vf_stats.counter_mode == 0) {
3330 		vf_stats->rx_packets = be64_to_cpu(tmp_vf_stats.rx_frames);
3331 		vf_stats->tx_packets = be64_to_cpu(tmp_vf_stats.tx_frames);
3332 		vf_stats->rx_bytes = be64_to_cpu(tmp_vf_stats.rx_bytes);
3333 		vf_stats->tx_bytes = be64_to_cpu(tmp_vf_stats.tx_bytes);
3334 	}
3335 
3336 	return err;
3337 }
3338 EXPORT_SYMBOL_GPL(mlx4_get_vf_stats);
3339 
3340 int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port)
3341 {
3342 	struct mlx4_priv *priv = mlx4_priv(dev);
3343 
3344 	if (slave < 1 || slave >= dev->num_slaves ||
3345 	    port < 1 || port > MLX4_MAX_PORTS)
3346 		return 0;
3347 
3348 	return priv->mfunc.master.vf_oper[slave].smi_enabled[port] ==
3349 		MLX4_VF_SMI_ENABLED;
3350 }
3351 EXPORT_SYMBOL_GPL(mlx4_vf_smi_enabled);
3352 
3353 int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port)
3354 {
3355 	struct mlx4_priv *priv = mlx4_priv(dev);
3356 
3357 	if (slave == mlx4_master_func_num(dev))
3358 		return 1;
3359 
3360 	if (slave < 1 || slave >= dev->num_slaves ||
3361 	    port < 1 || port > MLX4_MAX_PORTS)
3362 		return 0;
3363 
3364 	return priv->mfunc.master.vf_admin[slave].enable_smi[port] ==
3365 		MLX4_VF_SMI_ENABLED;
3366 }
3367 EXPORT_SYMBOL_GPL(mlx4_vf_get_enable_smi_admin);
3368 
3369 int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
3370 				 int enabled)
3371 {
3372 	struct mlx4_priv *priv = mlx4_priv(dev);
3373 	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
3374 			&priv->dev, slave);
3375 	int min_port = find_first_bit(actv_ports.ports,
3376 				      priv->dev.caps.num_ports) + 1;
3377 	int max_port = min_port - 1 +
3378 		bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
3379 
3380 	if (slave == mlx4_master_func_num(dev))
3381 		return 0;
3382 
3383 	if (slave < 1 || slave >= dev->num_slaves ||
3384 	    port < 1 || port > MLX4_MAX_PORTS ||
3385 	    enabled < 0 || enabled > 1)
3386 		return -EINVAL;
3387 
3388 	if (min_port == max_port && dev->caps.num_ports > 1) {
3389 		mlx4_info(dev, "SMI access disallowed for single ported VFs\n");
3390 		return -EPROTONOSUPPORT;
3391 	}
3392 
3393 	priv->mfunc.master.vf_admin[slave].enable_smi[port] = enabled;
3394 	return 0;
3395 }
3396 EXPORT_SYMBOL_GPL(mlx4_vf_set_enable_smi_admin);
3397