1 /*
2  * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
4  * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/export.h>
38 #include <linux/pci.h>
39 #include <linux/errno.h>
40 
41 #include <linux/mlx4/cmd.h>
42 #include <linux/mlx4/device.h>
43 #include <linux/semaphore.h>
44 #include <rdma/ib_smi.h>
45 #include <linux/delay.h>
46 
47 #include <asm/io.h>
48 
49 #include "mlx4.h"
50 #include "fw.h"
51 
52 #define CMD_POLL_TOKEN 0xffff
53 #define INBOX_MASK	0xffffffffffffff00ULL
54 
55 #define CMD_CHAN_VER 1
56 #define CMD_CHAN_IF_REV 1
57 
/* Command execution status codes returned by FW in the HCR status byte */
enum {
	/* command completed successfully: */
	CMD_STAT_OK		= 0x00,
	/* Internal error (such as a bus error) occurred while processing command: */
	CMD_STAT_INTERNAL_ERR	= 0x01,
	/* Operation/command not supported or opcode modifier not supported: */
	CMD_STAT_BAD_OP		= 0x02,
	/* Parameter not supported or parameter out of range: */
	CMD_STAT_BAD_PARAM	= 0x03,
	/* System not enabled or bad system state: */
	CMD_STAT_BAD_SYS_STATE	= 0x04,
	/* Attempt to access reserved or unallocated resource: */
	CMD_STAT_BAD_RESOURCE	= 0x05,
	/* Requested resource is currently executing a command, or is otherwise busy: */
	CMD_STAT_RESOURCE_BUSY	= 0x06,
	/* Required capability exceeds device limits: */
	CMD_STAT_EXCEED_LIM	= 0x08,
	/* Resource is not in the appropriate state or ownership: */
	CMD_STAT_BAD_RES_STATE	= 0x09,
	/* Index out of range: */
	CMD_STAT_BAD_INDEX	= 0x0a,
	/* FW image corrupted: */
	CMD_STAT_BAD_NVMEM	= 0x0b,
	/* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */
	CMD_STAT_ICM_ERROR	= 0x0c,
	/* Attempt to modify a QP/EE which is not in the presumed state: */
	CMD_STAT_BAD_QP_STATE   = 0x10,
	/* Bad segment parameters (Address/Size): */
	CMD_STAT_BAD_SEG_PARAM	= 0x20,
	/* Memory Region has Memory Windows bound to: */
	CMD_STAT_REG_BOUND	= 0x21,
	/* HCA local attached memory not present: */
	CMD_STAT_LAM_NOT_PRE	= 0x22,
	/* Bad management packet (silently discarded): */
	CMD_STAT_BAD_PKT	= 0x30,
	/* More outstanding CQEs in CQ than new CQ size: */
	CMD_STAT_BAD_SIZE	= 0x40,
	/* Multi Function device support required: */
	CMD_STAT_MULTI_FUNC_REQ	= 0x50,
};
98 
/* HCR register layout: byte offsets of the command parameter fields, and
 * bit positions / shifts within the control-status dword at offset 0x18
 * (written as the last dword when posting a command, see mlx4_cmd_post()).
 */
enum {
	HCR_IN_PARAM_OFFSET	= 0x00,
	HCR_IN_MODIFIER_OFFSET	= 0x08,
	HCR_OUT_PARAM_OFFSET	= 0x0c,
	HCR_TOKEN_OFFSET	= 0x14,
	HCR_STATUS_OFFSET	= 0x18,

	HCR_OPMOD_SHIFT		= 12,	/* opcode modifier */
	HCR_T_BIT		= 21,	/* toggle bit */
	HCR_E_BIT		= 22,	/* event (completion EQE) request */
	HCR_GO_BIT		= 23	/* ownership: set while FW owns the HCR */
};
111 
enum {
	/* Max time (msecs) to wait for the HCR GO bit to clear before
	 * posting a command in event mode (see mlx4_cmd_post()).
	 */
	GO_BIT_TIMEOUT_MSECS	= 10000
};
115 
/* VLAN mode transition types for a VF port.
 * NOTE(review): VST/VGT presumably expand to VLAN Switch Tagging /
 * Virtual Guest Tagging per mlx4 SR-IOV convention -- confirm against PRM.
 */
enum mlx4_vlan_transition {
	MLX4_VLAN_TRANSITION_VST_VST = 0,
	MLX4_VLAN_TRANSITION_VST_VGT = 1,
	MLX4_VLAN_TRANSITION_VGT_VST = 2,
	MLX4_VLAN_TRANSITION_VGT_VGT = 3,
};
122 
123 
/* Per-outstanding-command state used in event (completion) mode */
struct mlx4_cmd_context {
	struct completion	done;		/* signalled by mlx4_cmd_event() */
	int			result;		/* errno translated from fw_status */
	int			next;		/* free-list link (index into context array) */
	u64			out_param;	/* immediate output value, if any */
	u16			token;		/* matches a completion to this context */
	u8			fw_status;	/* raw FW status byte */
};
132 
133 static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
134 				    struct mlx4_vhcr_cmd *in_vhcr);
135 
136 static int mlx4_status_to_errno(u8 status)
137 {
138 	static const int trans_table[] = {
139 		[CMD_STAT_INTERNAL_ERR]	  = -EIO,
140 		[CMD_STAT_BAD_OP]	  = -EPERM,
141 		[CMD_STAT_BAD_PARAM]	  = -EINVAL,
142 		[CMD_STAT_BAD_SYS_STATE]  = -ENXIO,
143 		[CMD_STAT_BAD_RESOURCE]	  = -EBADF,
144 		[CMD_STAT_RESOURCE_BUSY]  = -EBUSY,
145 		[CMD_STAT_EXCEED_LIM]	  = -ENOMEM,
146 		[CMD_STAT_BAD_RES_STATE]  = -EBADF,
147 		[CMD_STAT_BAD_INDEX]	  = -EBADF,
148 		[CMD_STAT_BAD_NVMEM]	  = -EFAULT,
149 		[CMD_STAT_ICM_ERROR]	  = -ENFILE,
150 		[CMD_STAT_BAD_QP_STATE]   = -EINVAL,
151 		[CMD_STAT_BAD_SEG_PARAM]  = -EFAULT,
152 		[CMD_STAT_REG_BOUND]	  = -EBUSY,
153 		[CMD_STAT_LAM_NOT_PRE]	  = -EAGAIN,
154 		[CMD_STAT_BAD_PKT]	  = -EINVAL,
155 		[CMD_STAT_BAD_SIZE]	  = -ENOMEM,
156 		[CMD_STAT_MULTI_FUNC_REQ] = -EACCES,
157 	};
158 
159 	if (status >= ARRAY_SIZE(trans_table) ||
160 	    (status != CMD_STAT_OK && trans_table[status] == 0))
161 		return -EIO;
162 
163 	return trans_table[status];
164 }
165 
166 static u8 mlx4_errno_to_status(int errno)
167 {
168 	switch (errno) {
169 	case -EPERM:
170 		return CMD_STAT_BAD_OP;
171 	case -EINVAL:
172 		return CMD_STAT_BAD_PARAM;
173 	case -ENXIO:
174 		return CMD_STAT_BAD_SYS_STATE;
175 	case -EBUSY:
176 		return CMD_STAT_RESOURCE_BUSY;
177 	case -ENOMEM:
178 		return CMD_STAT_EXCEED_LIM;
179 	case -ENFILE:
180 		return CMD_STAT_ICM_ERROR;
181 	default:
182 		return CMD_STAT_INTERNAL_ERR;
183 	}
184 }
185 
186 static int mlx4_internal_err_ret_value(struct mlx4_dev *dev, u16 op,
187 				       u8 op_modifier)
188 {
189 	switch (op) {
190 	case MLX4_CMD_UNMAP_ICM:
191 	case MLX4_CMD_UNMAP_ICM_AUX:
192 	case MLX4_CMD_UNMAP_FA:
193 	case MLX4_CMD_2RST_QP:
194 	case MLX4_CMD_HW2SW_EQ:
195 	case MLX4_CMD_HW2SW_CQ:
196 	case MLX4_CMD_HW2SW_SRQ:
197 	case MLX4_CMD_HW2SW_MPT:
198 	case MLX4_CMD_CLOSE_HCA:
199 	case MLX4_QP_FLOW_STEERING_DETACH:
200 	case MLX4_CMD_FREE_RES:
201 	case MLX4_CMD_CLOSE_PORT:
202 		return CMD_STAT_OK;
203 
204 	case MLX4_CMD_QP_ATTACH:
205 		/* On Detach case return success */
206 		if (op_modifier == 0)
207 			return CMD_STAT_OK;
208 		return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
209 
210 	default:
211 		return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
212 	}
213 }
214 
215 static int mlx4_closing_cmd_fatal_error(u16 op, u8 fw_status)
216 {
217 	/* Any error during the closing commands below is considered fatal */
218 	if (op == MLX4_CMD_CLOSE_HCA ||
219 	    op == MLX4_CMD_HW2SW_EQ ||
220 	    op == MLX4_CMD_HW2SW_CQ ||
221 	    op == MLX4_CMD_2RST_QP ||
222 	    op == MLX4_CMD_HW2SW_SRQ ||
223 	    op == MLX4_CMD_SYNC_TPT ||
224 	    op == MLX4_CMD_UNMAP_ICM ||
225 	    op == MLX4_CMD_UNMAP_ICM_AUX ||
226 	    op == MLX4_CMD_UNMAP_FA)
227 		return 1;
228 	/* Error on MLX4_CMD_HW2SW_MPT is fatal except when fw status equals
229 	  * CMD_STAT_REG_BOUND.
230 	  * This status indicates that memory region has memory windows bound to it
231 	  * which may result from invalid user space usage and is not fatal.
232 	  */
233 	if (op == MLX4_CMD_HW2SW_MPT && fw_status != CMD_STAT_REG_BOUND)
234 		return 1;
235 	return 0;
236 }
237 
238 static int mlx4_cmd_reset_flow(struct mlx4_dev *dev, u16 op, u8 op_modifier,
239 			       int err)
240 {
241 	/* Only if reset flow is really active return code is based on
242 	  * command, otherwise current error code is returned.
243 	  */
244 	if (mlx4_internal_err_reset) {
245 		mlx4_enter_error_state(dev->persist);
246 		err = mlx4_internal_err_ret_value(dev, op, op_modifier);
247 	}
248 
249 	return err;
250 }
251 
252 static int comm_pending(struct mlx4_dev *dev)
253 {
254 	struct mlx4_priv *priv = mlx4_priv(dev);
255 	u32 status = readl(&priv->mfunc.comm->slave_read);
256 
257 	return (swab32(status) >> 31) != priv->cmd.comm_toggle;
258 }
259 
/*
 * Post a command on the comm channel by writing @param, @cmd and the
 * flipped toggle bit to the slave_write register.  Returns 0 on success,
 * or -EIO without touching MMIO if the device is in internal-error state.
 */
static int mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u32 val;

	/* To avoid writing to unknown addresses after the device state was
	 * changed to internal error and the function was reset,
	 * check the INTERNAL_ERROR flag which is updated under
	 * device_state_mutex lock.
	 */
	mutex_lock(&dev->persist->device_state_mutex);

	if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		mutex_unlock(&dev->persist->device_state_mutex);
		return -EIO;
	}

	/* Flip the toggle; comm_pending() waits for the master to echo it */
	priv->cmd.comm_toggle ^= 1;
	val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
	__raw_writel((__force u32) cpu_to_be32(val),
		     &priv->mfunc.comm->slave_write);
	mmiowb();	/* order the MMIO write before releasing the lock */
	mutex_unlock(&dev->persist->device_state_mutex);
	return 0;
}
285 
/*
 * Issue a comm-channel command in polling mode: post it, then busy-wait
 * (with cond_resched) until the master echoes the toggle back or @timeout
 * msecs elapse.  Serialized by cmd.poll_sem.  A timeout drives the device
 * into error state, except for MLX4_COMM_CMD_RESET where it means the
 * slave is booting during FLR and MLX4_DELAY_RESET_SLAVE is returned.
 */
static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
		       unsigned long timeout)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	unsigned long end;
	int err = 0;
	int ret_from_pending = 0;

	/* First, verify that the master reports correct status */
	if (comm_pending(dev)) {
		mlx4_warn(dev, "Communication channel is not idle - my toggle is %d (cmd:0x%x)\n",
			  priv->cmd.comm_toggle, cmd);
		return -EAGAIN;
	}

	/* Write command */
	down(&priv->cmd.poll_sem);
	if (mlx4_comm_cmd_post(dev, cmd, param)) {
		/* Only in case the device state is INTERNAL_ERROR,
		 * mlx4_comm_cmd_post returns with an error
		 */
		err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
		goto out;
	}

	/* Poll until the master acknowledges or the deadline passes */
	end = msecs_to_jiffies(timeout) + jiffies;
	while (comm_pending(dev) && time_before(jiffies, end))
		cond_resched();
	ret_from_pending = comm_pending(dev);
	if (ret_from_pending) {
		/* check if the slave is trying to boot in the middle of
		 * FLR process. The only non-zero result in the RESET command
		 * is MLX4_DELAY_RESET_SLAVE*/
		if ((MLX4_COMM_CMD_RESET == cmd)) {
			err = MLX4_DELAY_RESET_SLAVE;
			goto out;
		} else {
			mlx4_warn(dev, "Communication channel command 0x%x timed out\n",
				  cmd);
			err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
		}
	}

	if (err)
		mlx4_enter_error_state(dev->persist);
out:
	up(&priv->cmd.poll_sem);
	return err;
}
335 
/*
 * Issue a comm-channel command in event mode: take a command context,
 * post the command, and sleep on the context's completion until the
 * result arrives or @timeout msecs elapse.  Serialized by cmd.event_sem.
 * A timeout, or a fatal error on a closing command, drives the device
 * into internal-error state.
 */
static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 vhcr_cmd,
			      u16 param, u16 op, unsigned long timeout)
{
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	struct mlx4_cmd_context *context;
	unsigned long end;
	int err = 0;

	down(&cmd->event_sem);

	/* Pop a free context; bumping the token lets a late completion of a
	 * previously timed-out command be recognized and dropped.
	 */
	spin_lock(&cmd->context_lock);
	BUG_ON(cmd->free_head < 0);
	context = &cmd->context[cmd->free_head];
	context->token += cmd->token_mask + 1;
	cmd->free_head = context->next;
	spin_unlock(&cmd->context_lock);

	reinit_completion(&context->done);

	if (mlx4_comm_cmd_post(dev, vhcr_cmd, param)) {
		/* Only in case the device state is INTERNAL_ERROR,
		 * mlx4_comm_cmd_post returns with an error
		 */
		err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
		goto out;
	}

	if (!wait_for_completion_timeout(&context->done,
					 msecs_to_jiffies(timeout))) {
		mlx4_warn(dev, "communication channel command 0x%x (op=0x%x) timed out\n",
			  vhcr_cmd, op);
		goto out_reset;
	}

	err = context->result;
	if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) {
		mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
			 vhcr_cmd, context->fw_status);
		if (mlx4_closing_cmd_fatal_error(op, context->fw_status))
			goto out_reset;
	}

	/* wait for comm channel ready
	 * this is necessary for prevention the race
	 * when switching between event to polling mode
	 * Skipping this section in case the device is in FATAL_ERROR state,
	 * In this state, no commands are sent via the comm channel until
	 * the device has returned from reset.
	 */
	if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
		end = msecs_to_jiffies(timeout) + jiffies;
		while (comm_pending(dev) && time_before(jiffies, end))
			cond_resched();
	}
	goto out;

out_reset:
	err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
	mlx4_enter_error_state(dev->persist);
out:
	/* Return the context to the free list */
	spin_lock(&cmd->context_lock);
	context->next = cmd->free_head;
	cmd->free_head = context - cmd->context;
	spin_unlock(&cmd->context_lock);

	up(&cmd->event_sem);
	return err;
}
404 
405 int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
406 		  u16 op, unsigned long timeout)
407 {
408 	if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
409 		return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
410 
411 	if (mlx4_priv(dev)->cmd.use_events)
412 		return mlx4_comm_cmd_wait(dev, cmd, param, op, timeout);
413 	return mlx4_comm_cmd_poll(dev, cmd, param, timeout);
414 }
415 
416 static int cmd_pending(struct mlx4_dev *dev)
417 {
418 	u32 status;
419 
420 	if (pci_channel_offline(dev->persist->pdev))
421 		return -EIO;
422 
423 	status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
424 
425 	return (status & swab32(1 << HCR_GO_BIT)) ||
426 		(mlx4_priv(dev)->cmd.toggle ==
427 		 !!(status & swab32(1 << HCR_T_BIT)));
428 }
429 
/*
 * Write a command to the HCR.  Waits for the GO bit to clear (up to
 * GO_BIT_TIMEOUT_MSECS in event mode; in polling mode the HCR is expected
 * to be free already, since poll_sem serializes commands), then writes
 * the six parameter dwords followed by the control dword that hands
 * ownership to FW.  Returns 0 on success or -EIO if the device cannot
 * accept commands.
 */
static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
			 u32 in_modifier, u8 op_modifier, u16 op, u16 token,
			 int event)
{
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	u32 __iomem *hcr = cmd->hcr;
	int ret = -EIO;
	unsigned long end;

	mutex_lock(&dev->persist->device_state_mutex);
	/* To avoid writing to unknown addresses after the device state was
	  * changed to internal error and the chip was reset,
	  * check the INTERNAL_ERROR flag which is updated under
	  * device_state_mutex lock.
	  */
	if (pci_channel_offline(dev->persist->pdev) ||
	    (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
		/*
		 * Device is going through error recovery
		 * and cannot accept commands.
		 */
		goto out;
	}

	/* In polling mode (!event) end == jiffies, so any pending state
	 * fails almost immediately below.
	 */
	end = jiffies;
	if (event)
		end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);

	while (cmd_pending(dev)) {
		if (pci_channel_offline(dev->persist->pdev)) {
			/*
			 * Device is going through error recovery
			 * and cannot accept commands.
			 */
			goto out;
		}

		if (time_after_eq(jiffies, end)) {
			mlx4_err(dev, "%s:cmd_pending failed\n", __func__);
			goto out;
		}
		cond_resched();
	}

	/*
	 * We use writel (instead of something like memcpy_toio)
	 * because writes of less than 32 bits to the HCR don't work
	 * (and some architectures such as ia64 implement memcpy_toio
	 * in terms of writeb).
	 */
	__raw_writel((__force u32) cpu_to_be32(in_param >> 32),		  hcr + 0);
	__raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful),  hcr + 1);
	__raw_writel((__force u32) cpu_to_be32(in_modifier),		  hcr + 2);
	__raw_writel((__force u32) cpu_to_be32(out_param >> 32),	  hcr + 3);
	__raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4);
	__raw_writel((__force u32) cpu_to_be32(token << 16),		  hcr + 5);

	/* __raw_writel may not order writes. */
	wmb();

	/* The GO bit in this final dword transfers HCR ownership to FW */
	__raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT)		|
					       (cmd->toggle << HCR_T_BIT)	|
					       (event ? (1 << HCR_E_BIT) : 0)	|
					       (op_modifier << HCR_OPMOD_SHIFT) |
					       op), hcr + 6);

	/*
	 * Make sure that our HCR writes don't get mixed in with
	 * writes from another CPU starting a FW command.
	 */
	mmiowb();

	cmd->toggle = cmd->toggle ^ 1;

	ret = 0;

out:
	if (ret)
		mlx4_warn(dev, "Could not post command 0x%x: ret=%d, in_param=0x%llx, in_mod=0x%x, op_mod=0x%x\n",
			  op, ret, in_param, in_modifier, op_modifier);
	mutex_unlock(&dev->persist->device_state_mutex);

	return ret;
}
514 
515 static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
516 			  int out_is_imm, u32 in_modifier, u8 op_modifier,
517 			  u16 op, unsigned long timeout)
518 {
519 	struct mlx4_priv *priv = mlx4_priv(dev);
520 	struct mlx4_vhcr_cmd *vhcr = priv->mfunc.vhcr;
521 	int ret;
522 
523 	mutex_lock(&priv->cmd.slave_cmd_mutex);
524 
525 	vhcr->in_param = cpu_to_be64(in_param);
526 	vhcr->out_param = out_param ? cpu_to_be64(*out_param) : 0;
527 	vhcr->in_modifier = cpu_to_be32(in_modifier);
528 	vhcr->opcode = cpu_to_be16((((u16) op_modifier) << 12) | (op & 0xfff));
529 	vhcr->token = cpu_to_be16(CMD_POLL_TOKEN);
530 	vhcr->status = 0;
531 	vhcr->flags = !!(priv->cmd.use_events) << 6;
532 
533 	if (mlx4_is_master(dev)) {
534 		ret = mlx4_master_process_vhcr(dev, dev->caps.function, vhcr);
535 		if (!ret) {
536 			if (out_is_imm) {
537 				if (out_param)
538 					*out_param =
539 						be64_to_cpu(vhcr->out_param);
540 				else {
541 					mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
542 						 op);
543 					vhcr->status = CMD_STAT_BAD_PARAM;
544 				}
545 			}
546 			ret = mlx4_status_to_errno(vhcr->status);
547 		}
548 		if (ret &&
549 		    dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
550 			ret = mlx4_internal_err_ret_value(dev, op, op_modifier);
551 	} else {
552 		ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0, op,
553 				    MLX4_COMM_TIME + timeout);
554 		if (!ret) {
555 			if (out_is_imm) {
556 				if (out_param)
557 					*out_param =
558 						be64_to_cpu(vhcr->out_param);
559 				else {
560 					mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
561 						 op);
562 					vhcr->status = CMD_STAT_BAD_PARAM;
563 				}
564 			}
565 			ret = mlx4_status_to_errno(vhcr->status);
566 		} else {
567 			if (dev->persist->state &
568 			    MLX4_DEVICE_STATE_INTERNAL_ERROR)
569 				ret = mlx4_internal_err_ret_value(dev, op,
570 								  op_modifier);
571 			else
572 				mlx4_err(dev, "failed execution of VHCR_POST command opcode 0x%x\n", op);
573 		}
574 	}
575 
576 	mutex_unlock(&priv->cmd.slave_cmd_mutex);
577 	return ret;
578 }
579 
/*
 * Execute a FW command in polling mode: post to the HCR and spin (with
 * cond_resched) until the GO/toggle bits indicate completion or @timeout
 * msecs elapse.  Serialized by cmd.poll_sem.  The immediate output (if
 * requested) and the status byte are read back directly from the HCR.
 */
static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
			 int out_is_imm, u32 in_modifier, u8 op_modifier,
			 u16 op, unsigned long timeout)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	void __iomem *hcr = priv->cmd.hcr;
	int err = 0;
	unsigned long end;
	u32 stat;

	down(&priv->cmd.poll_sem);

	if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		/*
		 * Device is going through error recovery
		 * and cannot accept commands.
		 */
		err = mlx4_internal_err_ret_value(dev, op, op_modifier);
		goto out;
	}

	if (out_is_imm && !out_param) {
		mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
			 op);
		err = -EINVAL;
		goto out;
	}

	err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
			    in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
	if (err)
		goto out_reset;

	end = msecs_to_jiffies(timeout) + jiffies;
	while (cmd_pending(dev) && time_before(jiffies, end)) {
		if (pci_channel_offline(dev->persist->pdev)) {
			/*
			 * Device is going through error recovery
			 * and cannot accept commands.
			 */
			err = -EIO;
			goto out_reset;
		}

		if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
			err = mlx4_internal_err_ret_value(dev, op, op_modifier);
			goto out;
		}

		cond_resched();
	}

	if (cmd_pending(dev)) {
		mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
			  op);
		err = -EIO;
		goto out_reset;
	}

	/* Read back immediate result and FW status from the HCR */
	if (out_is_imm)
		*out_param =
			(u64) be32_to_cpu((__force __be32)
					  __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
			(u64) be32_to_cpu((__force __be32)
					  __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));
	stat = be32_to_cpu((__force __be32)
			   __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24;
	err = mlx4_status_to_errno(stat);
	if (err) {
		mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
			 op, stat);
		if (mlx4_closing_cmd_fatal_error(op, stat))
			goto out_reset;
		goto out;
	}

	/* Success falls through here; the "if (err)" guard keeps the reset
	 * flow from running for err == 0.
	 */
out_reset:
	if (err)
		err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);
out:
	up(&priv->cmd.poll_sem);
	return err;
}
663 
664 void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
665 {
666 	struct mlx4_priv *priv = mlx4_priv(dev);
667 	struct mlx4_cmd_context *context =
668 		&priv->cmd.context[token & priv->cmd.token_mask];
669 
670 	/* previously timed out command completing at long last */
671 	if (token != context->token)
672 		return;
673 
674 	context->fw_status = status;
675 	context->result    = mlx4_status_to_errno(status);
676 	context->out_param = out_param;
677 
678 	complete(&context->done);
679 }
680 
681 static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
682 			 int out_is_imm, u32 in_modifier, u8 op_modifier,
683 			 u16 op, unsigned long timeout)
684 {
685 	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
686 	struct mlx4_cmd_context *context;
687 	int err = 0;
688 
689 	down(&cmd->event_sem);
690 
691 	spin_lock(&cmd->context_lock);
692 	BUG_ON(cmd->free_head < 0);
693 	context = &cmd->context[cmd->free_head];
694 	context->token += cmd->token_mask + 1;
695 	cmd->free_head = context->next;
696 	spin_unlock(&cmd->context_lock);
697 
698 	if (out_is_imm && !out_param) {
699 		mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
700 			 op);
701 		err = -EINVAL;
702 		goto out;
703 	}
704 
705 	reinit_completion(&context->done);
706 
707 	err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
708 			    in_modifier, op_modifier, op, context->token, 1);
709 	if (err)
710 		goto out_reset;
711 
712 	if (!wait_for_completion_timeout(&context->done,
713 					 msecs_to_jiffies(timeout))) {
714 		mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
715 			  op);
716 		err = -EIO;
717 		goto out_reset;
718 	}
719 
720 	err = context->result;
721 	if (err) {
722 		/* Since we do not want to have this error message always
723 		 * displayed at driver start when there are ConnectX2 HCAs
724 		 * on the host, we deprecate the error message for this
725 		 * specific command/input_mod/opcode_mod/fw-status to be debug.
726 		 */
727 		if (op == MLX4_CMD_SET_PORT && in_modifier == 1 &&
728 		    op_modifier == 0 && context->fw_status == CMD_STAT_BAD_SIZE)
729 			mlx4_dbg(dev, "command 0x%x failed: fw status = 0x%x\n",
730 				 op, context->fw_status);
731 		else
732 			mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
733 				 op, context->fw_status);
734 		if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
735 			err = mlx4_internal_err_ret_value(dev, op, op_modifier);
736 		else if (mlx4_closing_cmd_fatal_error(op, context->fw_status))
737 			goto out_reset;
738 
739 		goto out;
740 	}
741 
742 	if (out_is_imm)
743 		*out_param = context->out_param;
744 
745 out_reset:
746 	if (err)
747 		err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);
748 out:
749 	spin_lock(&cmd->context_lock);
750 	context->next = cmd->free_head;
751 	cmd->free_head = context - cmd->context;
752 	spin_unlock(&cmd->context_lock);
753 
754 	up(&cmd->event_sem);
755 	return err;
756 }
757 
758 int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
759 	       int out_is_imm, u32 in_modifier, u8 op_modifier,
760 	       u16 op, unsigned long timeout, int native)
761 {
762 	if (pci_channel_offline(dev->persist->pdev))
763 		return mlx4_cmd_reset_flow(dev, op, op_modifier, -EIO);
764 
765 	if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
766 		if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
767 			return mlx4_internal_err_ret_value(dev, op,
768 							  op_modifier);
769 		if (mlx4_priv(dev)->cmd.use_events)
770 			return mlx4_cmd_wait(dev, in_param, out_param,
771 					     out_is_imm, in_modifier,
772 					     op_modifier, op, timeout);
773 		else
774 			return mlx4_cmd_poll(dev, in_param, out_param,
775 					     out_is_imm, in_modifier,
776 					     op_modifier, op, timeout);
777 	}
778 	return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm,
779 			      in_modifier, op_modifier, op, timeout);
780 }
781 EXPORT_SYMBOL_GPL(__mlx4_cmd);
782 
783 
/* Arm the comm channel by issuing the ARM_COMM_CHANNEL FW command
 * (no parameters, native execution).
 */
int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
789 
/*
 * Transfer @size bytes between master and slave memory via the ACCESS_MEM
 * command; @is_read selects the direction (slave -> master when set).
 * Both addresses must be 4KB-aligned, @slave must fit in 7 bits and the
 * low byte of @size must be zero, otherwise -EINVAL.
 */
static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
			   int slave, u64 slave_addr,
			   int size, int is_read)
{
	u64 in_param;
	u64 out_param;

	/* Validate alignment/range with one combined mask test */
	if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
	    (slave & ~0x7f) | (size & 0xff)) {
		mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx master_addr:0x%llx slave_id:%d size:%d\n",
			 slave_addr, master_addr, slave, size);
		return -EINVAL;
	}

	/* Each side is encoded as function-id in the low byte of the
	 * page-aligned address.
	 */
	if (is_read) {
		in_param = (u64) slave | slave_addr;
		out_param = (u64) dev->caps.function | master_addr;
	} else {
		in_param = (u64) dev->caps.function | master_addr;
		out_param = (u64) slave | slave_addr;
	}

	return mlx4_cmd_imm(dev, in_param, &out_param, size, 0,
			    MLX4_CMD_ACCESS_MEM,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
816 
817 static int query_pkey_block(struct mlx4_dev *dev, u8 port, u16 index, u16 *pkey,
818 			       struct mlx4_cmd_mailbox *inbox,
819 			       struct mlx4_cmd_mailbox *outbox)
820 {
821 	struct ib_smp *in_mad = (struct ib_smp *)(inbox->buf);
822 	struct ib_smp *out_mad = (struct ib_smp *)(outbox->buf);
823 	int err;
824 	int i;
825 
826 	if (index & 0x1f)
827 		return -EINVAL;
828 
829 	in_mad->attr_mod = cpu_to_be32(index / 32);
830 
831 	err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
832 			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
833 			   MLX4_CMD_NATIVE);
834 	if (err)
835 		return err;
836 
837 	for (i = 0; i < 32; ++i)
838 		pkey[i] = be16_to_cpu(((__be16 *) out_mad->data)[i]);
839 
840 	return err;
841 }
842 
843 static int get_full_pkey_table(struct mlx4_dev *dev, u8 port, u16 *table,
844 			       struct mlx4_cmd_mailbox *inbox,
845 			       struct mlx4_cmd_mailbox *outbox)
846 {
847 	int i;
848 	int err;
849 
850 	for (i = 0; i < dev->caps.pkey_table_len[port]; i += 32) {
851 		err = query_pkey_block(dev, port, i, table + i, inbox, outbox);
852 		if (err)
853 			return err;
854 	}
855 
856 	return 0;
857 }
858 #define PORT_CAPABILITY_LOCATION_IN_SMP 20
859 #define PORT_STATE_OFFSET 32
860 
861 static enum ib_port_state vf_port_state(struct mlx4_dev *dev, int port, int vf)
862 {
863 	if (mlx4_get_slave_port_state(dev, vf, port) == SLAVE_PORT_UP)
864 		return IB_PORT_ACTIVE;
865 	else
866 		return IB_PORT_DOWN;
867 }
868 
/*
 * VHCR wrapper executing MAD_IFC on behalf of a slave.  For host-view
 * LID-routed SubnGet MADs the response is paravirtualized so each slave
 * sees only its own view: pkey table (via virt2phys_pkey), port info
 * (port state and capability mask), guid block (slave's gid moved to
 * index 0, others cleared) and node guid.  Unprivileged VFs are limited
 * to host-view LID-routed Get MADs; everything else is rejected.
 */
static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd)
{
	struct ib_smp *smp = inbox->buf;
	u32 index;
	u8 port;
	u8 opcode_modifier;
	u16 *table;
	int err;
	int vidx, pidx;
	int network_view;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct ib_smp *outsmp = outbox->buf;
	__be16 *outtab = (__be16 *)(outsmp->data);
	__be32 slave_cap_mask;
	__be64 slave_node_guid;

	port = vhcr->in_modifier;

	/* network-view bit is for driver use only, and should not be passed to FW */
	opcode_modifier = vhcr->op_modifier & ~0x8; /* clear netw view bit */
	network_view = !!(vhcr->op_modifier & 0x8);

	if (smp->base_version == 1 &&
	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
	    smp->class_version == 1) {
		/* host view is paravirtualized */
		if (!network_view && smp->method == IB_MGMT_METHOD_GET) {
			if (smp->attr_id == IB_SMP_ATTR_PKEY_TABLE) {
				index = be32_to_cpu(smp->attr_mod);
				if (port < 1 || port > dev->caps.num_ports)
					return -EINVAL;
				table = kcalloc((dev->caps.pkey_table_len[port] / 32) + 1,
						sizeof(*table) * 32, GFP_KERNEL);

				if (!table)
					return -ENOMEM;
				/* need to get the full pkey table because the paravirtualized
				 * pkeys may be scattered among several pkey blocks.
				 */
				err = get_full_pkey_table(dev, port, table, inbox, outbox);
				if (!err) {
					/* map each virtual pkey index of this
					 * slave to its physical table entry
					 */
					for (vidx = index * 32; vidx < (index + 1) * 32; ++vidx) {
						pidx = priv->virt2phys_pkey[slave][port - 1][vidx];
						outtab[vidx % 32] = cpu_to_be16(table[pidx]);
					}
				}
				kfree(table);
				return err;
			}
			if (smp->attr_id == IB_SMP_ATTR_PORT_INFO) {
				/*get the slave specific caps:*/
				/*do the command */
				err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
					    vhcr->in_modifier, opcode_modifier,
					    vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
				/* modify the response for slaves */
				if (!err && slave != mlx4_master_func_num(dev)) {
					u8 *state = outsmp->data + PORT_STATE_OFFSET;

					*state = (*state & 0xf0) | vf_port_state(dev, port, slave);
					slave_cap_mask = priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
					memcpy(outsmp->data + PORT_CAPABILITY_LOCATION_IN_SMP, &slave_cap_mask, 4);
				}
				return err;
			}
			if (smp->attr_id == IB_SMP_ATTR_GUID_INFO) {
				/* compute slave's gid block */
				smp->attr_mod = cpu_to_be32(slave / 8);
				/* execute cmd */
				err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
					     vhcr->in_modifier, opcode_modifier,
					     vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
				if (!err) {
					/* if needed, move slave gid to index 0 */
					if (slave % 8)
						memcpy(outsmp->data,
						       outsmp->data + (slave % 8) * 8, 8);
					/* delete all other gids */
					memset(outsmp->data + 8, 0, 56);
				}
				return err;
			}
			if (smp->attr_id == IB_SMP_ATTR_NODE_INFO) {
				err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
					     vhcr->in_modifier, opcode_modifier,
					     vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
				if (!err) {
					slave_node_guid =  mlx4_get_slave_node_guid(dev, slave);
					memcpy(outsmp->data + 12, &slave_node_guid, 8);
				}
				return err;
			}
		}
	}

	/* Non-privileged VFs are only allowed "host" view LID-routed 'Get' MADs.
	 * These are the MADs used by ib verbs (such as ib_query_gids).
	 */
	if (slave != mlx4_master_func_num(dev) &&
	    !mlx4_vf_smi_enabled(dev, slave, port)) {
		if (!(smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
		      smp->method == IB_MGMT_METHOD_GET) || network_view) {
			mlx4_err(dev, "Unprivileged slave %d is trying to execute a Subnet MGMT MAD, class 0x%x, method 0x%x, view=%s for attr 0x%x. Rejecting\n",
				 slave, smp->method, smp->mgmt_class,
				 network_view ? "Network" : "Host",
				 be16_to_cpu(smp->attr_id));
			return -EPERM;
		}
	}

	/* Privileged or already-paravirtualized: pass through to FW */
	return mlx4_cmd_box(dev, inbox->dma, outbox->dma,
				    vhcr->in_modifier, opcode_modifier,
				    vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
}
987 
/* Generic wrapper that unconditionally denies a vHCR command to slaves.
 * Installed as the .wrapper for opcodes that only the PF itself may run
 * (e.g. GET_OP_REQ, MAD_DEMUX, VIRT_PORT_MAP in cmd_info[]); all
 * parameters are ignored and -EPERM is reported back to the slave.
 */
static int mlx4_CMD_EPERM_wrapper(struct mlx4_dev *dev, int slave,
		     struct mlx4_vhcr *vhcr,
		     struct mlx4_cmd_mailbox *inbox,
		     struct mlx4_cmd_mailbox *outbox,
		     struct mlx4_cmd_info *cmd)
{
	return -EPERM;
}
996 
997 int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
998 		     struct mlx4_vhcr *vhcr,
999 		     struct mlx4_cmd_mailbox *inbox,
1000 		     struct mlx4_cmd_mailbox *outbox,
1001 		     struct mlx4_cmd_info *cmd)
1002 {
1003 	u64 in_param;
1004 	u64 out_param;
1005 	int err;
1006 
1007 	in_param = cmd->has_inbox ? (u64) inbox->dma : vhcr->in_param;
1008 	out_param = cmd->has_outbox ? (u64) outbox->dma : vhcr->out_param;
1009 	if (cmd->encode_slave_id) {
1010 		in_param &= 0xffffffffffffff00ll;
1011 		in_param |= slave;
1012 	}
1013 
1014 	err = __mlx4_cmd(dev, in_param, &out_param, cmd->out_is_imm,
1015 			 vhcr->in_modifier, vhcr->op_modifier, vhcr->op,
1016 			 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1017 
1018 	if (cmd->out_is_imm)
1019 		vhcr->out_param = out_param;
1020 
1021 	return err;
1022 }
1023 
/* Dispatch table for commands posted by slaves through the virtual HCR.
 * Each entry tells mlx4_master_process_vhcr() whether the opcode carries
 * an inbox/outbox mailbox, whether its result is returned as an immediate
 * value, whether the slave id must be encoded into the input parameter,
 * and which optional verify/wrapper hooks to run on the master's behalf.
 * A NULL .wrapper means the command is forwarded to the device unchanged;
 * mlx4_CMD_EPERM_wrapper denies the command to slaves altogether.
 */
static struct mlx4_cmd_info cmd_info[] = {
	{
		.opcode = MLX4_CMD_QUERY_FW,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_FW_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_HCA,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = NULL
	},
	{
		.opcode = MLX4_CMD_QUERY_DEV_CAP,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_DEV_CAP_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_FUNC_CAP,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_FUNC_CAP_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_ADAPTER,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = NULL
	},
	{
		.opcode = MLX4_CMD_INIT_PORT,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_INIT_PORT_wrapper
	},
	{
		.opcode = MLX4_CMD_CLOSE_PORT,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm  = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_CLOSE_PORT_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_PORT,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_PORT_wrapper
	},
	{
		.opcode = MLX4_CMD_SET_PORT,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_SET_PORT_wrapper
	},
	{
		.opcode = MLX4_CMD_MAP_EQ,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_MAP_EQ_wrapper
	},
	{
		.opcode = MLX4_CMD_SW2HW_EQ,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = true,
		.verify = NULL,
		.wrapper = mlx4_SW2HW_EQ_wrapper
	},
	{
		.opcode = MLX4_CMD_HW_HEALTH_CHECK,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = NULL
	},
	{
		.opcode = MLX4_CMD_NOP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = NULL
	},
	{
		.opcode = MLX4_CMD_CONFIG_DEV,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_CONFIG_DEV_wrapper
	},
	{
		.opcode = MLX4_CMD_ALLOC_RES,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = true,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_ALLOC_RES_wrapper
	},
	{
		.opcode = MLX4_CMD_FREE_RES,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_FREE_RES_wrapper
	},
	{
		.opcode = MLX4_CMD_SW2HW_MPT,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = true,
		.verify = NULL,
		.wrapper = mlx4_SW2HW_MPT_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_MPT,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_MPT_wrapper
	},
	{
		.opcode = MLX4_CMD_HW2SW_MPT,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_HW2SW_MPT_wrapper
	},
	{
		.opcode = MLX4_CMD_READ_MTT,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = NULL
	},
	{
		.opcode = MLX4_CMD_WRITE_MTT,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_WRITE_MTT_wrapper
	},
	{
		.opcode = MLX4_CMD_SYNC_TPT,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = NULL
	},
	{
		.opcode = MLX4_CMD_HW2SW_EQ,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = true,
		.verify = NULL,
		.wrapper = mlx4_HW2SW_EQ_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_EQ,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = true,
		.verify = NULL,
		.wrapper = mlx4_QUERY_EQ_wrapper
	},
	{
		.opcode = MLX4_CMD_SW2HW_CQ,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = true,
		.verify = NULL,
		.wrapper = mlx4_SW2HW_CQ_wrapper
	},
	{
		.opcode = MLX4_CMD_HW2SW_CQ,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_HW2SW_CQ_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_CQ,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_CQ_wrapper
	},
	{
		.opcode = MLX4_CMD_MODIFY_CQ,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = true,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_MODIFY_CQ_wrapper
	},
	{
		.opcode = MLX4_CMD_SW2HW_SRQ,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = true,
		.verify = NULL,
		.wrapper = mlx4_SW2HW_SRQ_wrapper
	},
	{
		.opcode = MLX4_CMD_HW2SW_SRQ,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_HW2SW_SRQ_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_SRQ,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_SRQ_wrapper
	},
	{
		.opcode = MLX4_CMD_ARM_SRQ,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_ARM_SRQ_wrapper
	},
	/* QP state-transition commands */
	{
		.opcode = MLX4_CMD_RST2INIT_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = true,
		.verify = NULL,
		.wrapper = mlx4_RST2INIT_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_INIT2INIT_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_INIT2INIT_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_INIT2RTR_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_INIT2RTR_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_RTR2RTS_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_RTR2RTS_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_RTS2RTS_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_RTS2RTS_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_SQERR2RTS_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_SQERR2RTS_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_2ERR_QP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_GEN_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_RTS2SQD_QP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_GEN_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_SQD2SQD_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_SQD2SQD_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_SQD2RTS_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_SQD2RTS_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_2RST_QP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_2RST_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_QP,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_GEN_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_SUSPEND_QP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_GEN_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_UNSUSPEND_QP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_GEN_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_UPDATE_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_UPDATE_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_GET_OP_REQ,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_CMD_EPERM_wrapper,
	},
	{
		.opcode = MLX4_CMD_CONF_SPECIAL_QP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL, /* XXX verify: only demux can do this */
		.wrapper = NULL
	},
	{
		.opcode = MLX4_CMD_MAD_IFC,
		.has_inbox = true,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_MAD_IFC_wrapper
	},
	{
		.opcode = MLX4_CMD_MAD_DEMUX,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_CMD_EPERM_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_IF_STAT,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_IF_STAT_wrapper
	},
	{
		.opcode = MLX4_CMD_ACCESS_REG,
		.has_inbox = true,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_ACCESS_REG_wrapper,
	},
	/* Native multicast commands are not available for guests */
	{
		.opcode = MLX4_CMD_QP_ATTACH,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QP_ATTACH_wrapper
	},
	{
		.opcode = MLX4_CMD_PROMISC,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_PROMISC_wrapper
	},
	/* Ethernet specific commands */
	{
		.opcode = MLX4_CMD_SET_VLAN_FLTR,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_SET_VLAN_FLTR_wrapper
	},
	{
		.opcode = MLX4_CMD_SET_MCAST_FLTR,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_SET_MCAST_FLTR_wrapper
	},
	{
		.opcode = MLX4_CMD_DUMP_ETH_STATS,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_DUMP_ETH_STATS_wrapper
	},
	{
		.opcode = MLX4_CMD_INFORM_FLR_DONE,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = NULL
	},
	/* flow steering commands */
	{
		.opcode = MLX4_QP_FLOW_STEERING_ATTACH,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = true,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QP_FLOW_STEERING_ATTACH_wrapper
	},
	{
		.opcode = MLX4_QP_FLOW_STEERING_DETACH,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QP_FLOW_STEERING_DETACH_wrapper
	},
	{
		.opcode = MLX4_FLOW_STEERING_IB_UC_QP_RANGE,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_CMD_EPERM_wrapper
	},
	{
		.opcode = MLX4_CMD_VIRT_PORT_MAP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_CMD_EPERM_wrapper
	},
};
1596 
1597 static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
1598 				    struct mlx4_vhcr_cmd *in_vhcr)
1599 {
1600 	struct mlx4_priv *priv = mlx4_priv(dev);
1601 	struct mlx4_cmd_info *cmd = NULL;
1602 	struct mlx4_vhcr_cmd *vhcr_cmd = in_vhcr ? in_vhcr : priv->mfunc.vhcr;
1603 	struct mlx4_vhcr *vhcr;
1604 	struct mlx4_cmd_mailbox *inbox = NULL;
1605 	struct mlx4_cmd_mailbox *outbox = NULL;
1606 	u64 in_param;
1607 	u64 out_param;
1608 	int ret = 0;
1609 	int i;
1610 	int err = 0;
1611 
1612 	/* Create sw representation of Virtual HCR */
1613 	vhcr = kzalloc(sizeof(struct mlx4_vhcr), GFP_KERNEL);
1614 	if (!vhcr)
1615 		return -ENOMEM;
1616 
1617 	/* DMA in the vHCR */
1618 	if (!in_vhcr) {
1619 		ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
1620 				      priv->mfunc.master.slave_state[slave].vhcr_dma,
1621 				      ALIGN(sizeof(struct mlx4_vhcr_cmd),
1622 					    MLX4_ACCESS_MEM_ALIGN), 1);
1623 		if (ret) {
1624 			if (!(dev->persist->state &
1625 			    MLX4_DEVICE_STATE_INTERNAL_ERROR))
1626 				mlx4_err(dev, "%s: Failed reading vhcr ret: 0x%x\n",
1627 					 __func__, ret);
1628 			kfree(vhcr);
1629 			return ret;
1630 		}
1631 	}
1632 
1633 	/* Fill SW VHCR fields */
1634 	vhcr->in_param = be64_to_cpu(vhcr_cmd->in_param);
1635 	vhcr->out_param = be64_to_cpu(vhcr_cmd->out_param);
1636 	vhcr->in_modifier = be32_to_cpu(vhcr_cmd->in_modifier);
1637 	vhcr->token = be16_to_cpu(vhcr_cmd->token);
1638 	vhcr->op = be16_to_cpu(vhcr_cmd->opcode) & 0xfff;
1639 	vhcr->op_modifier = (u8) (be16_to_cpu(vhcr_cmd->opcode) >> 12);
1640 	vhcr->e_bit = vhcr_cmd->flags & (1 << 6);
1641 
1642 	/* Lookup command */
1643 	for (i = 0; i < ARRAY_SIZE(cmd_info); ++i) {
1644 		if (vhcr->op == cmd_info[i].opcode) {
1645 			cmd = &cmd_info[i];
1646 			break;
1647 		}
1648 	}
1649 	if (!cmd) {
1650 		mlx4_err(dev, "Unknown command:0x%x accepted from slave:%d\n",
1651 			 vhcr->op, slave);
1652 		vhcr_cmd->status = CMD_STAT_BAD_PARAM;
1653 		goto out_status;
1654 	}
1655 
1656 	/* Read inbox */
1657 	if (cmd->has_inbox) {
1658 		vhcr->in_param &= INBOX_MASK;
1659 		inbox = mlx4_alloc_cmd_mailbox(dev);
1660 		if (IS_ERR(inbox)) {
1661 			vhcr_cmd->status = CMD_STAT_BAD_SIZE;
1662 			inbox = NULL;
1663 			goto out_status;
1664 		}
1665 
1666 		ret = mlx4_ACCESS_MEM(dev, inbox->dma, slave,
1667 				      vhcr->in_param,
1668 				      MLX4_MAILBOX_SIZE, 1);
1669 		if (ret) {
1670 			if (!(dev->persist->state &
1671 			    MLX4_DEVICE_STATE_INTERNAL_ERROR))
1672 				mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n",
1673 					 __func__, cmd->opcode);
1674 			vhcr_cmd->status = CMD_STAT_INTERNAL_ERR;
1675 			goto out_status;
1676 		}
1677 	}
1678 
1679 	/* Apply permission and bound checks if applicable */
1680 	if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
1681 		mlx4_warn(dev, "Command:0x%x from slave: %d failed protection checks for resource_id:%d\n",
1682 			  vhcr->op, slave, vhcr->in_modifier);
1683 		vhcr_cmd->status = CMD_STAT_BAD_OP;
1684 		goto out_status;
1685 	}
1686 
1687 	/* Allocate outbox */
1688 	if (cmd->has_outbox) {
1689 		outbox = mlx4_alloc_cmd_mailbox(dev);
1690 		if (IS_ERR(outbox)) {
1691 			vhcr_cmd->status = CMD_STAT_BAD_SIZE;
1692 			outbox = NULL;
1693 			goto out_status;
1694 		}
1695 	}
1696 
1697 	/* Execute the command! */
1698 	if (cmd->wrapper) {
1699 		err = cmd->wrapper(dev, slave, vhcr, inbox, outbox,
1700 				   cmd);
1701 		if (cmd->out_is_imm)
1702 			vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
1703 	} else {
1704 		in_param = cmd->has_inbox ? (u64) inbox->dma :
1705 			vhcr->in_param;
1706 		out_param = cmd->has_outbox ? (u64) outbox->dma :
1707 			vhcr->out_param;
1708 		err = __mlx4_cmd(dev, in_param, &out_param,
1709 				 cmd->out_is_imm, vhcr->in_modifier,
1710 				 vhcr->op_modifier, vhcr->op,
1711 				 MLX4_CMD_TIME_CLASS_A,
1712 				 MLX4_CMD_NATIVE);
1713 
1714 		if (cmd->out_is_imm) {
1715 			vhcr->out_param = out_param;
1716 			vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
1717 		}
1718 	}
1719 
1720 	if (err) {
1721 		if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR))
1722 			mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n",
1723 				  vhcr->op, slave, vhcr->errno, err);
1724 		vhcr_cmd->status = mlx4_errno_to_status(err);
1725 		goto out_status;
1726 	}
1727 
1728 
1729 	/* Write outbox if command completed successfully */
1730 	if (cmd->has_outbox && !vhcr_cmd->status) {
1731 		ret = mlx4_ACCESS_MEM(dev, outbox->dma, slave,
1732 				      vhcr->out_param,
1733 				      MLX4_MAILBOX_SIZE, MLX4_CMD_WRAPPED);
1734 		if (ret) {
1735 			/* If we failed to write back the outbox after the
1736 			 *command was successfully executed, we must fail this
1737 			 * slave, as it is now in undefined state */
1738 			if (!(dev->persist->state &
1739 			    MLX4_DEVICE_STATE_INTERNAL_ERROR))
1740 				mlx4_err(dev, "%s:Failed writing outbox\n", __func__);
1741 			goto out;
1742 		}
1743 	}
1744 
1745 out_status:
1746 	/* DMA back vhcr result */
1747 	if (!in_vhcr) {
1748 		ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
1749 				      priv->mfunc.master.slave_state[slave].vhcr_dma,
1750 				      ALIGN(sizeof(struct mlx4_vhcr),
1751 					    MLX4_ACCESS_MEM_ALIGN),
1752 				      MLX4_CMD_WRAPPED);
1753 		if (ret)
1754 			mlx4_err(dev, "%s:Failed writing vhcr result\n",
1755 				 __func__);
1756 		else if (vhcr->e_bit &&
1757 			 mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
1758 				mlx4_warn(dev, "Failed to generate command completion eqe for slave %d\n",
1759 					  slave);
1760 	}
1761 
1762 out:
1763 	kfree(vhcr);
1764 	mlx4_free_cmd_mailbox(dev, inbox);
1765 	mlx4_free_cmd_mailbox(dev, outbox);
1766 	return ret;
1767 }
1768 
/* Apply an updated admin vlan/qos/link-state directive to an active VF
 * immediately, by queuing a work item that walks the slave's QPs with
 * UPDATE_QP (mlx4_vf_immed_vlan_work_handler).
 *
 * Returns 0 when nothing changed or the work was queued, -ENOMEM on
 * allocation failure, -1 when the slave is inactive or the device lacks
 * UPDATE_QP support (link state is still committed in that case), or the
 * error from vlan registration.
 */
static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
					    int slave, int port)
{
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_vport_state *vp_admin;
	struct mlx4_vf_immed_vlan_work *work;
	struct mlx4_dev *dev = &(priv->dev);
	int err;
	int admin_vlan_ix = NO_INDX;

	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
	vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];

	/* Nothing to do if the operational state already matches the admin
	 * directive.
	 */
	if (vp_oper->state.default_vlan == vp_admin->default_vlan &&
	    vp_oper->state.default_qos == vp_admin->default_qos &&
	    vp_oper->state.link_state == vp_admin->link_state)
		return 0;

	if (!(priv->mfunc.master.slave_state[slave].active &&
	      dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)) {
		/* even if the UPDATE_QP command isn't supported, we still want
		 * to set this VF link according to the admin directive
		 */
		vp_oper->state.link_state = vp_admin->link_state;
		return -1;
	}

	mlx4_dbg(dev, "updating immediately admin params slave %d port %d\n",
		 slave, port);
	mlx4_dbg(dev, "vlan %d QoS %d link down %d\n",
		 vp_admin->default_vlan, vp_admin->default_qos,
		 vp_admin->link_state);

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	if (vp_oper->state.default_vlan != vp_admin->default_vlan) {
		/* Reserve a vlan index for the new id before touching the
		 * operational state; MLX4_VGT (vlan passthrough) needs none.
		 */
		if (MLX4_VGT != vp_admin->default_vlan) {
			err = __mlx4_register_vlan(&priv->dev, port,
						   vp_admin->default_vlan,
						   &admin_vlan_ix);
			if (err) {
				kfree(work);
				mlx4_warn(&priv->dev,
					  "No vlan resources slave %d, port %d\n",
					  slave, port);
				return err;
			}
		} else {
			admin_vlan_ix = NO_INDX;
		}
		work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
		mlx4_dbg(&priv->dev,
			 "alloc vlan %d idx  %d slave %d port %d\n",
			 (int)(vp_admin->default_vlan),
			 admin_vlan_ix, slave, port);
	}

	/* save original vlan ix and vlan id */
	work->orig_vlan_id = vp_oper->state.default_vlan;
	work->orig_vlan_ix = vp_oper->vlan_idx;

	/* handle new qos */
	if (vp_oper->state.default_qos != vp_admin->default_qos)
		work->flags |= MLX4_VF_IMMED_VLAN_FLAG_QOS;

	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN)
		vp_oper->vlan_idx = admin_vlan_ix;

	/* Commit the admin directive to the operational state; the queued
	 * work below propagates it to the slave's QPs.
	 */
	vp_oper->state.default_vlan = vp_admin->default_vlan;
	vp_oper->state.default_qos = vp_admin->default_qos;
	vp_oper->state.link_state = vp_admin->link_state;

	if (vp_admin->link_state == IFLA_VF_LINK_STATE_DISABLE)
		work->flags |= MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE;

	/* iterate over QPs owned by this slave, using UPDATE_QP */
	work->port = port;
	work->slave = slave;
	work->qos = vp_oper->state.default_qos;
	work->vlan_id = vp_oper->state.default_vlan;
	work->vlan_ix = vp_oper->vlan_idx;
	work->priv = priv;
	INIT_WORK(&work->work, mlx4_vf_immed_vlan_work_handler);
	queue_work(priv->mfunc.master.comm_wq, &work->work);

	return 0;
}
1858 
1859 
/* Copy the admin-configured vport state (vlan, qos, mac, SMI permission)
 * into the operational state for every active port of @slave, registering
 * the vlan index and — when spoof checking is on — the mac index with the
 * device.  Called when the slave enables its vHCR.
 *
 * Returns 0 on success or the first registration error.
 * NOTE(review): on error this returns without unwinding registrations
 * already made for earlier ports — presumably the caller's reset path
 * (mlx4_master_deactivate_admin_state) reclaims them; verify.
 */
static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
{
	int port, err;
	struct mlx4_vport_state *vp_admin;
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
			&priv->dev, slave);
	/* Ports are 1-based; walk only the bits set in the active-port map */
	int min_port = find_first_bit(actv_ports.ports,
				      priv->dev.caps.num_ports) + 1;
	int max_port = min_port - 1 +
		bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);

	for (port = min_port; port <= max_port; port++) {
		if (!test_bit(port - 1, actv_ports.ports))
			continue;
		priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
			priv->mfunc.master.vf_admin[slave].enable_smi[port];
		vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
		vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
		/* Snapshot the whole admin state as the new oper state */
		vp_oper->state = *vp_admin;
		if (MLX4_VGT != vp_admin->default_vlan) {
			err = __mlx4_register_vlan(&priv->dev, port,
						   vp_admin->default_vlan, &(vp_oper->vlan_idx));
			if (err) {
				vp_oper->vlan_idx = NO_INDX;
				mlx4_warn(&priv->dev,
					  "No vlan resources slave %d, port %d\n",
					  slave, port);
				return err;
			}
			mlx4_dbg(&priv->dev, "alloc vlan %d idx  %d slave %d port %d\n",
				 (int)(vp_oper->state.default_vlan),
				 vp_oper->vlan_idx, slave, port);
		}
		if (vp_admin->spoofchk) {
			/* Spoof checking requires the admin mac to be pinned
			 * in the device's mac table.
			 */
			vp_oper->mac_idx = __mlx4_register_mac(&priv->dev,
							       port,
							       vp_admin->mac);
			if (0 > vp_oper->mac_idx) {
				err = vp_oper->mac_idx;
				vp_oper->mac_idx = NO_INDX;
				mlx4_warn(&priv->dev,
					  "No mac resources slave %d, port %d\n",
					  slave, port);
				return err;
			}
			mlx4_dbg(&priv->dev, "alloc mac %llx idx  %d slave %d port %d\n",
				 vp_oper->state.mac, vp_oper->mac_idx, slave, port);
		}
	}
	return 0;
}
1912 
1913 static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave)
1914 {
1915 	int port;
1916 	struct mlx4_vport_oper_state *vp_oper;
1917 	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
1918 			&priv->dev, slave);
1919 	int min_port = find_first_bit(actv_ports.ports,
1920 				      priv->dev.caps.num_ports) + 1;
1921 	int max_port = min_port - 1 +
1922 		bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
1923 
1924 
1925 	for (port = min_port; port <= max_port; port++) {
1926 		if (!test_bit(port - 1, actv_ports.ports))
1927 			continue;
1928 		priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
1929 			MLX4_VF_SMI_DISABLED;
1930 		vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
1931 		if (NO_INDX != vp_oper->vlan_idx) {
1932 			__mlx4_unregister_vlan(&priv->dev,
1933 					       port, vp_oper->state.default_vlan);
1934 			vp_oper->vlan_idx = NO_INDX;
1935 		}
1936 		if (NO_INDX != vp_oper->mac_idx) {
1937 			__mlx4_unregister_mac(&priv->dev, port, vp_oper->state.mac);
1938 			vp_oper->mac_idx = NO_INDX;
1939 		}
1940 	}
1941 	return;
1942 }
1943 
1944 static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
1945 			       u16 param, u8 toggle)
1946 {
1947 	struct mlx4_priv *priv = mlx4_priv(dev);
1948 	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
1949 	u32 reply;
1950 	u8 is_going_down = 0;
1951 	int i;
1952 	unsigned long flags;
1953 
1954 	slave_state[slave].comm_toggle ^= 1;
1955 	reply = (u32) slave_state[slave].comm_toggle << 31;
1956 	if (toggle != slave_state[slave].comm_toggle) {
1957 		mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER STATE COMPROMISED ***\n",
1958 			  toggle, slave);
1959 		goto reset_slave;
1960 	}
1961 	if (cmd == MLX4_COMM_CMD_RESET) {
1962 		mlx4_warn(dev, "Received reset from slave:%d\n", slave);
1963 		slave_state[slave].active = false;
1964 		slave_state[slave].old_vlan_api = false;
1965 		mlx4_master_deactivate_admin_state(priv, slave);
1966 		for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
1967 				slave_state[slave].event_eq[i].eqn = -1;
1968 				slave_state[slave].event_eq[i].token = 0;
1969 		}
1970 		/*check if we are in the middle of FLR process,
1971 		if so return "retry" status to the slave*/
1972 		if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd)
1973 			goto inform_slave_state;
1974 
1975 		mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN, slave);
1976 
1977 		/* write the version in the event field */
1978 		reply |= mlx4_comm_get_version();
1979 
1980 		goto reset_slave;
1981 	}
1982 	/*command from slave in the middle of FLR*/
1983 	if (cmd != MLX4_COMM_CMD_RESET &&
1984 	    MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
1985 		mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) in the middle of FLR\n",
1986 			  slave, cmd);
1987 		return;
1988 	}
1989 
1990 	switch (cmd) {
1991 	case MLX4_COMM_CMD_VHCR0:
1992 		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_RESET)
1993 			goto reset_slave;
1994 		slave_state[slave].vhcr_dma = ((u64) param) << 48;
1995 		priv->mfunc.master.slave_state[slave].cookie = 0;
1996 		mutex_init(&priv->mfunc.master.gen_eqe_mutex[slave]);
1997 		break;
1998 	case MLX4_COMM_CMD_VHCR1:
1999 		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
2000 			goto reset_slave;
2001 		slave_state[slave].vhcr_dma |= ((u64) param) << 32;
2002 		break;
2003 	case MLX4_COMM_CMD_VHCR2:
2004 		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR1)
2005 			goto reset_slave;
2006 		slave_state[slave].vhcr_dma |= ((u64) param) << 16;
2007 		break;
2008 	case MLX4_COMM_CMD_VHCR_EN:
2009 		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2)
2010 			goto reset_slave;
2011 		slave_state[slave].vhcr_dma |= param;
2012 		if (mlx4_master_activate_admin_state(priv, slave))
2013 				goto reset_slave;
2014 		slave_state[slave].active = true;
2015 		mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_INIT, slave);
2016 		break;
2017 	case MLX4_COMM_CMD_VHCR_POST:
2018 		if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
2019 		    (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST)) {
2020 			mlx4_warn(dev, "slave:%d is out of sync, cmd=0x%x, last command=0x%x, reset is needed\n",
2021 				  slave, cmd, slave_state[slave].last_cmd);
2022 			goto reset_slave;
2023 		}
2024 
2025 		mutex_lock(&priv->cmd.slave_cmd_mutex);
2026 		if (mlx4_master_process_vhcr(dev, slave, NULL)) {
2027 			mlx4_err(dev, "Failed processing vhcr for slave:%d, resetting slave\n",
2028 				 slave);
2029 			mutex_unlock(&priv->cmd.slave_cmd_mutex);
2030 			goto reset_slave;
2031 		}
2032 		mutex_unlock(&priv->cmd.slave_cmd_mutex);
2033 		break;
2034 	default:
2035 		mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave);
2036 		goto reset_slave;
2037 	}
2038 	spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
2039 	if (!slave_state[slave].is_slave_going_down)
2040 		slave_state[slave].last_cmd = cmd;
2041 	else
2042 		is_going_down = 1;
2043 	spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
2044 	if (is_going_down) {
2045 		mlx4_warn(dev, "Slave is going down aborting command(%d) executing from slave:%d\n",
2046 			  cmd, slave);
2047 		return;
2048 	}
2049 	__raw_writel((__force u32) cpu_to_be32(reply),
2050 		     &priv->mfunc.comm[slave].slave_read);
2051 	mmiowb();
2052 
2053 	return;
2054 
2055 reset_slave:
2056 	/* cleanup any slave resources */
2057 	if (dev->persist->interface_state & MLX4_INTERFACE_STATE_UP)
2058 		mlx4_delete_all_resources_for_slave(dev, slave);
2059 
2060 	if (cmd != MLX4_COMM_CMD_RESET) {
2061 		mlx4_warn(dev, "Turn on internal error to force reset, slave=%d, cmd=0x%x\n",
2062 			  slave, cmd);
2063 		/* Turn on internal error letting slave reset itself immeditaly,
2064 		 * otherwise it might take till timeout on command is passed
2065 		 */
2066 		reply |= ((u32)COMM_CHAN_EVENT_INTERNAL_ERR);
2067 	}
2068 
2069 	spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
2070 	if (!slave_state[slave].is_slave_going_down)
2071 		slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
2072 	spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
2073 	/*with slave in the middle of flr, no need to clean resources again.*/
2074 inform_slave_state:
2075 	memset(&slave_state[slave].event_eq, 0,
2076 	       sizeof(struct mlx4_slave_event_eq_info));
2077 	__raw_writel((__force u32) cpu_to_be32(reply),
2078 		     &priv->mfunc.comm[slave].slave_read);
2079 	wmb();
2080 }
2081 
/* master command processing */
/* Work handler (priv->mfunc.master.comm_work) that services slave
 * commands posted on the communication channel.  The firmware sets a
 * bit per signalling slave in comm_arm_bit_vector; for each set bit we
 * read that slave's channel registers and, when the write-side toggle
 * differs from the read-side toggle (i.e. an unacknowledged command is
 * pending), dispatch it to mlx4_master_do_cmd().
 */
void mlx4_master_comm_channel(struct work_struct *work)
{
	struct mlx4_mfunc_master_ctx *master =
		container_of(work,
			     struct mlx4_mfunc_master_ctx,
			     comm_work);
	struct mlx4_mfunc *mfunc =
		container_of(master, struct mlx4_mfunc, master);
	struct mlx4_priv *priv =
		container_of(mfunc, struct mlx4_priv, mfunc);
	struct mlx4_dev *dev = &priv->dev;
	__be32 *bit_vec;
	u32 comm_cmd;
	u32 vec;
	int i, j, slave;
	int toggle;
	int served = 0;	/* commands actually dispatched */
	int reported = 0;	/* slaves flagged in the arm bit vector */
	u32 slt;

	bit_vec = master->comm_arm_bit_vector;
	for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++) {
		vec = be32_to_cpu(bit_vec[i]);
		for (j = 0; j < 32; j++) {
			/* skip slaves that did not signal */
			if (!(vec & (1 << j)))
				continue;
			++reported;
			slave = (i * 32) + j;
			/* slave_write: bit 31 = toggle, 23:16 = cmd,
			 * 15:0 = param (see dispatch below)
			 */
			comm_cmd = swab32(readl(
					  &mfunc->comm[slave].slave_write));
			slt = swab32(readl(&mfunc->comm[slave].slave_read))
				     >> 31;
			toggle = comm_cmd >> 31;
			if (toggle != slt) {
				/* resync our cached toggle if it disagrees
				 * with what the slave last acknowledged
				 */
				if (master->slave_state[slave].comm_toggle
				    != slt) {
					pr_info("slave %d out of sync. read toggle %d, state toggle %d. Resynching.\n",
						slave, slt,
						master->slave_state[slave].comm_toggle);
					master->slave_state[slave].comm_toggle =
						slt;
				}
				mlx4_master_do_cmd(dev, slave,
						   comm_cmd >> 16 & 0xff,
						   comm_cmd & 0xffff, toggle);
				++served;
			}
		}
	}

	/* a flagged slave with matching toggles had nothing pending */
	if (reported && reported != served)
		mlx4_warn(dev, "Got command event with bitmask from %d slaves but %d were served\n",
			  reported, served);

	/* re-arm so the next slave command raises another event */
	if (mlx4_ARM_COMM_CHANNEL(dev))
		mlx4_warn(dev, "Failed to arm comm channel events\n");
}
2140 
/* Synchronize this slave's view of the comm-channel toggle bits with
 * the master.  A command may only be posted once the toggle in
 * slave_write matches the toggle echoed back in slave_read.  Polls for
 * up to 5 seconds (30 seconds when the first read returns all-ones,
 * which suggests the PCI device may be offline).  Always returns 0:
 * on timeout the channel is forcibly reset to a known state.
 */
static int sync_toggles(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u32 wr_toggle;
	u32 rd_toggle;
	unsigned long end;

	wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write));
	if (wr_toggle == 0xffffffff)
		end = jiffies + msecs_to_jiffies(30000);
	else
		end = jiffies + msecs_to_jiffies(5000);

	while (time_before(jiffies, end)) {
		rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read));
		if (wr_toggle == 0xffffffff || rd_toggle == 0xffffffff) {
			/* PCI might be offline */
			msleep(100);
			wr_toggle = swab32(readl(&priv->mfunc.comm->
					   slave_write));
			continue;
		}

		/* bit 31 carries the toggle; equality means in sync */
		if (rd_toggle >> 31 == wr_toggle >> 31) {
			priv->cmd.comm_toggle = rd_toggle >> 31;
			return 0;
		}

		cond_resched();
	}

	/*
	 * we could reach here if for example the previous VM using this
	 * function misbehaved and left the channel with unsynced state. We
	 * should fix this here and give this VM a chance to use a properly
	 * synced channel
	 */
	mlx4_warn(dev, "recovering from previously mis-behaved VM\n");
	__raw_writel((__force u32) 0, &priv->mfunc.comm->slave_read);
	__raw_writel((__force u32) 0, &priv->mfunc.comm->slave_write);
	priv->cmd.comm_toggle = 0;

	return 0;
}
2185 
2186 int mlx4_multi_func_init(struct mlx4_dev *dev)
2187 {
2188 	struct mlx4_priv *priv = mlx4_priv(dev);
2189 	struct mlx4_slave_state *s_state;
2190 	int i, j, err, port;
2191 
2192 	if (mlx4_is_master(dev))
2193 		priv->mfunc.comm =
2194 		ioremap(pci_resource_start(dev->persist->pdev,
2195 					   priv->fw.comm_bar) +
2196 			priv->fw.comm_base, MLX4_COMM_PAGESIZE);
2197 	else
2198 		priv->mfunc.comm =
2199 		ioremap(pci_resource_start(dev->persist->pdev, 2) +
2200 			MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
2201 	if (!priv->mfunc.comm) {
2202 		mlx4_err(dev, "Couldn't map communication vector\n");
2203 		goto err_vhcr;
2204 	}
2205 
2206 	if (mlx4_is_master(dev)) {
2207 		priv->mfunc.master.slave_state =
2208 			kzalloc(dev->num_slaves *
2209 				sizeof(struct mlx4_slave_state), GFP_KERNEL);
2210 		if (!priv->mfunc.master.slave_state)
2211 			goto err_comm;
2212 
2213 		priv->mfunc.master.vf_admin =
2214 			kzalloc(dev->num_slaves *
2215 				sizeof(struct mlx4_vf_admin_state), GFP_KERNEL);
2216 		if (!priv->mfunc.master.vf_admin)
2217 			goto err_comm_admin;
2218 
2219 		priv->mfunc.master.vf_oper =
2220 			kzalloc(dev->num_slaves *
2221 				sizeof(struct mlx4_vf_oper_state), GFP_KERNEL);
2222 		if (!priv->mfunc.master.vf_oper)
2223 			goto err_comm_oper;
2224 
2225 		for (i = 0; i < dev->num_slaves; ++i) {
2226 			s_state = &priv->mfunc.master.slave_state[i];
2227 			s_state->last_cmd = MLX4_COMM_CMD_RESET;
2228 			for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j)
2229 				s_state->event_eq[j].eqn = -1;
2230 			__raw_writel((__force u32) 0,
2231 				     &priv->mfunc.comm[i].slave_write);
2232 			__raw_writel((__force u32) 0,
2233 				     &priv->mfunc.comm[i].slave_read);
2234 			mmiowb();
2235 			for (port = 1; port <= MLX4_MAX_PORTS; port++) {
2236 				s_state->vlan_filter[port] =
2237 					kzalloc(sizeof(struct mlx4_vlan_fltr),
2238 						GFP_KERNEL);
2239 				if (!s_state->vlan_filter[port]) {
2240 					if (--port)
2241 						kfree(s_state->vlan_filter[port]);
2242 					goto err_slaves;
2243 				}
2244 				INIT_LIST_HEAD(&s_state->mcast_filters[port]);
2245 				priv->mfunc.master.vf_admin[i].vport[port].default_vlan = MLX4_VGT;
2246 				priv->mfunc.master.vf_oper[i].vport[port].state.default_vlan = MLX4_VGT;
2247 				priv->mfunc.master.vf_oper[i].vport[port].vlan_idx = NO_INDX;
2248 				priv->mfunc.master.vf_oper[i].vport[port].mac_idx = NO_INDX;
2249 			}
2250 			spin_lock_init(&s_state->lock);
2251 		}
2252 
2253 		memset(&priv->mfunc.master.cmd_eqe, 0, dev->caps.eqe_size);
2254 		priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
2255 		INIT_WORK(&priv->mfunc.master.comm_work,
2256 			  mlx4_master_comm_channel);
2257 		INIT_WORK(&priv->mfunc.master.slave_event_work,
2258 			  mlx4_gen_slave_eqe);
2259 		INIT_WORK(&priv->mfunc.master.slave_flr_event_work,
2260 			  mlx4_master_handle_slave_flr);
2261 		spin_lock_init(&priv->mfunc.master.slave_state_lock);
2262 		spin_lock_init(&priv->mfunc.master.slave_eq.event_lock);
2263 		priv->mfunc.master.comm_wq =
2264 			create_singlethread_workqueue("mlx4_comm");
2265 		if (!priv->mfunc.master.comm_wq)
2266 			goto err_slaves;
2267 
2268 		if (mlx4_init_resource_tracker(dev))
2269 			goto err_thread;
2270 
2271 	} else {
2272 		err = sync_toggles(dev);
2273 		if (err) {
2274 			mlx4_err(dev, "Couldn't sync toggles\n");
2275 			goto err_comm;
2276 		}
2277 	}
2278 	return 0;
2279 
2280 err_thread:
2281 	flush_workqueue(priv->mfunc.master.comm_wq);
2282 	destroy_workqueue(priv->mfunc.master.comm_wq);
2283 err_slaves:
2284 	while (--i) {
2285 		for (port = 1; port <= MLX4_MAX_PORTS; port++)
2286 			kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
2287 	}
2288 	kfree(priv->mfunc.master.vf_oper);
2289 err_comm_oper:
2290 	kfree(priv->mfunc.master.vf_admin);
2291 err_comm_admin:
2292 	kfree(priv->mfunc.master.slave_state);
2293 err_comm:
2294 	iounmap(priv->mfunc.comm);
2295 err_vhcr:
2296 	dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
2297 			  priv->mfunc.vhcr,
2298 			  priv->mfunc.vhcr_dma);
2299 	priv->mfunc.vhcr = NULL;
2300 	return -ENOMEM;
2301 }
2302 
2303 int mlx4_cmd_init(struct mlx4_dev *dev)
2304 {
2305 	struct mlx4_priv *priv = mlx4_priv(dev);
2306 	int flags = 0;
2307 
2308 	if (!priv->cmd.initialized) {
2309 		mutex_init(&priv->cmd.slave_cmd_mutex);
2310 		sema_init(&priv->cmd.poll_sem, 1);
2311 		priv->cmd.use_events = 0;
2312 		priv->cmd.toggle     = 1;
2313 		priv->cmd.initialized = 1;
2314 		flags |= MLX4_CMD_CLEANUP_STRUCT;
2315 	}
2316 
2317 	if (!mlx4_is_slave(dev) && !priv->cmd.hcr) {
2318 		priv->cmd.hcr = ioremap(pci_resource_start(dev->persist->pdev,
2319 					0) + MLX4_HCR_BASE, MLX4_HCR_SIZE);
2320 		if (!priv->cmd.hcr) {
2321 			mlx4_err(dev, "Couldn't map command register\n");
2322 			goto err;
2323 		}
2324 		flags |= MLX4_CMD_CLEANUP_HCR;
2325 	}
2326 
2327 	if (mlx4_is_mfunc(dev) && !priv->mfunc.vhcr) {
2328 		priv->mfunc.vhcr = dma_alloc_coherent(&dev->persist->pdev->dev,
2329 						      PAGE_SIZE,
2330 						      &priv->mfunc.vhcr_dma,
2331 						      GFP_KERNEL);
2332 		if (!priv->mfunc.vhcr)
2333 			goto err;
2334 
2335 		flags |= MLX4_CMD_CLEANUP_VHCR;
2336 	}
2337 
2338 	if (!priv->cmd.pool) {
2339 		priv->cmd.pool = pci_pool_create("mlx4_cmd",
2340 						 dev->persist->pdev,
2341 						 MLX4_MAILBOX_SIZE,
2342 						 MLX4_MAILBOX_SIZE, 0);
2343 		if (!priv->cmd.pool)
2344 			goto err;
2345 
2346 		flags |= MLX4_CMD_CLEANUP_POOL;
2347 	}
2348 
2349 	return 0;
2350 
2351 err:
2352 	mlx4_cmd_cleanup(dev, flags);
2353 	return -ENOMEM;
2354 }
2355 
2356 void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev)
2357 {
2358 	struct mlx4_priv *priv = mlx4_priv(dev);
2359 	int slave;
2360 	u32 slave_read;
2361 
2362 	/* Report an internal error event to all
2363 	 * communication channels.
2364 	 */
2365 	for (slave = 0; slave < dev->num_slaves; slave++) {
2366 		slave_read = swab32(readl(&priv->mfunc.comm[slave].slave_read));
2367 		slave_read |= (u32)COMM_CHAN_EVENT_INTERNAL_ERR;
2368 		__raw_writel((__force u32)cpu_to_be32(slave_read),
2369 			     &priv->mfunc.comm[slave].slave_read);
2370 		/* Make sure that our comm channel write doesn't
2371 		 * get mixed in with writes from another CPU.
2372 		 */
2373 		mmiowb();
2374 	}
2375 }
2376 
2377 void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
2378 {
2379 	struct mlx4_priv *priv = mlx4_priv(dev);
2380 	int i, port;
2381 
2382 	if (mlx4_is_master(dev)) {
2383 		flush_workqueue(priv->mfunc.master.comm_wq);
2384 		destroy_workqueue(priv->mfunc.master.comm_wq);
2385 		for (i = 0; i < dev->num_slaves; i++) {
2386 			for (port = 1; port <= MLX4_MAX_PORTS; port++)
2387 				kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
2388 		}
2389 		kfree(priv->mfunc.master.slave_state);
2390 		kfree(priv->mfunc.master.vf_admin);
2391 		kfree(priv->mfunc.master.vf_oper);
2392 		dev->num_slaves = 0;
2393 	}
2394 
2395 	iounmap(priv->mfunc.comm);
2396 }
2397 
2398 void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask)
2399 {
2400 	struct mlx4_priv *priv = mlx4_priv(dev);
2401 
2402 	if (priv->cmd.pool && (cleanup_mask & MLX4_CMD_CLEANUP_POOL)) {
2403 		pci_pool_destroy(priv->cmd.pool);
2404 		priv->cmd.pool = NULL;
2405 	}
2406 
2407 	if (!mlx4_is_slave(dev) && priv->cmd.hcr &&
2408 	    (cleanup_mask & MLX4_CMD_CLEANUP_HCR)) {
2409 		iounmap(priv->cmd.hcr);
2410 		priv->cmd.hcr = NULL;
2411 	}
2412 	if (mlx4_is_mfunc(dev) && priv->mfunc.vhcr &&
2413 	    (cleanup_mask & MLX4_CMD_CLEANUP_VHCR)) {
2414 		dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
2415 				  priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
2416 		priv->mfunc.vhcr = NULL;
2417 	}
2418 	if (priv->cmd.initialized && (cleanup_mask & MLX4_CMD_CLEANUP_STRUCT))
2419 		priv->cmd.initialized = 0;
2420 }
2421 
2422 /*
2423  * Switch to using events to issue FW commands (can only be called
2424  * after event queue for command events has been initialized).
2425  */
2426 int mlx4_cmd_use_events(struct mlx4_dev *dev)
2427 {
2428 	struct mlx4_priv *priv = mlx4_priv(dev);
2429 	int i;
2430 	int err = 0;
2431 
2432 	priv->cmd.context = kmalloc(priv->cmd.max_cmds *
2433 				   sizeof (struct mlx4_cmd_context),
2434 				   GFP_KERNEL);
2435 	if (!priv->cmd.context)
2436 		return -ENOMEM;
2437 
2438 	for (i = 0; i < priv->cmd.max_cmds; ++i) {
2439 		priv->cmd.context[i].token = i;
2440 		priv->cmd.context[i].next  = i + 1;
2441 		/* To support fatal error flow, initialize all
2442 		 * cmd contexts to allow simulating completions
2443 		 * with complete() at any time.
2444 		 */
2445 		init_completion(&priv->cmd.context[i].done);
2446 	}
2447 
2448 	priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
2449 	priv->cmd.free_head = 0;
2450 
2451 	sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
2452 	spin_lock_init(&priv->cmd.context_lock);
2453 
2454 	for (priv->cmd.token_mask = 1;
2455 	     priv->cmd.token_mask < priv->cmd.max_cmds;
2456 	     priv->cmd.token_mask <<= 1)
2457 		; /* nothing */
2458 	--priv->cmd.token_mask;
2459 
2460 	down(&priv->cmd.poll_sem);
2461 	priv->cmd.use_events = 1;
2462 
2463 	return err;
2464 }
2465 
2466 /*
2467  * Switch back to polling (used when shutting down the device)
2468  */
2469 void mlx4_cmd_use_polling(struct mlx4_dev *dev)
2470 {
2471 	struct mlx4_priv *priv = mlx4_priv(dev);
2472 	int i;
2473 
2474 	priv->cmd.use_events = 0;
2475 
2476 	for (i = 0; i < priv->cmd.max_cmds; ++i)
2477 		down(&priv->cmd.event_sem);
2478 
2479 	kfree(priv->cmd.context);
2480 
2481 	up(&priv->cmd.poll_sem);
2482 }
2483 
2484 struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
2485 {
2486 	struct mlx4_cmd_mailbox *mailbox;
2487 
2488 	mailbox = kmalloc(sizeof *mailbox, GFP_KERNEL);
2489 	if (!mailbox)
2490 		return ERR_PTR(-ENOMEM);
2491 
2492 	mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
2493 				      &mailbox->dma);
2494 	if (!mailbox->buf) {
2495 		kfree(mailbox);
2496 		return ERR_PTR(-ENOMEM);
2497 	}
2498 
2499 	memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
2500 
2501 	return mailbox;
2502 }
2503 EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
2504 
2505 void mlx4_free_cmd_mailbox(struct mlx4_dev *dev,
2506 			   struct mlx4_cmd_mailbox *mailbox)
2507 {
2508 	if (!mailbox)
2509 		return;
2510 
2511 	pci_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma);
2512 	kfree(mailbox);
2513 }
2514 EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);
2515 
2516 u32 mlx4_comm_get_version(void)
2517 {
2518 	 return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER;
2519 }
2520 
2521 static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf)
2522 {
2523 	if ((vf < 0) || (vf >= dev->persist->num_vfs)) {
2524 		mlx4_err(dev, "Bad vf number:%d (number of activated vf: %d)\n",
2525 			 vf, dev->persist->num_vfs);
2526 		return -EINVAL;
2527 	}
2528 
2529 	return vf+1;
2530 }
2531 
2532 int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave)
2533 {
2534 	if (slave < 1 || slave > dev->persist->num_vfs) {
2535 		mlx4_err(dev,
2536 			 "Bad slave number:%d (number of activated slaves: %lu)\n",
2537 			 slave, dev->num_slaves);
2538 		return -EINVAL;
2539 	}
2540 	return slave - 1;
2541 }
2542 
2543 void mlx4_cmd_wake_completions(struct mlx4_dev *dev)
2544 {
2545 	struct mlx4_priv *priv = mlx4_priv(dev);
2546 	struct mlx4_cmd_context *context;
2547 	int i;
2548 
2549 	spin_lock(&priv->cmd.context_lock);
2550 	if (priv->cmd.context) {
2551 		for (i = 0; i < priv->cmd.max_cmds; ++i) {
2552 			context = &priv->cmd.context[i];
2553 			context->fw_status = CMD_STAT_INTERNAL_ERR;
2554 			context->result    =
2555 				mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
2556 			complete(&context->done);
2557 		}
2558 	}
2559 	spin_unlock(&priv->cmd.context_lock);
2560 }
2561 
2562 struct mlx4_active_ports mlx4_get_active_ports(struct mlx4_dev *dev, int slave)
2563 {
2564 	struct mlx4_active_ports actv_ports;
2565 	int vf;
2566 
2567 	bitmap_zero(actv_ports.ports, MLX4_MAX_PORTS);
2568 
2569 	if (slave == 0) {
2570 		bitmap_fill(actv_ports.ports, dev->caps.num_ports);
2571 		return actv_ports;
2572 	}
2573 
2574 	vf = mlx4_get_vf_indx(dev, slave);
2575 	if (vf < 0)
2576 		return actv_ports;
2577 
2578 	bitmap_set(actv_ports.ports, dev->dev_vfs[vf].min_port - 1,
2579 		   min((int)dev->dev_vfs[mlx4_get_vf_indx(dev, slave)].n_ports,
2580 		   dev->caps.num_ports));
2581 
2582 	return actv_ports;
2583 }
2584 EXPORT_SYMBOL_GPL(mlx4_get_active_ports);
2585 
2586 int mlx4_slave_convert_port(struct mlx4_dev *dev, int slave, int port)
2587 {
2588 	unsigned n;
2589 	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
2590 	unsigned m = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
2591 
2592 	if (port <= 0 || port > m)
2593 		return -EINVAL;
2594 
2595 	n = find_first_bit(actv_ports.ports, dev->caps.num_ports);
2596 	if (port <= n)
2597 		port = n + 1;
2598 
2599 	return port;
2600 }
2601 EXPORT_SYMBOL_GPL(mlx4_slave_convert_port);
2602 
2603 int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port)
2604 {
2605 	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
2606 	if (test_bit(port - 1, actv_ports.ports))
2607 		return port -
2608 			find_first_bit(actv_ports.ports, dev->caps.num_ports);
2609 
2610 	return -1;
2611 }
2612 EXPORT_SYMBOL_GPL(mlx4_phys_to_slave_port);
2613 
2614 struct mlx4_slaves_pport mlx4_phys_to_slaves_pport(struct mlx4_dev *dev,
2615 						   int port)
2616 {
2617 	unsigned i;
2618 	struct mlx4_slaves_pport slaves_pport;
2619 
2620 	bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);
2621 
2622 	if (port <= 0 || port > dev->caps.num_ports)
2623 		return slaves_pport;
2624 
2625 	for (i = 0; i < dev->persist->num_vfs + 1; i++) {
2626 		struct mlx4_active_ports actv_ports =
2627 			mlx4_get_active_ports(dev, i);
2628 		if (test_bit(port - 1, actv_ports.ports))
2629 			set_bit(i, slaves_pport.slaves);
2630 	}
2631 
2632 	return slaves_pport;
2633 }
2634 EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport);
2635 
2636 struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
2637 		struct mlx4_dev *dev,
2638 		const struct mlx4_active_ports *crit_ports)
2639 {
2640 	unsigned i;
2641 	struct mlx4_slaves_pport slaves_pport;
2642 
2643 	bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);
2644 
2645 	for (i = 0; i < dev->persist->num_vfs + 1; i++) {
2646 		struct mlx4_active_ports actv_ports =
2647 			mlx4_get_active_ports(dev, i);
2648 		if (bitmap_equal(crit_ports->ports, actv_ports.ports,
2649 				 dev->caps.num_ports))
2650 			set_bit(i, slaves_pport.slaves);
2651 	}
2652 
2653 	return slaves_pport;
2654 }
2655 EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv);
2656 
2657 static int mlx4_slaves_closest_port(struct mlx4_dev *dev, int slave, int port)
2658 {
2659 	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
2660 	int min_port = find_first_bit(actv_ports.ports, dev->caps.num_ports)
2661 			+ 1;
2662 	int max_port = min_port +
2663 		bitmap_weight(actv_ports.ports, dev->caps.num_ports);
2664 
2665 	if (port < min_port)
2666 		port = min_port;
2667 	else if (port >= max_port)
2668 		port = max_port - 1;
2669 
2670 	return port;
2671 }
2672 
2673 int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
2674 {
2675 	struct mlx4_priv *priv = mlx4_priv(dev);
2676 	struct mlx4_vport_state *s_info;
2677 	int slave;
2678 
2679 	if (!mlx4_is_master(dev))
2680 		return -EPROTONOSUPPORT;
2681 
2682 	slave = mlx4_get_slave_indx(dev, vf);
2683 	if (slave < 0)
2684 		return -EINVAL;
2685 
2686 	port = mlx4_slaves_closest_port(dev, slave, port);
2687 	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
2688 	s_info->mac = mac;
2689 	mlx4_info(dev, "default mac on vf %d port %d to %llX will take afect only after vf restart\n",
2690 		  vf, port, s_info->mac);
2691 	return 0;
2692 }
2693 EXPORT_SYMBOL_GPL(mlx4_set_vf_mac);
2694 
2695 
2696 int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
2697 {
2698 	struct mlx4_priv *priv = mlx4_priv(dev);
2699 	struct mlx4_vport_state *vf_admin;
2700 	int slave;
2701 
2702 	if ((!mlx4_is_master(dev)) ||
2703 	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VLAN_CONTROL))
2704 		return -EPROTONOSUPPORT;
2705 
2706 	if ((vlan > 4095) || (qos > 7))
2707 		return -EINVAL;
2708 
2709 	slave = mlx4_get_slave_indx(dev, vf);
2710 	if (slave < 0)
2711 		return -EINVAL;
2712 
2713 	port = mlx4_slaves_closest_port(dev, slave, port);
2714 	vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
2715 
2716 	if ((0 == vlan) && (0 == qos))
2717 		vf_admin->default_vlan = MLX4_VGT;
2718 	else
2719 		vf_admin->default_vlan = vlan;
2720 	vf_admin->default_qos = qos;
2721 
2722 	if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
2723 		mlx4_info(dev,
2724 			  "updating vf %d port %d config will take effect on next VF restart\n",
2725 			  vf, port);
2726 	return 0;
2727 }
2728 EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan);
2729 
2730  /* mlx4_get_slave_default_vlan -
2731  * return true if VST ( default vlan)
2732  * if VST, will return vlan & qos (if not NULL)
2733  */
2734 bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave,
2735 				 u16 *vlan, u8 *qos)
2736 {
2737 	struct mlx4_vport_oper_state *vp_oper;
2738 	struct mlx4_priv *priv;
2739 
2740 	priv = mlx4_priv(dev);
2741 	port = mlx4_slaves_closest_port(dev, slave, port);
2742 	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
2743 
2744 	if (MLX4_VGT != vp_oper->state.default_vlan) {
2745 		if (vlan)
2746 			*vlan = vp_oper->state.default_vlan;
2747 		if (qos)
2748 			*qos = vp_oper->state.default_qos;
2749 		return true;
2750 	}
2751 	return false;
2752 }
2753 EXPORT_SYMBOL_GPL(mlx4_get_slave_default_vlan);
2754 
2755 int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
2756 {
2757 	struct mlx4_priv *priv = mlx4_priv(dev);
2758 	struct mlx4_vport_state *s_info;
2759 	int slave;
2760 
2761 	if ((!mlx4_is_master(dev)) ||
2762 	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FSM))
2763 		return -EPROTONOSUPPORT;
2764 
2765 	slave = mlx4_get_slave_indx(dev, vf);
2766 	if (slave < 0)
2767 		return -EINVAL;
2768 
2769 	port = mlx4_slaves_closest_port(dev, slave, port);
2770 	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
2771 	s_info->spoofchk = setting;
2772 
2773 	return 0;
2774 }
2775 EXPORT_SYMBOL_GPL(mlx4_set_vf_spoofchk);
2776 
2777 int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf)
2778 {
2779 	struct mlx4_priv *priv = mlx4_priv(dev);
2780 	struct mlx4_vport_state *s_info;
2781 	int slave;
2782 
2783 	if (!mlx4_is_master(dev))
2784 		return -EPROTONOSUPPORT;
2785 
2786 	slave = mlx4_get_slave_indx(dev, vf);
2787 	if (slave < 0)
2788 		return -EINVAL;
2789 
2790 	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
2791 	ivf->vf = vf;
2792 
2793 	/* need to convert it to a func */
2794 	ivf->mac[0] = ((s_info->mac >> (5*8)) & 0xff);
2795 	ivf->mac[1] = ((s_info->mac >> (4*8)) & 0xff);
2796 	ivf->mac[2] = ((s_info->mac >> (3*8)) & 0xff);
2797 	ivf->mac[3] = ((s_info->mac >> (2*8)) & 0xff);
2798 	ivf->mac[4] = ((s_info->mac >> (1*8)) & 0xff);
2799 	ivf->mac[5] = ((s_info->mac)  & 0xff);
2800 
2801 	ivf->vlan		= s_info->default_vlan;
2802 	ivf->qos		= s_info->default_qos;
2803 	ivf->max_tx_rate	= s_info->tx_rate;
2804 	ivf->min_tx_rate	= 0;
2805 	ivf->spoofchk		= s_info->spoofchk;
2806 	ivf->linkstate		= s_info->link_state;
2807 
2808 	return 0;
2809 }
2810 EXPORT_SYMBOL_GPL(mlx4_get_vf_config);
2811 
2812 int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state)
2813 {
2814 	struct mlx4_priv *priv = mlx4_priv(dev);
2815 	struct mlx4_vport_state *s_info;
2816 	int slave;
2817 	u8 link_stat_event;
2818 
2819 	slave = mlx4_get_slave_indx(dev, vf);
2820 	if (slave < 0)
2821 		return -EINVAL;
2822 
2823 	port = mlx4_slaves_closest_port(dev, slave, port);
2824 	switch (link_state) {
2825 	case IFLA_VF_LINK_STATE_AUTO:
2826 		/* get current link state */
2827 		if (!priv->sense.do_sense_port[port])
2828 			link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
2829 		else
2830 			link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
2831 	    break;
2832 
2833 	case IFLA_VF_LINK_STATE_ENABLE:
2834 		link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
2835 	    break;
2836 
2837 	case IFLA_VF_LINK_STATE_DISABLE:
2838 		link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
2839 	    break;
2840 
2841 	default:
2842 		mlx4_warn(dev, "unknown value for link_state %02x on slave %d port %d\n",
2843 			  link_state, slave, port);
2844 		return -EINVAL;
2845 	};
2846 	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
2847 	s_info->link_state = link_state;
2848 
2849 	/* send event */
2850 	mlx4_gen_port_state_change_eqe(dev, slave, port, link_stat_event);
2851 
2852 	if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
2853 		mlx4_dbg(dev,
2854 			 "updating vf %d port %d no link state HW enforcment\n",
2855 			 vf, port);
2856 	return 0;
2857 }
2858 EXPORT_SYMBOL_GPL(mlx4_set_vf_link_state);
2859 
2860 int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port)
2861 {
2862 	struct mlx4_priv *priv = mlx4_priv(dev);
2863 
2864 	if (slave < 1 || slave >= dev->num_slaves ||
2865 	    port < 1 || port > MLX4_MAX_PORTS)
2866 		return 0;
2867 
2868 	return priv->mfunc.master.vf_oper[slave].smi_enabled[port] ==
2869 		MLX4_VF_SMI_ENABLED;
2870 }
2871 EXPORT_SYMBOL_GPL(mlx4_vf_smi_enabled);
2872 
2873 int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port)
2874 {
2875 	struct mlx4_priv *priv = mlx4_priv(dev);
2876 
2877 	if (slave == mlx4_master_func_num(dev))
2878 		return 1;
2879 
2880 	if (slave < 1 || slave >= dev->num_slaves ||
2881 	    port < 1 || port > MLX4_MAX_PORTS)
2882 		return 0;
2883 
2884 	return priv->mfunc.master.vf_admin[slave].enable_smi[port] ==
2885 		MLX4_VF_SMI_ENABLED;
2886 }
2887 EXPORT_SYMBOL_GPL(mlx4_vf_get_enable_smi_admin);
2888 
2889 int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
2890 				 int enabled)
2891 {
2892 	struct mlx4_priv *priv = mlx4_priv(dev);
2893 
2894 	if (slave == mlx4_master_func_num(dev))
2895 		return 0;
2896 
2897 	if (slave < 1 || slave >= dev->num_slaves ||
2898 	    port < 1 || port > MLX4_MAX_PORTS ||
2899 	    enabled < 0 || enabled > 1)
2900 		return -EINVAL;
2901 
2902 	priv->mfunc.master.vf_admin[slave].enable_smi[port] = enabled;
2903 	return 0;
2904 }
2905 EXPORT_SYMBOL_GPL(mlx4_vf_set_enable_smi_admin);
2906