Lines Matching +full:slave +full:- +full:dev

16  *      - Redistributions of source code must retain the above
20  *      - Redistributions in binary form must reproduce the above
136 static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
142 [CMD_STAT_INTERNAL_ERR] = -EIO, in mlx4_status_to_errno()
143 [CMD_STAT_BAD_OP] = -EPERM, in mlx4_status_to_errno()
144 [CMD_STAT_BAD_PARAM] = -EINVAL, in mlx4_status_to_errno()
145 [CMD_STAT_BAD_SYS_STATE] = -ENXIO, in mlx4_status_to_errno()
146 [CMD_STAT_BAD_RESOURCE] = -EBADF, in mlx4_status_to_errno()
147 [CMD_STAT_RESOURCE_BUSY] = -EBUSY, in mlx4_status_to_errno()
148 [CMD_STAT_EXCEED_LIM] = -ENOMEM, in mlx4_status_to_errno()
149 [CMD_STAT_BAD_RES_STATE] = -EBADF, in mlx4_status_to_errno()
150 [CMD_STAT_BAD_INDEX] = -EBADF, in mlx4_status_to_errno()
151 [CMD_STAT_BAD_NVMEM] = -EFAULT, in mlx4_status_to_errno()
152 [CMD_STAT_ICM_ERROR] = -ENFILE, in mlx4_status_to_errno()
153 [CMD_STAT_BAD_QP_STATE] = -EINVAL, in mlx4_status_to_errno()
154 [CMD_STAT_BAD_SEG_PARAM] = -EFAULT, in mlx4_status_to_errno()
155 [CMD_STAT_REG_BOUND] = -EBUSY, in mlx4_status_to_errno()
156 [CMD_STAT_LAM_NOT_PRE] = -EAGAIN, in mlx4_status_to_errno()
157 [CMD_STAT_BAD_PKT] = -EINVAL, in mlx4_status_to_errno()
158 [CMD_STAT_BAD_SIZE] = -ENOMEM, in mlx4_status_to_errno()
159 [CMD_STAT_MULTI_FUNC_REQ] = -EACCES, in mlx4_status_to_errno()
164 return -EIO; in mlx4_status_to_errno()
172 case -EPERM: in mlx4_errno_to_status()
174 case -EINVAL: in mlx4_errno_to_status()
176 case -ENXIO: in mlx4_errno_to_status()
178 case -EBUSY: in mlx4_errno_to_status()
180 case -ENOMEM: in mlx4_errno_to_status()
182 case -ENFILE: in mlx4_errno_to_status()
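
The two helpers listed above (the table entries around lines 142-159 and the switch cases around lines 172-182) translate firmware command status codes into negative errno values and back. As a minimal standalone sketch of the same lookup-table idiom, with hypothetical demo_* names and only a handful of illustrative status codes rather than the driver's full CMD_STAT_* set:

#include <errno.h>

enum demo_cmd_stat {
	DEMO_STAT_OK           = 0x00,
	DEMO_STAT_INTERNAL_ERR = 0x01,
	DEMO_STAT_BAD_OP       = 0x02,
	DEMO_STAT_BAD_PARAM    = 0x03,
	DEMO_STAT_MAX
};

static int demo_status_to_errno(unsigned int status)
{
	static const int trans_table[DEMO_STAT_MAX] = {
		[DEMO_STAT_INTERNAL_ERR] = -EIO,
		[DEMO_STAT_BAD_OP]       = -EPERM,
		[DEMO_STAT_BAD_PARAM]    = -EINVAL,
	};

	/* unknown or unmapped statuses fall back to a generic I/O error */
	if (status >= DEMO_STAT_MAX || (status && !trans_table[status]))
		return -EIO;
	return trans_table[status];
}
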
189 static int mlx4_internal_err_ret_value(struct mlx4_dev *dev, u16 op, in mlx4_internal_err_ret_value() argument
241 static int mlx4_cmd_reset_flow(struct mlx4_dev *dev, u16 op, u8 op_modifier, in mlx4_cmd_reset_flow() argument
248 mlx4_enter_error_state(dev->persist); in mlx4_cmd_reset_flow()
249 err = mlx4_internal_err_ret_value(dev, op, op_modifier); in mlx4_cmd_reset_flow()
255 static int comm_pending(struct mlx4_dev *dev) in comm_pending() argument
257 struct mlx4_priv *priv = mlx4_priv(dev); in comm_pending()
258 u32 status = readl(&priv->mfunc.comm->slave_read); in comm_pending()
260 return (swab32(status) >> 31) != priv->cmd.comm_toggle; in comm_pending()
263 static int mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param) in mlx4_comm_cmd_post() argument
265 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_comm_cmd_post()
273 mutex_lock(&dev->persist->device_state_mutex); in mlx4_comm_cmd_post()
275 if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) { in mlx4_comm_cmd_post()
276 mutex_unlock(&dev->persist->device_state_mutex); in mlx4_comm_cmd_post()
277 return -EIO; in mlx4_comm_cmd_post()
280 priv->cmd.comm_toggle ^= 1; in mlx4_comm_cmd_post()
281 val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31); in mlx4_comm_cmd_post()
283 &priv->mfunc.comm->slave_write); in mlx4_comm_cmd_post()
284 mutex_unlock(&dev->persist->device_state_mutex); in mlx4_comm_cmd_post()
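
Judging by the packing expression at line 281, the 32-bit word written to slave_write carries the 16-bit parameter in the low half, the command code starting at bit 16, and the toggle in bit 31; the toggle is what comm_pending() compares against. A hedged sketch of that layout (demo_pack_comm_word() is a hypothetical helper, not a driver function):

#include <stdint.h>

static uint32_t demo_pack_comm_word(uint16_t param, uint8_t cmd, unsigned int toggle)
{
	return (uint32_t)param |		/* bits  0..15: parameter        */
	       ((uint32_t)cmd << 16) |		/* bits 16..23: command code     */
	       ((uint32_t)(toggle & 1) << 31);	/* bit      31: toggle/ownership */
}
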
288 static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param, in mlx4_comm_cmd_poll() argument
291 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_comm_cmd_poll()
297 if (comm_pending(dev)) { in mlx4_comm_cmd_poll()
298 mlx4_warn(dev, "Communication channel is not idle - my toggle is %d (cmd:0x%x)\n", in mlx4_comm_cmd_poll()
299 priv->cmd.comm_toggle, cmd); in mlx4_comm_cmd_poll()
300 return -EAGAIN; in mlx4_comm_cmd_poll()
304 down(&priv->cmd.poll_sem); in mlx4_comm_cmd_poll()
305 if (mlx4_comm_cmd_post(dev, cmd, param)) { in mlx4_comm_cmd_poll()
314 while (comm_pending(dev) && time_before(jiffies, end)) in mlx4_comm_cmd_poll()
316 ret_from_pending = comm_pending(dev); in mlx4_comm_cmd_poll()
318 /* check if the slave is trying to boot in the middle of in mlx4_comm_cmd_poll()
319 * FLR process. The only non-zero result in the RESET command in mlx4_comm_cmd_poll()
325 mlx4_warn(dev, "Communication channel command 0x%x timed out\n", in mlx4_comm_cmd_poll()
332 mlx4_enter_error_state(dev->persist); in mlx4_comm_cmd_poll()
334 up(&priv->cmd.poll_sem); in mlx4_comm_cmd_poll()
338 static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 vhcr_cmd, in mlx4_comm_cmd_wait() argument
341 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd; in mlx4_comm_cmd_wait()
346 down(&cmd->event_sem); in mlx4_comm_cmd_wait()
348 spin_lock(&cmd->context_lock); in mlx4_comm_cmd_wait()
349 BUG_ON(cmd->free_head < 0); in mlx4_comm_cmd_wait()
350 context = &cmd->context[cmd->free_head]; in mlx4_comm_cmd_wait()
351 context->token += cmd->token_mask + 1; in mlx4_comm_cmd_wait()
352 cmd->free_head = context->next; in mlx4_comm_cmd_wait()
353 spin_unlock(&cmd->context_lock); in mlx4_comm_cmd_wait()
355 reinit_completion(&context->done); in mlx4_comm_cmd_wait()
357 if (mlx4_comm_cmd_post(dev, vhcr_cmd, param)) { in mlx4_comm_cmd_wait()
365 if (!wait_for_completion_timeout(&context->done, in mlx4_comm_cmd_wait()
367 mlx4_warn(dev, "communication channel command 0x%x (op=0x%x) timed out\n", in mlx4_comm_cmd_wait()
372 err = context->result; in mlx4_comm_cmd_wait()
373 if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) { in mlx4_comm_cmd_wait()
374 mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n", in mlx4_comm_cmd_wait()
375 vhcr_cmd, context->fw_status); in mlx4_comm_cmd_wait()
376 if (mlx4_closing_cmd_fatal_error(op, context->fw_status)) in mlx4_comm_cmd_wait()
387 if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) { in mlx4_comm_cmd_wait()
389 while (comm_pending(dev) && time_before(jiffies, end)) in mlx4_comm_cmd_wait()
396 mlx4_enter_error_state(dev->persist); in mlx4_comm_cmd_wait()
398 spin_lock(&cmd->context_lock); in mlx4_comm_cmd_wait()
399 context->next = cmd->free_head; in mlx4_comm_cmd_wait()
400 cmd->free_head = context - cmd->context; in mlx4_comm_cmd_wait()
401 spin_unlock(&cmd->context_lock); in mlx4_comm_cmd_wait()
403 up(&cmd->event_sem); in mlx4_comm_cmd_wait()
407 int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param, in mlx4_comm_cmd() argument
410 if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) in mlx4_comm_cmd()
413 if (mlx4_priv(dev)->cmd.use_events) in mlx4_comm_cmd()
414 return mlx4_comm_cmd_wait(dev, cmd, param, op, timeout); in mlx4_comm_cmd()
415 return mlx4_comm_cmd_poll(dev, cmd, param, timeout); in mlx4_comm_cmd()
418 static int cmd_pending(struct mlx4_dev *dev) in cmd_pending() argument
422 if (pci_channel_offline(dev->persist->pdev)) in cmd_pending()
423 return -EIO; in cmd_pending()
425 status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET); in cmd_pending()
428 (mlx4_priv(dev)->cmd.toggle == in cmd_pending()
432 static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param, in mlx4_cmd_post() argument
436 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd; in mlx4_cmd_post()
437 u32 __iomem *hcr = cmd->hcr; in mlx4_cmd_post()
438 int ret = -EIO; in mlx4_cmd_post()
441 mutex_lock(&dev->persist->device_state_mutex); in mlx4_cmd_post()
447 if (pci_channel_offline(dev->persist->pdev) || in mlx4_cmd_post()
448 (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) { in mlx4_cmd_post()
460 while (cmd_pending(dev)) { in mlx4_cmd_post()
461 if (pci_channel_offline(dev->persist->pdev)) { in mlx4_cmd_post()
470 mlx4_err(dev, "%s:cmd_pending failed\n", __func__); in mlx4_cmd_post()
493 (cmd->toggle << HCR_T_BIT) | in mlx4_cmd_post()
498 cmd->toggle = cmd->toggle ^ 1; in mlx4_cmd_post()
504 mlx4_warn(dev, "Could not post command 0x%x: ret=%d, in_param=0x%llx, in_mod=0x%x, op_mod=0x%x\n", in mlx4_cmd_post()
506 mutex_unlock(&dev->persist->device_state_mutex); in mlx4_cmd_post()
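
cmd_pending() and the loop at lines 460-470 implement the usual "wait until the HCR is free before posting" pattern: spin on the status register until firmware drops the ownership (go) bit or a deadline expires. A simplified user-space sketch of that poll loop; the demo_* names and the bit position are assumptions for illustration, not the driver's definitions:

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

#define DEMO_GO_BIT (1u << 23)	/* assumed go-bit position, for illustration only */

static bool demo_wait_go_clear(volatile uint32_t *status_reg, int timeout_ms)
{
	struct timespec one_ms = { .tv_sec = 0, .tv_nsec = 1000 * 1000 };

	while (timeout_ms-- > 0) {
		if (!(*status_reg & DEMO_GO_BIT))
			return true;	/* firmware released ownership of the HCR */
		nanosleep(&one_ms, NULL);
	}
	return false;	/* still busy: the caller treats this as a timeout */
}
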
511 static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param, in mlx4_slave_cmd() argument
515 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_slave_cmd()
516 struct mlx4_vhcr_cmd *vhcr = priv->mfunc.vhcr; in mlx4_slave_cmd()
519 mutex_lock(&priv->cmd.slave_cmd_mutex); in mlx4_slave_cmd()
521 vhcr->in_param = cpu_to_be64(in_param); in mlx4_slave_cmd()
522 vhcr->out_param = out_param ? cpu_to_be64(*out_param) : 0; in mlx4_slave_cmd()
523 vhcr->in_modifier = cpu_to_be32(in_modifier); in mlx4_slave_cmd()
524 vhcr->opcode = cpu_to_be16((((u16) op_modifier) << 12) | (op & 0xfff)); in mlx4_slave_cmd()
525 vhcr->token = cpu_to_be16(CMD_POLL_TOKEN); in mlx4_slave_cmd()
526 vhcr->status = 0; in mlx4_slave_cmd()
527 vhcr->flags = !!(priv->cmd.use_events) << 6; in mlx4_slave_cmd()
529 if (mlx4_is_master(dev)) { in mlx4_slave_cmd()
530 ret = mlx4_master_process_vhcr(dev, dev->caps.function, vhcr); in mlx4_slave_cmd()
535 be64_to_cpu(vhcr->out_param); in mlx4_slave_cmd()
537 mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n", in mlx4_slave_cmd()
539 vhcr->status = CMD_STAT_BAD_PARAM; in mlx4_slave_cmd()
542 ret = mlx4_status_to_errno(vhcr->status); in mlx4_slave_cmd()
545 dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) in mlx4_slave_cmd()
546 ret = mlx4_internal_err_ret_value(dev, op, op_modifier); in mlx4_slave_cmd()
548 ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0, op, in mlx4_slave_cmd()
554 be64_to_cpu(vhcr->out_param); in mlx4_slave_cmd()
556 mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n", in mlx4_slave_cmd()
558 vhcr->status = CMD_STAT_BAD_PARAM; in mlx4_slave_cmd()
561 ret = mlx4_status_to_errno(vhcr->status); in mlx4_slave_cmd()
563 if (dev->persist->state & in mlx4_slave_cmd()
565 ret = mlx4_internal_err_ret_value(dev, op, in mlx4_slave_cmd()
568 mlx4_err(dev, "failed execution of VHCR_POST command opcode 0x%x\n", op); in mlx4_slave_cmd()
572 mutex_unlock(&priv->cmd.slave_cmd_mutex); in mlx4_slave_cmd()
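
Line 524 packs the VHCR opcode field with the opcode modifier in the top 4 bits and the command opcode in the low 12; mlx4_master_process_vhcr() later splits it the same way (lines 1702-1703). A minimal sketch of that 16-bit layout, with illustrative demo_* helpers:

#include <stdint.h>

static uint16_t demo_vhcr_opcode_pack(uint16_t op, uint8_t op_modifier)
{
	return (uint16_t)(((uint16_t)op_modifier << 12) | (op & 0xfff));
}

static void demo_vhcr_opcode_unpack(uint16_t opcode, uint16_t *op, uint8_t *op_modifier)
{
	*op = opcode & 0xfff;			/* low 12 bits: command opcode  */
	*op_modifier = (uint8_t)(opcode >> 12);	/* top  4 bits: opcode modifier */
}
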
576 static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param, in mlx4_cmd_poll() argument
580 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_cmd_poll()
581 void __iomem *hcr = priv->cmd.hcr; in mlx4_cmd_poll()
586 down(&priv->cmd.poll_sem); in mlx4_cmd_poll()
588 if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) { in mlx4_cmd_poll()
593 err = mlx4_internal_err_ret_value(dev, op, op_modifier); in mlx4_cmd_poll()
598 mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n", in mlx4_cmd_poll()
600 err = -EINVAL; in mlx4_cmd_poll()
604 err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0, in mlx4_cmd_poll()
610 while (cmd_pending(dev) && time_before(jiffies, end)) { in mlx4_cmd_poll()
611 if (pci_channel_offline(dev->persist->pdev)) { in mlx4_cmd_poll()
616 err = -EIO; in mlx4_cmd_poll()
620 if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) { in mlx4_cmd_poll()
621 err = mlx4_internal_err_ret_value(dev, op, op_modifier); in mlx4_cmd_poll()
628 if (cmd_pending(dev)) { in mlx4_cmd_poll()
629 mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n", in mlx4_cmd_poll()
631 err = -EIO; in mlx4_cmd_poll()
645 mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n", in mlx4_cmd_poll()
654 err = mlx4_cmd_reset_flow(dev, op, op_modifier, err); in mlx4_cmd_poll()
656 up(&priv->cmd.poll_sem); in mlx4_cmd_poll()
660 void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param) in mlx4_cmd_event() argument
662 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_cmd_event()
664 &priv->cmd.context[token & priv->cmd.token_mask]; in mlx4_cmd_event()
667 if (token != context->token) in mlx4_cmd_event()
670 context->fw_status = status; in mlx4_cmd_event()
671 context->result = mlx4_status_to_errno(status); in mlx4_cmd_event()
672 context->out_param = out_param; in mlx4_cmd_event()
674 complete(&context->done); in mlx4_cmd_event()
677 static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param, in mlx4_cmd_wait() argument
681 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd; in mlx4_cmd_wait()
686 down(&cmd->event_sem); in mlx4_cmd_wait()
688 spin_lock(&cmd->context_lock); in mlx4_cmd_wait()
689 BUG_ON(cmd->free_head < 0); in mlx4_cmd_wait()
690 context = &cmd->context[cmd->free_head]; in mlx4_cmd_wait()
691 context->token += cmd->token_mask + 1; in mlx4_cmd_wait()
692 cmd->free_head = context->next; in mlx4_cmd_wait()
693 spin_unlock(&cmd->context_lock); in mlx4_cmd_wait()
696 mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n", in mlx4_cmd_wait()
698 err = -EINVAL; in mlx4_cmd_wait()
702 reinit_completion(&context->done); in mlx4_cmd_wait()
704 err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0, in mlx4_cmd_wait()
705 in_modifier, op_modifier, op, context->token, 1); in mlx4_cmd_wait()
711 wait_for_completion_interruptible_timeout(&context->done, in mlx4_cmd_wait()
714 context->fw_status = 0; in mlx4_cmd_wait()
715 context->out_param = 0; in mlx4_cmd_wait()
716 context->result = 0; in mlx4_cmd_wait()
719 ret_wait = (long)wait_for_completion_timeout(&context->done, in mlx4_cmd_wait()
723 mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n", in mlx4_cmd_wait()
726 err = -EBUSY; in mlx4_cmd_wait()
729 err = -EIO; in mlx4_cmd_wait()
734 err = context->result; in mlx4_cmd_wait()
739 * specific command/input_mod/opcode_mod/fw-status to be debug. in mlx4_cmd_wait()
744 context->fw_status == CMD_STAT_BAD_SIZE) in mlx4_cmd_wait()
745 mlx4_dbg(dev, "command 0x%x failed: fw status = 0x%x\n", in mlx4_cmd_wait()
746 op, context->fw_status); in mlx4_cmd_wait()
748 mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n", in mlx4_cmd_wait()
749 op, context->fw_status); in mlx4_cmd_wait()
750 if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) in mlx4_cmd_wait()
751 err = mlx4_internal_err_ret_value(dev, op, op_modifier); in mlx4_cmd_wait()
752 else if (mlx4_closing_cmd_fatal_error(op, context->fw_status)) in mlx4_cmd_wait()
759 *out_param = context->out_param; in mlx4_cmd_wait()
763 err = mlx4_cmd_reset_flow(dev, op, op_modifier, err); in mlx4_cmd_wait()
765 spin_lock(&cmd->context_lock); in mlx4_cmd_wait()
766 context->next = cmd->free_head; in mlx4_cmd_wait()
767 cmd->free_head = context - cmd->context; in mlx4_cmd_wait()
768 spin_unlock(&cmd->context_lock); in mlx4_cmd_wait()
770 up(&cmd->event_sem); in mlx4_cmd_wait()
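
mlx4_cmd_event() (lines 660-674) indexes the context array with token & token_mask and then compares the full token, while mlx4_cmd_wait() bumps each context's token by token_mask + 1 when it takes a slot (line 691). This suggests the low bits select a slot and the upper bits act as a generation counter that filters stale completions. A hedged sketch of that scheme, assuming a power-of-two pool size; all demo_* names are hypothetical:

#include <stdbool.h>
#include <stdint.h>

#define DEMO_MAX_CMDS   16			/* assumed pool size, must be a power of two */
#define DEMO_TOKEN_MASK (DEMO_MAX_CMDS - 1)

struct demo_ctx {
	uint16_t token;
};

static void demo_init_tokens(struct demo_ctx *ctx)
{
	for (unsigned int i = 0; i < DEMO_MAX_CMDS; i++)
		ctx[i].token = (uint16_t)i;	/* low bits permanently encode the slot index */
}

static uint16_t demo_take_slot(struct demo_ctx *ctx, unsigned int slot)
{
	ctx[slot].token += DEMO_TOKEN_MASK + 1;	/* bump the generation, keep the slot bits */
	return ctx[slot].token;
}

static bool demo_completion_matches(const struct demo_ctx *ctx, uint16_t token)
{
	return ctx[token & DEMO_TOKEN_MASK].token == token;	/* stale completions are ignored */
}
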
774 int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param, in __mlx4_cmd() argument
778 if (pci_channel_offline(dev->persist->pdev)) in __mlx4_cmd()
779 return mlx4_cmd_reset_flow(dev, op, op_modifier, -EIO); in __mlx4_cmd()
781 if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) { in __mlx4_cmd()
784 if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) in __mlx4_cmd()
785 return mlx4_internal_err_ret_value(dev, op, in __mlx4_cmd()
787 down_read(&mlx4_priv(dev)->cmd.switch_sem); in __mlx4_cmd()
788 if (mlx4_priv(dev)->cmd.use_events) in __mlx4_cmd()
789 ret = mlx4_cmd_wait(dev, in_param, out_param, in __mlx4_cmd()
793 ret = mlx4_cmd_poll(dev, in_param, out_param, in __mlx4_cmd()
797 up_read(&mlx4_priv(dev)->cmd.switch_sem); in __mlx4_cmd()
800 return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm, in __mlx4_cmd()
806 int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev) in mlx4_ARM_COMM_CHANNEL() argument
808 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL, in mlx4_ARM_COMM_CHANNEL()
812 static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr, in mlx4_ACCESS_MEM() argument
813 int slave, u64 slave_addr, in mlx4_ACCESS_MEM() argument
820 (slave & ~0x7f) | (size & 0xff)) { in mlx4_ACCESS_MEM()
821 …mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx master_addr:0x%llx slave_id:%d size:%d\n", in mlx4_ACCESS_MEM()
822 slave_addr, master_addr, slave, size); in mlx4_ACCESS_MEM()
823 return -EINVAL; in mlx4_ACCESS_MEM()
827 in_param = (u64) slave | slave_addr; in mlx4_ACCESS_MEM()
828 out_param = (u64) dev->caps.function | master_addr; in mlx4_ACCESS_MEM()
830 in_param = (u64) dev->caps.function | master_addr; in mlx4_ACCESS_MEM()
831 out_param = (u64) slave | slave_addr; in mlx4_ACCESS_MEM()
834 return mlx4_cmd_imm(dev, in_param, &out_param, size, 0, in mlx4_ACCESS_MEM()
839 static int query_pkey_block(struct mlx4_dev *dev, u8 port, u16 index, u16 *pkey, in query_pkey_block() argument
843 struct ib_smp *in_mad = (struct ib_smp *)(inbox->buf); in query_pkey_block()
844 struct ib_smp *out_mad = (struct ib_smp *)(outbox->buf); in query_pkey_block()
849 return -EINVAL; in query_pkey_block()
851 in_mad->attr_mod = cpu_to_be32(index / 32); in query_pkey_block()
853 err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3, in query_pkey_block()
860 pkey[i] = be16_to_cpu(((__be16 *) out_mad->data)[i]); in query_pkey_block()
865 static int get_full_pkey_table(struct mlx4_dev *dev, u8 port, u16 *table, in get_full_pkey_table() argument
872 for (i = 0; i < dev->caps.pkey_table_len[port]; i += 32) { in get_full_pkey_table()
873 err = query_pkey_block(dev, port, i, table + i, inbox, outbox); in get_full_pkey_table()
883 static enum ib_port_state vf_port_state(struct mlx4_dev *dev, int port, int vf) in vf_port_state() argument
885 if (mlx4_get_slave_port_state(dev, vf, port) == SLAVE_PORT_UP) in vf_port_state()
891 static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave, in mlx4_MAD_IFC_wrapper() argument
897 struct ib_smp *smp = inbox->buf; in mlx4_MAD_IFC_wrapper()
905 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_MAD_IFC_wrapper()
906 struct ib_smp *outsmp = outbox->buf; in mlx4_MAD_IFC_wrapper()
907 __be16 *outtab = (__be16 *)(outsmp->data); in mlx4_MAD_IFC_wrapper()
911 slave_port = vhcr->in_modifier; in mlx4_MAD_IFC_wrapper()
912 port = mlx4_slave_convert_port(dev, slave, slave_port); in mlx4_MAD_IFC_wrapper()
914 /* network-view bit is for driver use only, and should not be passed to FW */ in mlx4_MAD_IFC_wrapper()
915 opcode_modifier = vhcr->op_modifier & ~0x8; /* clear netw view bit */ in mlx4_MAD_IFC_wrapper()
916 network_view = !!(vhcr->op_modifier & 0x8); in mlx4_MAD_IFC_wrapper()
918 if (smp->base_version == 1 && in mlx4_MAD_IFC_wrapper()
919 smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED && in mlx4_MAD_IFC_wrapper()
920 smp->class_version == 1) { in mlx4_MAD_IFC_wrapper()
922 if (!network_view && smp->method == IB_MGMT_METHOD_GET) { in mlx4_MAD_IFC_wrapper()
923 if (smp->attr_id == IB_SMP_ATTR_PKEY_TABLE) { in mlx4_MAD_IFC_wrapper()
924 index = be32_to_cpu(smp->attr_mod); in mlx4_MAD_IFC_wrapper()
925 if (port < 1 || port > dev->caps.num_ports) in mlx4_MAD_IFC_wrapper()
926 return -EINVAL; in mlx4_MAD_IFC_wrapper()
927 table = kcalloc((dev->caps.pkey_table_len[port] / 32) + 1, in mlx4_MAD_IFC_wrapper()
931 return -ENOMEM; in mlx4_MAD_IFC_wrapper()
935 err = get_full_pkey_table(dev, port, table, inbox, outbox); in mlx4_MAD_IFC_wrapper()
938 pidx = priv->virt2phys_pkey[slave][port - 1][vidx]; in mlx4_MAD_IFC_wrapper()
945 if (smp->attr_id == IB_SMP_ATTR_PORT_INFO) { in mlx4_MAD_IFC_wrapper()
946 /*get the slave specific caps:*/ in mlx4_MAD_IFC_wrapper()
948 smp->attr_mod = cpu_to_be32(port); in mlx4_MAD_IFC_wrapper()
949 err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, in mlx4_MAD_IFC_wrapper()
951 vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); in mlx4_MAD_IFC_wrapper()
953 if (!err && slave != mlx4_master_func_num(dev)) { in mlx4_MAD_IFC_wrapper()
954 u8 *state = outsmp->data + PORT_STATE_OFFSET; in mlx4_MAD_IFC_wrapper()
956 *state = (*state & 0xf0) | vf_port_state(dev, port, slave); in mlx4_MAD_IFC_wrapper()
957 slave_cap_mask = priv->mfunc.master.slave_state[slave].ib_cap_mask[port]; in mlx4_MAD_IFC_wrapper()
958 memcpy(outsmp->data + PORT_CAPABILITY_LOCATION_IN_SMP, &slave_cap_mask, 4); in mlx4_MAD_IFC_wrapper()
962 if (smp->attr_id == IB_SMP_ATTR_GUID_INFO) { in mlx4_MAD_IFC_wrapper()
963 __be64 guid = mlx4_get_admin_guid(dev, slave, in mlx4_MAD_IFC_wrapper()
969 if (slave == 0 && guid == 0) { in mlx4_MAD_IFC_wrapper()
970 smp->attr_mod = 0; in mlx4_MAD_IFC_wrapper()
971 err = mlx4_cmd_box(dev, in mlx4_MAD_IFC_wrapper()
972 inbox->dma, in mlx4_MAD_IFC_wrapper()
973 outbox->dma, in mlx4_MAD_IFC_wrapper()
974 vhcr->in_modifier, in mlx4_MAD_IFC_wrapper()
976 vhcr->op, in mlx4_MAD_IFC_wrapper()
981 mlx4_set_admin_guid(dev, in mlx4_MAD_IFC_wrapper()
982 *(__be64 *)outsmp-> in mlx4_MAD_IFC_wrapper()
983 data, slave, port); in mlx4_MAD_IFC_wrapper()
985 memcpy(outsmp->data, &guid, 8); in mlx4_MAD_IFC_wrapper()
989 memset(outsmp->data + 8, 0, 56); in mlx4_MAD_IFC_wrapper()
992 if (smp->attr_id == IB_SMP_ATTR_NODE_INFO) { in mlx4_MAD_IFC_wrapper()
993 err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, in mlx4_MAD_IFC_wrapper()
995 vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); in mlx4_MAD_IFC_wrapper()
997 slave_node_guid = mlx4_get_slave_node_guid(dev, slave); in mlx4_MAD_IFC_wrapper()
998 memcpy(outsmp->data + 12, &slave_node_guid, 8); in mlx4_MAD_IFC_wrapper()
1005 /* Non-privileged VFs are only allowed "host" view LID-routed 'Get' MADs. in mlx4_MAD_IFC_wrapper()
1008 if (slave != mlx4_master_func_num(dev) && in mlx4_MAD_IFC_wrapper()
1009 !mlx4_vf_smi_enabled(dev, slave, port)) { in mlx4_MAD_IFC_wrapper()
1010 if (!(smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED && in mlx4_MAD_IFC_wrapper()
1011 smp->method == IB_MGMT_METHOD_GET) || network_view) { in mlx4_MAD_IFC_wrapper()
1012 …mlx4_err(dev, "Unprivileged slave %d is trying to execute a Subnet MGMT MAD, class 0x%x, method 0x… in mlx4_MAD_IFC_wrapper()
1013 slave, smp->mgmt_class, smp->method, in mlx4_MAD_IFC_wrapper()
1015 be16_to_cpu(smp->attr_id)); in mlx4_MAD_IFC_wrapper()
1016 return -EPERM; in mlx4_MAD_IFC_wrapper()
1020 return mlx4_cmd_box(dev, inbox->dma, outbox->dma, in mlx4_MAD_IFC_wrapper()
1021 vhcr->in_modifier, opcode_modifier, in mlx4_MAD_IFC_wrapper()
1022 vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); in mlx4_MAD_IFC_wrapper()
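
Lines 915-916 split vhcr->op_modifier into the value forwarded to firmware and a driver-only "network view" flag carried in bit 3, which is cleared before the MAD goes out. A tiny sketch of that split, with hypothetical demo_* names:

#include <stdbool.h>
#include <stdint.h>

#define DEMO_NETW_VIEW_BIT 0x8	/* driver-only flag, never passed to firmware */

static uint8_t demo_split_op_modifier(uint8_t op_modifier, bool *network_view)
{
	*network_view = !!(op_modifier & DEMO_NETW_VIEW_BIT);
	return op_modifier & (uint8_t)~DEMO_NETW_VIEW_BIT;	/* what firmware sees */
}
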
1025 static int mlx4_CMD_EPERM_wrapper(struct mlx4_dev *dev, int slave, in mlx4_CMD_EPERM_wrapper() argument
1031 return -EPERM; in mlx4_CMD_EPERM_wrapper()
1034 int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave, in mlx4_DMA_wrapper() argument
1044 in_param = cmd->has_inbox ? (u64) inbox->dma : vhcr->in_param; in mlx4_DMA_wrapper()
1045 out_param = cmd->has_outbox ? (u64) outbox->dma : vhcr->out_param; in mlx4_DMA_wrapper()
1046 if (cmd->encode_slave_id) { in mlx4_DMA_wrapper()
1048 in_param |= slave; in mlx4_DMA_wrapper()
1051 err = __mlx4_cmd(dev, in_param, &out_param, cmd->out_is_imm, in mlx4_DMA_wrapper()
1052 vhcr->in_modifier, vhcr->op_modifier, vhcr->op, in mlx4_DMA_wrapper()
1055 if (cmd->out_is_imm) in mlx4_DMA_wrapper()
1056 vhcr->out_param = out_param; in mlx4_DMA_wrapper()
1661 static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave, in mlx4_master_process_vhcr() argument
1664 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_master_process_vhcr()
1666 struct mlx4_vhcr_cmd *vhcr_cmd = in_vhcr ? in_vhcr : priv->mfunc.vhcr; in mlx4_master_process_vhcr()
1679 return -ENOMEM; in mlx4_master_process_vhcr()
1683 ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave, in mlx4_master_process_vhcr()
1684 priv->mfunc.master.slave_state[slave].vhcr_dma, in mlx4_master_process_vhcr()
1688 if (!(dev->persist->state & in mlx4_master_process_vhcr()
1690 mlx4_err(dev, "%s: Failed reading vhcr ret: 0x%x\n", in mlx4_master_process_vhcr()
1698 vhcr->in_param = be64_to_cpu(vhcr_cmd->in_param); in mlx4_master_process_vhcr()
1699 vhcr->out_param = be64_to_cpu(vhcr_cmd->out_param); in mlx4_master_process_vhcr()
1700 vhcr->in_modifier = be32_to_cpu(vhcr_cmd->in_modifier); in mlx4_master_process_vhcr()
1701 vhcr->token = be16_to_cpu(vhcr_cmd->token); in mlx4_master_process_vhcr()
1702 vhcr->op = be16_to_cpu(vhcr_cmd->opcode) & 0xfff; in mlx4_master_process_vhcr()
1703 vhcr->op_modifier = (u8) (be16_to_cpu(vhcr_cmd->opcode) >> 12); in mlx4_master_process_vhcr()
1704 vhcr->e_bit = vhcr_cmd->flags & (1 << 6); in mlx4_master_process_vhcr()
1708 if (vhcr->op == cmd_info[i].opcode) { in mlx4_master_process_vhcr()
1714 mlx4_err(dev, "Unknown command:0x%x accepted from slave:%d\n", in mlx4_master_process_vhcr()
1715 vhcr->op, slave); in mlx4_master_process_vhcr()
1716 vhcr_cmd->status = CMD_STAT_BAD_PARAM; in mlx4_master_process_vhcr()
1721 if (cmd->has_inbox) { in mlx4_master_process_vhcr()
1722 vhcr->in_param &= INBOX_MASK; in mlx4_master_process_vhcr()
1723 inbox = mlx4_alloc_cmd_mailbox(dev); in mlx4_master_process_vhcr()
1725 vhcr_cmd->status = CMD_STAT_BAD_SIZE; in mlx4_master_process_vhcr()
1730 ret = mlx4_ACCESS_MEM(dev, inbox->dma, slave, in mlx4_master_process_vhcr()
1731 vhcr->in_param, in mlx4_master_process_vhcr()
1734 if (!(dev->persist->state & in mlx4_master_process_vhcr()
1736 mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n", in mlx4_master_process_vhcr()
1737 __func__, cmd->opcode); in mlx4_master_process_vhcr()
1738 vhcr_cmd->status = CMD_STAT_INTERNAL_ERR; in mlx4_master_process_vhcr()
1744 if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) { in mlx4_master_process_vhcr()
1745 mlx4_warn(dev, "Command:0x%x from slave: %d failed protection checks for resource_id:%d\n", in mlx4_master_process_vhcr()
1746 vhcr->op, slave, vhcr->in_modifier); in mlx4_master_process_vhcr()
1747 vhcr_cmd->status = CMD_STAT_BAD_OP; in mlx4_master_process_vhcr()
1752 if (cmd->has_outbox) { in mlx4_master_process_vhcr()
1753 outbox = mlx4_alloc_cmd_mailbox(dev); in mlx4_master_process_vhcr()
1755 vhcr_cmd->status = CMD_STAT_BAD_SIZE; in mlx4_master_process_vhcr()
1762 if (cmd->wrapper) { in mlx4_master_process_vhcr()
1763 err = cmd->wrapper(dev, slave, vhcr, inbox, outbox, in mlx4_master_process_vhcr()
1765 if (cmd->out_is_imm) in mlx4_master_process_vhcr()
1766 vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param); in mlx4_master_process_vhcr()
1768 in_param = cmd->has_inbox ? (u64) inbox->dma : in mlx4_master_process_vhcr()
1769 vhcr->in_param; in mlx4_master_process_vhcr()
1770 out_param = cmd->has_outbox ? (u64) outbox->dma : in mlx4_master_process_vhcr()
1771 vhcr->out_param; in mlx4_master_process_vhcr()
1772 err = __mlx4_cmd(dev, in_param, &out_param, in mlx4_master_process_vhcr()
1773 cmd->out_is_imm, vhcr->in_modifier, in mlx4_master_process_vhcr()
1774 vhcr->op_modifier, vhcr->op, in mlx4_master_process_vhcr()
1778 if (cmd->out_is_imm) { in mlx4_master_process_vhcr()
1779 vhcr->out_param = out_param; in mlx4_master_process_vhcr()
1780 vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param); in mlx4_master_process_vhcr()
1785 if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) { in mlx4_master_process_vhcr()
1786 if (vhcr->op == MLX4_CMD_ALLOC_RES && in mlx4_master_process_vhcr()
1787 (vhcr->in_modifier & 0xff) == RES_COUNTER && in mlx4_master_process_vhcr()
1788 err == -EDQUOT) in mlx4_master_process_vhcr()
1789 mlx4_dbg(dev, in mlx4_master_process_vhcr()
1790 "Unable to allocate counter for slave %d (%d)\n", in mlx4_master_process_vhcr()
1791 slave, err); in mlx4_master_process_vhcr()
1793 mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n", in mlx4_master_process_vhcr()
1794 vhcr->op, slave, vhcr->errno, err); in mlx4_master_process_vhcr()
1796 vhcr_cmd->status = mlx4_errno_to_status(err); in mlx4_master_process_vhcr()
1802 if (cmd->has_outbox && !vhcr_cmd->status) { in mlx4_master_process_vhcr()
1803 ret = mlx4_ACCESS_MEM(dev, outbox->dma, slave, in mlx4_master_process_vhcr()
1804 vhcr->out_param, in mlx4_master_process_vhcr()
1809 * slave, as it is now in undefined state */ in mlx4_master_process_vhcr()
1810 if (!(dev->persist->state & in mlx4_master_process_vhcr()
1812 mlx4_err(dev, "%s:Failed writing outbox\n", __func__); in mlx4_master_process_vhcr()
1820 ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave, in mlx4_master_process_vhcr()
1821 priv->mfunc.master.slave_state[slave].vhcr_dma, in mlx4_master_process_vhcr()
1826 mlx4_err(dev, "%s:Failed writing vhcr result\n", in mlx4_master_process_vhcr()
1828 else if (vhcr->e_bit && in mlx4_master_process_vhcr()
1829 mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe)) in mlx4_master_process_vhcr()
1830 mlx4_warn(dev, "Failed to generate command completion eqe for slave %d\n", in mlx4_master_process_vhcr()
1831 slave); in mlx4_master_process_vhcr()
1836 mlx4_free_cmd_mailbox(dev, inbox); in mlx4_master_process_vhcr()
1837 mlx4_free_cmd_mailbox(dev, outbox); in mlx4_master_process_vhcr()
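
Around line 1708 the master resolves the slave's opcode by scanning a command table, and an opcode with no entry is rejected with CMD_STAT_BAD_PARAM (line 1716). A small sketch of that linear lookup; the demo_* types are illustrative and not the driver's cmd_info[] layout:

#include <stddef.h>
#include <stdint.h>

struct demo_cmd_info {
	uint16_t opcode;
	int has_inbox;
	int has_outbox;
};

static const struct demo_cmd_info *
demo_find_cmd(const struct demo_cmd_info *table, size_t n, uint16_t op)
{
	for (size_t i = 0; i < n; i++)
		if (table[i].opcode == op)
			return &table[i];
	return NULL;	/* unknown command: the caller reports a bad-parameter status */
}
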
1842 int slave, int port) in mlx4_master_immediate_activate_vlan_qos() argument
1847 struct mlx4_dev *dev = &(priv->dev); in mlx4_master_immediate_activate_vlan_qos() local
1851 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; in mlx4_master_immediate_activate_vlan_qos()
1852 vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port]; in mlx4_master_immediate_activate_vlan_qos()
1854 if (vp_oper->state.default_vlan == vp_admin->default_vlan && in mlx4_master_immediate_activate_vlan_qos()
1855 vp_oper->state.default_qos == vp_admin->default_qos && in mlx4_master_immediate_activate_vlan_qos()
1856 vp_oper->state.vlan_proto == vp_admin->vlan_proto && in mlx4_master_immediate_activate_vlan_qos()
1857 vp_oper->state.link_state == vp_admin->link_state && in mlx4_master_immediate_activate_vlan_qos()
1858 vp_oper->state.qos_vport == vp_admin->qos_vport) in mlx4_master_immediate_activate_vlan_qos()
1861 if (!(priv->mfunc.master.slave_state[slave].active && in mlx4_master_immediate_activate_vlan_qos()
1862 dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)) { in mlx4_master_immediate_activate_vlan_qos()
1866 vp_oper->state.link_state = vp_admin->link_state; in mlx4_master_immediate_activate_vlan_qos()
1867 return -1; in mlx4_master_immediate_activate_vlan_qos()
1870 mlx4_dbg(dev, "updating immediately admin params slave %d port %d\n", in mlx4_master_immediate_activate_vlan_qos()
1871 slave, port); in mlx4_master_immediate_activate_vlan_qos()
1872 mlx4_dbg(dev, "vlan %d QoS %d link down %d\n", in mlx4_master_immediate_activate_vlan_qos()
1873 vp_admin->default_vlan, vp_admin->default_qos, in mlx4_master_immediate_activate_vlan_qos()
1874 vp_admin->link_state); in mlx4_master_immediate_activate_vlan_qos()
1878 return -ENOMEM; in mlx4_master_immediate_activate_vlan_qos()
1880 if (vp_oper->state.default_vlan != vp_admin->default_vlan) { in mlx4_master_immediate_activate_vlan_qos()
1881 if (MLX4_VGT != vp_admin->default_vlan) { in mlx4_master_immediate_activate_vlan_qos()
1882 err = __mlx4_register_vlan(&priv->dev, port, in mlx4_master_immediate_activate_vlan_qos()
1883 vp_admin->default_vlan, in mlx4_master_immediate_activate_vlan_qos()
1887 mlx4_warn(&priv->dev, in mlx4_master_immediate_activate_vlan_qos()
1888 "No vlan resources slave %d, port %d\n", in mlx4_master_immediate_activate_vlan_qos()
1889 slave, port); in mlx4_master_immediate_activate_vlan_qos()
1895 work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN; in mlx4_master_immediate_activate_vlan_qos()
1896 mlx4_dbg(&priv->dev, in mlx4_master_immediate_activate_vlan_qos()
1897 "alloc vlan %d idx %d slave %d port %d\n", in mlx4_master_immediate_activate_vlan_qos()
1898 (int)(vp_admin->default_vlan), in mlx4_master_immediate_activate_vlan_qos()
1899 admin_vlan_ix, slave, port); in mlx4_master_immediate_activate_vlan_qos()
1903 work->orig_vlan_id = vp_oper->state.default_vlan; in mlx4_master_immediate_activate_vlan_qos()
1904 work->orig_vlan_ix = vp_oper->vlan_idx; in mlx4_master_immediate_activate_vlan_qos()
1907 if (vp_oper->state.default_qos != vp_admin->default_qos) in mlx4_master_immediate_activate_vlan_qos()
1908 work->flags |= MLX4_VF_IMMED_VLAN_FLAG_QOS; in mlx4_master_immediate_activate_vlan_qos()
1910 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN) in mlx4_master_immediate_activate_vlan_qos()
1911 vp_oper->vlan_idx = admin_vlan_ix; in mlx4_master_immediate_activate_vlan_qos()
1913 vp_oper->state.default_vlan = vp_admin->default_vlan; in mlx4_master_immediate_activate_vlan_qos()
1914 vp_oper->state.default_qos = vp_admin->default_qos; in mlx4_master_immediate_activate_vlan_qos()
1915 vp_oper->state.vlan_proto = vp_admin->vlan_proto; in mlx4_master_immediate_activate_vlan_qos()
1916 vp_oper->state.link_state = vp_admin->link_state; in mlx4_master_immediate_activate_vlan_qos()
1917 vp_oper->state.qos_vport = vp_admin->qos_vport; in mlx4_master_immediate_activate_vlan_qos()
1919 if (vp_admin->link_state == IFLA_VF_LINK_STATE_DISABLE) in mlx4_master_immediate_activate_vlan_qos()
1920 work->flags |= MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE; in mlx4_master_immediate_activate_vlan_qos()
1922 /* iterate over QPs owned by this slave, using UPDATE_QP */ in mlx4_master_immediate_activate_vlan_qos()
1923 work->port = port; in mlx4_master_immediate_activate_vlan_qos()
1924 work->slave = slave; in mlx4_master_immediate_activate_vlan_qos()
1925 work->qos = vp_oper->state.default_qos; in mlx4_master_immediate_activate_vlan_qos()
1926 work->qos_vport = vp_oper->state.qos_vport; in mlx4_master_immediate_activate_vlan_qos()
1927 work->vlan_id = vp_oper->state.default_vlan; in mlx4_master_immediate_activate_vlan_qos()
1928 work->vlan_ix = vp_oper->vlan_idx; in mlx4_master_immediate_activate_vlan_qos()
1929 work->vlan_proto = vp_oper->state.vlan_proto; in mlx4_master_immediate_activate_vlan_qos()
1930 work->priv = priv; in mlx4_master_immediate_activate_vlan_qos()
1931 INIT_WORK(&work->work, mlx4_vf_immed_vlan_work_handler); in mlx4_master_immediate_activate_vlan_qos()
1932 queue_work(priv->mfunc.master.comm_wq, &work->work); in mlx4_master_immediate_activate_vlan_qos()
1937 static void mlx4_set_default_port_qos(struct mlx4_dev *dev, int port) in mlx4_set_default_port_qos() argument
1940 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_set_default_port_qos()
1942 port_qos_ctl = &priv->mfunc.master.qos_ctl[port]; in mlx4_set_default_port_qos()
1943 bitmap_zero(port_qos_ctl->priority_bm, MLX4_NUM_UP); in mlx4_set_default_port_qos()
1946 set_bit(MLX4_DEFAULT_QOS_PRIO, port_qos_ctl->priority_bm); in mlx4_set_default_port_qos()
1949 static void mlx4_allocate_port_vpps(struct mlx4_dev *dev, int port) in mlx4_allocate_port_vpps() argument
1957 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_allocate_port_vpps()
1959 err = mlx4_ALLOCATE_VPP_get(dev, port, &available_vpp, vpp_param); in mlx4_allocate_port_vpps()
1961 mlx4_info(dev, "Failed query available VPPs\n"); in mlx4_allocate_port_vpps()
1965 port_qos = &priv->mfunc.master.qos_ctl[port]; in mlx4_allocate_port_vpps()
1967 bitmap_weight(port_qos->priority_bm, MLX4_NUM_UP)); in mlx4_allocate_port_vpps()
1970 if (test_bit(i, port_qos->priority_bm)) in mlx4_allocate_port_vpps()
1974 err = mlx4_ALLOCATE_VPP_set(dev, port, vpp_param); in mlx4_allocate_port_vpps()
1976 mlx4_info(dev, "Failed allocating VPPs\n"); in mlx4_allocate_port_vpps()
1981 err = mlx4_ALLOCATE_VPP_get(dev, port, &available_vpp, vpp_param); in mlx4_allocate_port_vpps()
1983 mlx4_info(dev, "Failed query available VPPs\n"); in mlx4_allocate_port_vpps()
1987 port_qos->num_of_qos_vfs = num_vfs; in mlx4_allocate_port_vpps()
1988 mlx4_dbg(dev, "Port %d Available VPPs %d\n", port, available_vpp); in mlx4_allocate_port_vpps()
1991 mlx4_dbg(dev, "Port %d UP %d Allocated %d VPPs\n", port, i, in mlx4_allocate_port_vpps()
1995 static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave) in mlx4_master_activate_admin_state() argument
2001 &priv->mfunc.master.slave_state[slave]; in mlx4_master_activate_admin_state()
2003 &priv->dev, slave); in mlx4_master_activate_admin_state()
2005 for_each_set_bit(p, actv_ports.ports, priv->dev.caps.num_ports) { in mlx4_master_activate_admin_state()
2007 priv->mfunc.master.vf_oper[slave].smi_enabled[port] = in mlx4_master_activate_admin_state()
2008 priv->mfunc.master.vf_admin[slave].enable_smi[port]; in mlx4_master_activate_admin_state()
2009 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; in mlx4_master_activate_admin_state()
2010 vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port]; in mlx4_master_activate_admin_state()
2011 if (vp_admin->vlan_proto != htons(ETH_P_8021AD) || in mlx4_master_activate_admin_state()
2012 slave_state->vst_qinq_supported) { in mlx4_master_activate_admin_state()
2013 vp_oper->state.vlan_proto = vp_admin->vlan_proto; in mlx4_master_activate_admin_state()
2014 vp_oper->state.default_vlan = vp_admin->default_vlan; in mlx4_master_activate_admin_state()
2015 vp_oper->state.default_qos = vp_admin->default_qos; in mlx4_master_activate_admin_state()
2017 vp_oper->state.link_state = vp_admin->link_state; in mlx4_master_activate_admin_state()
2018 vp_oper->state.mac = vp_admin->mac; in mlx4_master_activate_admin_state()
2019 vp_oper->state.spoofchk = vp_admin->spoofchk; in mlx4_master_activate_admin_state()
2020 vp_oper->state.tx_rate = vp_admin->tx_rate; in mlx4_master_activate_admin_state()
2021 vp_oper->state.qos_vport = vp_admin->qos_vport; in mlx4_master_activate_admin_state()
2022 vp_oper->state.guid = vp_admin->guid; in mlx4_master_activate_admin_state()
2024 if (MLX4_VGT != vp_admin->default_vlan) { in mlx4_master_activate_admin_state()
2025 err = __mlx4_register_vlan(&priv->dev, port, in mlx4_master_activate_admin_state()
2026 vp_admin->default_vlan, &(vp_oper->vlan_idx)); in mlx4_master_activate_admin_state()
2028 vp_oper->vlan_idx = NO_INDX; in mlx4_master_activate_admin_state()
2029 vp_oper->state.default_vlan = MLX4_VGT; in mlx4_master_activate_admin_state()
2030 vp_oper->state.vlan_proto = htons(ETH_P_8021Q); in mlx4_master_activate_admin_state()
2031 mlx4_warn(&priv->dev, in mlx4_master_activate_admin_state()
2032 "No vlan resources slave %d, port %d\n", in mlx4_master_activate_admin_state()
2033 slave, port); in mlx4_master_activate_admin_state()
2036 mlx4_dbg(&priv->dev, "alloc vlan %d idx %d slave %d port %d\n", in mlx4_master_activate_admin_state()
2037 (int)(vp_oper->state.default_vlan), in mlx4_master_activate_admin_state()
2038 vp_oper->vlan_idx, slave, port); in mlx4_master_activate_admin_state()
2040 if (vp_admin->spoofchk) { in mlx4_master_activate_admin_state()
2041 vp_oper->mac_idx = __mlx4_register_mac(&priv->dev, in mlx4_master_activate_admin_state()
2043 vp_admin->mac); in mlx4_master_activate_admin_state()
2044 if (0 > vp_oper->mac_idx) { in mlx4_master_activate_admin_state()
2045 err = vp_oper->mac_idx; in mlx4_master_activate_admin_state()
2046 vp_oper->mac_idx = NO_INDX; in mlx4_master_activate_admin_state()
2047 mlx4_warn(&priv->dev, in mlx4_master_activate_admin_state()
2048 "No mac resources slave %d, port %d\n", in mlx4_master_activate_admin_state()
2049 slave, port); in mlx4_master_activate_admin_state()
2052 mlx4_dbg(&priv->dev, "alloc mac %llx idx %d slave %d port %d\n", in mlx4_master_activate_admin_state()
2053 vp_oper->state.mac, vp_oper->mac_idx, slave, port); in mlx4_master_activate_admin_state()
2059 static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave) in mlx4_master_deactivate_admin_state() argument
2064 &priv->dev, slave); in mlx4_master_deactivate_admin_state()
2066 for_each_set_bit(p, actv_ports.ports, priv->dev.caps.num_ports) { in mlx4_master_deactivate_admin_state()
2068 priv->mfunc.master.vf_oper[slave].smi_enabled[port] = in mlx4_master_deactivate_admin_state()
2070 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; in mlx4_master_deactivate_admin_state()
2071 if (NO_INDX != vp_oper->vlan_idx) { in mlx4_master_deactivate_admin_state()
2072 __mlx4_unregister_vlan(&priv->dev, in mlx4_master_deactivate_admin_state()
2073 port, vp_oper->state.default_vlan); in mlx4_master_deactivate_admin_state()
2074 vp_oper->vlan_idx = NO_INDX; in mlx4_master_deactivate_admin_state()
2076 if (NO_INDX != vp_oper->mac_idx) { in mlx4_master_deactivate_admin_state()
2077 __mlx4_unregister_mac(&priv->dev, port, vp_oper->state.mac); in mlx4_master_deactivate_admin_state()
2078 vp_oper->mac_idx = NO_INDX; in mlx4_master_deactivate_admin_state()
2084 static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd, in mlx4_master_do_cmd() argument
2087 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_master_do_cmd()
2088 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state; in mlx4_master_do_cmd()
2094 slave_state[slave].comm_toggle ^= 1; in mlx4_master_do_cmd()
2095 reply = (u32) slave_state[slave].comm_toggle << 31; in mlx4_master_do_cmd()
2096 if (toggle != slave_state[slave].comm_toggle) { in mlx4_master_do_cmd()
2097 mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER STATE COMPROMISED ***\n", in mlx4_master_do_cmd()
2098 toggle, slave); in mlx4_master_do_cmd()
2102 mlx4_warn(dev, "Received reset from slave:%d\n", slave); in mlx4_master_do_cmd()
2103 slave_state[slave].active = false; in mlx4_master_do_cmd()
2104 slave_state[slave].old_vlan_api = false; in mlx4_master_do_cmd()
2105 slave_state[slave].vst_qinq_supported = false; in mlx4_master_do_cmd()
2106 mlx4_master_deactivate_admin_state(priv, slave); in mlx4_master_do_cmd()
2108 slave_state[slave].event_eq[i].eqn = -1; in mlx4_master_do_cmd()
2109 slave_state[slave].event_eq[i].token = 0; in mlx4_master_do_cmd()
2112 if so return "retry" status to the slave*/ in mlx4_master_do_cmd()
2113 if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) in mlx4_master_do_cmd()
2116 mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN, &slave); in mlx4_master_do_cmd()
2123 /*command from slave in the middle of FLR*/ in mlx4_master_do_cmd()
2125 MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) { in mlx4_master_do_cmd()
2126 mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) in the middle of FLR\n", in mlx4_master_do_cmd()
2127 slave, cmd); in mlx4_master_do_cmd()
2133 if (slave_state[slave].last_cmd != MLX4_COMM_CMD_RESET) in mlx4_master_do_cmd()
2135 slave_state[slave].vhcr_dma = ((u64) param) << 48; in mlx4_master_do_cmd()
2136 priv->mfunc.master.slave_state[slave].cookie = 0; in mlx4_master_do_cmd()
2139 if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0) in mlx4_master_do_cmd()
2141 slave_state[slave].vhcr_dma |= ((u64) param) << 32; in mlx4_master_do_cmd()
2144 if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR1) in mlx4_master_do_cmd()
2146 slave_state[slave].vhcr_dma |= ((u64) param) << 16; in mlx4_master_do_cmd()
2149 if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2) in mlx4_master_do_cmd()
2151 slave_state[slave].vhcr_dma |= param; in mlx4_master_do_cmd()
2152 if (mlx4_master_activate_admin_state(priv, slave)) in mlx4_master_do_cmd()
2154 slave_state[slave].active = true; in mlx4_master_do_cmd()
2155 mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_INIT, &slave); in mlx4_master_do_cmd()
2158 if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) && in mlx4_master_do_cmd()
2159 (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST)) { in mlx4_master_do_cmd()
2160 mlx4_warn(dev, "slave:%d is out of sync, cmd=0x%x, last command=0x%x, reset is needed\n", in mlx4_master_do_cmd()
2161 slave, cmd, slave_state[slave].last_cmd); in mlx4_master_do_cmd()
2165 mutex_lock(&priv->cmd.slave_cmd_mutex); in mlx4_master_do_cmd()
2166 if (mlx4_master_process_vhcr(dev, slave, NULL)) { in mlx4_master_do_cmd()
2167 mlx4_err(dev, "Failed processing vhcr for slave:%d, resetting slave\n", in mlx4_master_do_cmd()
2168 slave); in mlx4_master_do_cmd()
2169 mutex_unlock(&priv->cmd.slave_cmd_mutex); in mlx4_master_do_cmd()
2172 mutex_unlock(&priv->cmd.slave_cmd_mutex); in mlx4_master_do_cmd()
2175 mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave); in mlx4_master_do_cmd()
2178 spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags); in mlx4_master_do_cmd()
2179 if (!slave_state[slave].is_slave_going_down) in mlx4_master_do_cmd()
2180 slave_state[slave].last_cmd = cmd; in mlx4_master_do_cmd()
2183 spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags); in mlx4_master_do_cmd()
2185 mlx4_warn(dev, "Slave is going down aborting command(%d) executing from slave:%d\n", in mlx4_master_do_cmd()
2186 cmd, slave); in mlx4_master_do_cmd()
2190 &priv->mfunc.comm[slave].slave_read); in mlx4_master_do_cmd()
2195 /* cleanup any slave resources */ in mlx4_master_do_cmd()
2196 if (dev->persist->interface_state & MLX4_INTERFACE_STATE_UP) in mlx4_master_do_cmd()
2197 mlx4_delete_all_resources_for_slave(dev, slave); in mlx4_master_do_cmd()
2200 mlx4_warn(dev, "Turn on internal error to force reset, slave=%d, cmd=0x%x\n", in mlx4_master_do_cmd()
2201 slave, cmd); in mlx4_master_do_cmd()
2202 /* Turn on internal error letting slave reset itself immediately, in mlx4_master_do_cmd()
2208 spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags); in mlx4_master_do_cmd()
2209 if (!slave_state[slave].is_slave_going_down) in mlx4_master_do_cmd()
2210 slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET; in mlx4_master_do_cmd()
2211 spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags); in mlx4_master_do_cmd()
2212 /*with slave in the middle of flr, no need to clean resources again.*/ in mlx4_master_do_cmd()
2214 memset(&slave_state[slave].event_eq, 0, in mlx4_master_do_cmd()
2217 &priv->mfunc.comm[slave].slave_read); in mlx4_master_do_cmd()
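
The VHCR0/VHCR1/VHCR2/VHCR_EN steps above (lines 2135, 2141, 2146 and 2151) rebuild the slave's 64-bit VHCR DMA address from four 16-bit comm-channel parameters, one quarter per command, with the last_cmd checks enforcing the order. A sketch of that reassembly (demo_assemble_vhcr_dma() is a hypothetical helper):

#include <stdint.h>

static uint64_t demo_assemble_vhcr_dma(uint16_t p_vhcr0, uint16_t p_vhcr1,
					uint16_t p_vhcr2, uint16_t p_vhcr_en)
{
	return ((uint64_t)p_vhcr0 << 48) |	/* VHCR0:   bits 63..48 */
	       ((uint64_t)p_vhcr1 << 32) |	/* VHCR1:   bits 47..32 */
	       ((uint64_t)p_vhcr2 << 16) |	/* VHCR2:   bits 31..16 */
	        (uint64_t)p_vhcr_en;		/* VHCR_EN: bits 15..0  */
}
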
2232 struct mlx4_dev *dev = &priv->dev; in mlx4_master_comm_channel() local
2236 int i, slave; in mlx4_master_comm_channel() local
2244 lbit_vec[i] = be32_to_cpu(master->comm_arm_bit_vector[i]); in mlx4_master_comm_channel()
2245 nmbr_bits = dev->persist->num_vfs + 1; in mlx4_master_comm_channel()
2246 if (++master->next_slave >= nmbr_bits) in mlx4_master_comm_channel()
2247 master->next_slave = 0; in mlx4_master_comm_channel()
2248 slave = master->next_slave; in mlx4_master_comm_channel()
2250 slave = find_next_bit((const unsigned long *)&lbit_vec, nmbr_bits, slave); in mlx4_master_comm_channel()
2251 if (!first && slave >= master->next_slave) in mlx4_master_comm_channel()
2253 if (slave == nmbr_bits) { in mlx4_master_comm_channel()
2257 slave = 0; in mlx4_master_comm_channel()
2261 comm_cmd = swab32(readl(&mfunc->comm[slave].slave_write)); in mlx4_master_comm_channel()
2262 slt = swab32(readl(&mfunc->comm[slave].slave_read)) >> 31; in mlx4_master_comm_channel()
2265 if (master->slave_state[slave].comm_toggle in mlx4_master_comm_channel()
2267 pr_info("slave %d out of sync. read toggle %d, state toggle %d. Resynching.\n", in mlx4_master_comm_channel()
2268 slave, slt, in mlx4_master_comm_channel()
2269 master->slave_state[slave].comm_toggle); in mlx4_master_comm_channel()
2270 master->slave_state[slave].comm_toggle = in mlx4_master_comm_channel()
2273 mlx4_master_do_cmd(dev, slave, in mlx4_master_comm_channel()
2278 slave++; in mlx4_master_comm_channel()
2282 mlx4_warn(dev, "Got command event with bitmask from %d slaves but %d were served\n", in mlx4_master_comm_channel()
2285 if (mlx4_ARM_COMM_CHANNEL(dev)) in mlx4_master_comm_channel()
2286 mlx4_warn(dev, "Failed to arm comm channel events\n"); in mlx4_master_comm_channel()
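
mlx4_master_comm_channel() serves pending slaves by advancing next_slave and then scanning the armed-bit vector from that point, wrapping once so no slave is starved (lines 2245-2278). A simplified sketch of that round-robin scan over a plain 64-bit mask; demo_* names are hypothetical, and the real driver walks comm_arm_bit_vector with find_next_bit():

#include <stdint.h>
#include <stdio.h>

/* assumes nbits > 0 and nbits <= 64 */
static void demo_serve_pending(uint64_t pending, unsigned int nbits,
			       unsigned int *next_slave)
{
	/* advance the starting point by one each run so service stays fair */
	*next_slave = (*next_slave + 1) % nbits;

	for (unsigned int i = 0; i < nbits; i++) {
		unsigned int slave = (*next_slave + i) % nbits;

		if (pending & (1ull << slave))
			printf("serving slave %u\n", slave);
	}
}
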
2289 static int sync_toggles(struct mlx4_dev *dev) in sync_toggles() argument
2291 struct mlx4_priv *priv = mlx4_priv(dev); in sync_toggles()
2296 wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write)); in sync_toggles()
2303 rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read)); in sync_toggles()
2310 if (dev->persist->interface_state & in sync_toggles()
2312 mlx4_warn(dev, in sync_toggles()
2314 return -EIO; in sync_toggles()
2318 wr_toggle = swab32(readl(&priv->mfunc.comm-> in sync_toggles()
2324 priv->cmd.comm_toggle = rd_toggle >> 31; in sync_toggles()
2337 mlx4_warn(dev, "recovering from previously mis-behaved VM\n"); in sync_toggles()
2338 __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_read); in sync_toggles()
2339 __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_write); in sync_toggles()
2340 priv->cmd.comm_toggle = 0; in sync_toggles()
2345 int mlx4_multi_func_init(struct mlx4_dev *dev) in mlx4_multi_func_init() argument
2347 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_multi_func_init()
2351 if (mlx4_is_master(dev)) in mlx4_multi_func_init()
2352 priv->mfunc.comm = in mlx4_multi_func_init()
2353 ioremap(pci_resource_start(dev->persist->pdev, in mlx4_multi_func_init()
2354 priv->fw.comm_bar) + in mlx4_multi_func_init()
2355 priv->fw.comm_base, MLX4_COMM_PAGESIZE); in mlx4_multi_func_init()
2357 priv->mfunc.comm = in mlx4_multi_func_init()
2358 ioremap(pci_resource_start(dev->persist->pdev, 2) + in mlx4_multi_func_init()
2360 if (!priv->mfunc.comm) { in mlx4_multi_func_init()
2361 mlx4_err(dev, "Couldn't map communication vector\n"); in mlx4_multi_func_init()
2365 if (mlx4_is_master(dev)) { in mlx4_multi_func_init()
2369 priv->mfunc.master.slave_state = in mlx4_multi_func_init()
2370 kcalloc(dev->num_slaves, in mlx4_multi_func_init()
2373 if (!priv->mfunc.master.slave_state) in mlx4_multi_func_init()
2376 priv->mfunc.master.vf_admin = in mlx4_multi_func_init()
2377 kcalloc(dev->num_slaves, in mlx4_multi_func_init()
2380 if (!priv->mfunc.master.vf_admin) in mlx4_multi_func_init()
2383 priv->mfunc.master.vf_oper = in mlx4_multi_func_init()
2384 kcalloc(dev->num_slaves, in mlx4_multi_func_init()
2387 if (!priv->mfunc.master.vf_oper) in mlx4_multi_func_init()
2390 priv->mfunc.master.next_slave = 0; in mlx4_multi_func_init()
2392 for (i = 0; i < dev->num_slaves; ++i) { in mlx4_multi_func_init()
2393 vf_admin = &priv->mfunc.master.vf_admin[i]; in mlx4_multi_func_init()
2394 vf_oper = &priv->mfunc.master.vf_oper[i]; in mlx4_multi_func_init()
2395 s_state = &priv->mfunc.master.slave_state[i]; in mlx4_multi_func_init()
2396 s_state->last_cmd = MLX4_COMM_CMD_RESET; in mlx4_multi_func_init()
2397 s_state->vst_qinq_supported = false; in mlx4_multi_func_init()
2398 mutex_init(&priv->mfunc.master.gen_eqe_mutex[i]); in mlx4_multi_func_init()
2400 s_state->event_eq[j].eqn = -1; in mlx4_multi_func_init()
2402 &priv->mfunc.comm[i].slave_write); in mlx4_multi_func_init()
2404 &priv->mfunc.comm[i].slave_read); in mlx4_multi_func_init()
2409 s_state->vlan_filter[port] = in mlx4_multi_func_init()
2412 if (!s_state->vlan_filter[port]) { in mlx4_multi_func_init()
2413 if (--port) in mlx4_multi_func_init()
2414 kfree(s_state->vlan_filter[port]); in mlx4_multi_func_init()
2418 admin_vport = &vf_admin->vport[port]; in mlx4_multi_func_init()
2419 oper_vport = &vf_oper->vport[port].state; in mlx4_multi_func_init()
2420 INIT_LIST_HEAD(&s_state->mcast_filters[port]); in mlx4_multi_func_init()
2421 admin_vport->default_vlan = MLX4_VGT; in mlx4_multi_func_init()
2422 oper_vport->default_vlan = MLX4_VGT; in mlx4_multi_func_init()
2423 admin_vport->qos_vport = in mlx4_multi_func_init()
2425 oper_vport->qos_vport = MLX4_VPP_DEFAULT_VPORT; in mlx4_multi_func_init()
2426 admin_vport->vlan_proto = htons(ETH_P_8021Q); in mlx4_multi_func_init()
2427 oper_vport->vlan_proto = htons(ETH_P_8021Q); in mlx4_multi_func_init()
2428 vf_oper->vport[port].vlan_idx = NO_INDX; in mlx4_multi_func_init()
2429 vf_oper->vport[port].mac_idx = NO_INDX; in mlx4_multi_func_init()
2430 mlx4_set_random_admin_guid(dev, i, port); in mlx4_multi_func_init()
2432 spin_lock_init(&s_state->lock); in mlx4_multi_func_init()
2435 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP) { in mlx4_multi_func_init()
2436 for (port = 1; port <= dev->caps.num_ports; port++) { in mlx4_multi_func_init()
2437 if (mlx4_is_eth(dev, port)) { in mlx4_multi_func_init()
2438 mlx4_set_default_port_qos(dev, port); in mlx4_multi_func_init()
2439 mlx4_allocate_port_vpps(dev, port); in mlx4_multi_func_init()
2444 memset(&priv->mfunc.master.cmd_eqe, 0, sizeof(struct mlx4_eqe)); in mlx4_multi_func_init()
2445 priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD; in mlx4_multi_func_init()
2446 INIT_WORK(&priv->mfunc.master.comm_work, in mlx4_multi_func_init()
2448 INIT_WORK(&priv->mfunc.master.slave_event_work, in mlx4_multi_func_init()
2450 INIT_WORK(&priv->mfunc.master.slave_flr_event_work, in mlx4_multi_func_init()
2452 spin_lock_init(&priv->mfunc.master.slave_state_lock); in mlx4_multi_func_init()
2453 spin_lock_init(&priv->mfunc.master.slave_eq.event_lock); in mlx4_multi_func_init()
2454 priv->mfunc.master.comm_wq = in mlx4_multi_func_init()
2456 if (!priv->mfunc.master.comm_wq) in mlx4_multi_func_init()
2459 if (mlx4_init_resource_tracker(dev)) in mlx4_multi_func_init()
2463 err = sync_toggles(dev); in mlx4_multi_func_init()
2465 mlx4_err(dev, "Couldn't sync toggles\n"); in mlx4_multi_func_init()
2472 destroy_workqueue(priv->mfunc.master.comm_wq); in mlx4_multi_func_init()
2474 while (i--) { in mlx4_multi_func_init()
2476 kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]); in mlx4_multi_func_init()
2478 kfree(priv->mfunc.master.vf_oper); in mlx4_multi_func_init()
2480 kfree(priv->mfunc.master.vf_admin); in mlx4_multi_func_init()
2482 kfree(priv->mfunc.master.slave_state); in mlx4_multi_func_init()
2484 iounmap(priv->mfunc.comm); in mlx4_multi_func_init()
2485 priv->mfunc.comm = NULL; in mlx4_multi_func_init()
2487 dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE, in mlx4_multi_func_init()
2488 priv->mfunc.vhcr, in mlx4_multi_func_init()
2489 priv->mfunc.vhcr_dma); in mlx4_multi_func_init()
2490 priv->mfunc.vhcr = NULL; in mlx4_multi_func_init()
2491 return -ENOMEM; in mlx4_multi_func_init()
2494 int mlx4_cmd_init(struct mlx4_dev *dev) in mlx4_cmd_init() argument
2496 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_cmd_init()
2499 if (!priv->cmd.initialized) { in mlx4_cmd_init()
2500 init_rwsem(&priv->cmd.switch_sem); in mlx4_cmd_init()
2501 mutex_init(&priv->cmd.slave_cmd_mutex); in mlx4_cmd_init()
2502 sema_init(&priv->cmd.poll_sem, 1); in mlx4_cmd_init()
2503 priv->cmd.use_events = 0; in mlx4_cmd_init()
2504 priv->cmd.toggle = 1; in mlx4_cmd_init()
2505 priv->cmd.initialized = 1; in mlx4_cmd_init()
2509 if (!mlx4_is_slave(dev) && !priv->cmd.hcr) { in mlx4_cmd_init()
2510 priv->cmd.hcr = ioremap(pci_resource_start(dev->persist->pdev, in mlx4_cmd_init()
2512 if (!priv->cmd.hcr) { in mlx4_cmd_init()
2513 mlx4_err(dev, "Couldn't map command register\n"); in mlx4_cmd_init()
2519 if (mlx4_is_mfunc(dev) && !priv->mfunc.vhcr) { in mlx4_cmd_init()
2520 priv->mfunc.vhcr = dma_alloc_coherent(&dev->persist->pdev->dev, in mlx4_cmd_init()
2522 &priv->mfunc.vhcr_dma, in mlx4_cmd_init()
2524 if (!priv->mfunc.vhcr) in mlx4_cmd_init()
2530 if (!priv->cmd.pool) { in mlx4_cmd_init()
2531 priv->cmd.pool = dma_pool_create("mlx4_cmd", in mlx4_cmd_init()
2532 &dev->persist->pdev->dev, in mlx4_cmd_init()
2535 if (!priv->cmd.pool) in mlx4_cmd_init()
2544 mlx4_cmd_cleanup(dev, flags); in mlx4_cmd_init()
2545 return -ENOMEM; in mlx4_cmd_init()
2548 void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev) in mlx4_report_internal_err_comm_event() argument
2550 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_report_internal_err_comm_event()
2551 int slave; in mlx4_report_internal_err_comm_event() local
2558 if (!priv->mfunc.comm) in mlx4_report_internal_err_comm_event()
2564 for (slave = 0; slave < dev->num_slaves; slave++) { in mlx4_report_internal_err_comm_event()
2565 slave_read = swab32(readl(&priv->mfunc.comm[slave].slave_read)); in mlx4_report_internal_err_comm_event()
2568 &priv->mfunc.comm[slave].slave_read); in mlx4_report_internal_err_comm_event()
2572 void mlx4_multi_func_cleanup(struct mlx4_dev *dev) in mlx4_multi_func_cleanup() argument
2574 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_multi_func_cleanup()
2577 if (mlx4_is_master(dev)) { in mlx4_multi_func_cleanup()
2578 destroy_workqueue(priv->mfunc.master.comm_wq); in mlx4_multi_func_cleanup()
2579 for (i = 0; i < dev->num_slaves; i++) { in mlx4_multi_func_cleanup()
2581 kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]); in mlx4_multi_func_cleanup()
2583 kfree(priv->mfunc.master.slave_state); in mlx4_multi_func_cleanup()
2584 kfree(priv->mfunc.master.vf_admin); in mlx4_multi_func_cleanup()
2585 kfree(priv->mfunc.master.vf_oper); in mlx4_multi_func_cleanup()
2586 dev->num_slaves = 0; in mlx4_multi_func_cleanup()
2589 iounmap(priv->mfunc.comm); in mlx4_multi_func_cleanup()
2590 priv->mfunc.comm = NULL; in mlx4_multi_func_cleanup()
2593 void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask) in mlx4_cmd_cleanup() argument
2595 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_cmd_cleanup()
2597 if (priv->cmd.pool && (cleanup_mask & MLX4_CMD_CLEANUP_POOL)) { in mlx4_cmd_cleanup()
2598 dma_pool_destroy(priv->cmd.pool); in mlx4_cmd_cleanup()
2599 priv->cmd.pool = NULL; in mlx4_cmd_cleanup()
2602 if (!mlx4_is_slave(dev) && priv->cmd.hcr && in mlx4_cmd_cleanup()
2604 iounmap(priv->cmd.hcr); in mlx4_cmd_cleanup()
2605 priv->cmd.hcr = NULL; in mlx4_cmd_cleanup()
2607 if (mlx4_is_mfunc(dev) && priv->mfunc.vhcr && in mlx4_cmd_cleanup()
2609 dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE, in mlx4_cmd_cleanup()
2610 priv->mfunc.vhcr, priv->mfunc.vhcr_dma); in mlx4_cmd_cleanup()
2611 priv->mfunc.vhcr = NULL; in mlx4_cmd_cleanup()
2613 if (priv->cmd.initialized && (cleanup_mask & MLX4_CMD_CLEANUP_STRUCT)) in mlx4_cmd_cleanup()
2614 priv->cmd.initialized = 0; in mlx4_cmd_cleanup()
int mlx4_cmd_use_events(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;
	int err = 0;

	priv->cmd.context = kmalloc_array(priv->cmd.max_cmds,
					  sizeof(struct mlx4_cmd_context),
					  GFP_KERNEL);
	if (!priv->cmd.context)
		return -ENOMEM;

	if (mlx4_is_mfunc(dev))
		mutex_lock(&priv->cmd.slave_cmd_mutex);
	down_write(&priv->cmd.switch_sem);
	for (i = 0; i < priv->cmd.max_cmds; ++i) {
		priv->cmd.context[i].token = i;
		priv->cmd.context[i].next  = i + 1;
		/* To support fatal error flow, initialize all
		 * cmd contexts to allow simulating completions
		 * with complete() at any time.
		 */
		init_completion(&priv->cmd.context[i].done);
	}

	priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
	priv->cmd.free_head = 0;

	sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);

	for (priv->cmd.token_mask = 1;
	     priv->cmd.token_mask < priv->cmd.max_cmds;
	     priv->cmd.token_mask <<= 1)
		; /* nothing */
	--priv->cmd.token_mask;

	down(&priv->cmd.poll_sem);
	priv->cmd.use_events = 1;
	up_write(&priv->cmd.switch_sem);
	if (mlx4_is_mfunc(dev))
		mutex_unlock(&priv->cmd.slave_cmd_mutex);

	return err;
}
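
/*
 * Switch back to polling (used when shutting down the device).
 */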
void mlx4_cmd_use_polling(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	if (mlx4_is_mfunc(dev))
		mutex_lock(&priv->cmd.slave_cmd_mutex);
	down_write(&priv->cmd.switch_sem);
	priv->cmd.use_events = 0;

	for (i = 0; i < priv->cmd.max_cmds; ++i)
		down(&priv->cmd.event_sem);

	kfree(priv->cmd.context);
	priv->cmd.context = NULL;

	up(&priv->cmd.poll_sem);
	up_write(&priv->cmd.switch_sem);
	if (mlx4_is_mfunc(dev))
		mutex_unlock(&priv->cmd.slave_cmd_mutex);
}
struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
{
	struct mlx4_cmd_mailbox *mailbox;

	mailbox = kmalloc(sizeof(*mailbox), GFP_KERNEL);
	if (!mailbox)
		return ERR_PTR(-ENOMEM);

	mailbox->buf = dma_pool_zalloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
				       &mailbox->dma);
	if (!mailbox->buf) {
		kfree(mailbox);
		return ERR_PTR(-ENOMEM);
	}

	return mailbox;
}

void mlx4_free_cmd_mailbox(struct mlx4_dev *dev,
			   struct mlx4_cmd_mailbox *mailbox)
{
	if (!mailbox)
		return;

	dma_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma);
	kfree(mailbox);
}
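
/* Typical mailbox usage, shown only as an illustrative sketch (error handling
 * trimmed; HYPOTHETICAL_OPCODE is a placeholder, not a real command):
 *
 *	mailbox = mlx4_alloc_cmd_mailbox(dev);
 *	if (IS_ERR(mailbox))
 *		return PTR_ERR(mailbox);
 *	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, HYPOTHETICAL_OPCODE,
 *			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 *	mlx4_free_cmd_mailbox(dev, mailbox);
 */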
static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf)
{
	if ((vf < 0) || (vf >= dev->persist->num_vfs)) {
		mlx4_err(dev, "Bad vf number:%d (number of activated vf: %d)\n",
			 vf, dev->persist->num_vfs);
		return -EINVAL;
	}

	/* Slave indices are shifted by one: slave 0 is the PF itself */
	return vf + 1;
}

int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave)
{
	if (slave < 1 || slave > dev->persist->num_vfs) {
		mlx4_err(dev,
			 "Bad slave number:%d (number of activated slaves: %lu)\n",
			 slave, dev->num_slaves);
		return -EINVAL;
	}
	return slave - 1;
}
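
/* On a fatal error, complete every outstanding event-mode command with
 * CMD_STAT_INTERNAL_ERR so that any thread sleeping on a command completion
 * is released instead of timing out.
 */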
void mlx4_cmd_wake_completions(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_context *context;
	int i;

	spin_lock(&priv->cmd.context_lock);
	if (priv->cmd.context) {
		for (i = 0; i < priv->cmd.max_cmds; ++i) {
			context = &priv->cmd.context[i];
			context->fw_status = CMD_STAT_INTERNAL_ERR;
			context->result    =
				mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
			complete(&context->done);
		}
	}
	spin_unlock(&priv->cmd.context_lock);
}
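
/* Build the bitmap of physical ports a given slave may use: slave 0 (the PF)
 * sees all ports, while a VF sees the contiguous range described by
 * dev->dev_vfs[vf] (min_port .. min_port + n_ports - 1).
 */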
struct mlx4_active_ports mlx4_get_active_ports(struct mlx4_dev *dev, int slave)
{
	struct mlx4_active_ports actv_ports;
	int vf;

	bitmap_zero(actv_ports.ports, MLX4_MAX_PORTS);

	if (slave == 0) {
		bitmap_fill(actv_ports.ports, dev->caps.num_ports);
		return actv_ports;
	}

	vf = mlx4_get_vf_indx(dev, slave);
	if (vf < 0)
		return actv_ports;

	bitmap_set(actv_ports.ports, dev->dev_vfs[vf].min_port - 1,
		   min((int)dev->dev_vfs[mlx4_get_vf_indx(dev, slave)].n_ports,
		       dev->caps.num_ports));

	return actv_ports;
}

int mlx4_slave_convert_port(struct mlx4_dev *dev, int slave, int port)
{
	unsigned n;
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
	unsigned m = bitmap_weight(actv_ports.ports, dev->caps.num_ports);

	if (port <= 0 || port > m)
		return -EINVAL;

	n = find_first_bit(actv_ports.ports, dev->caps.num_ports);
	if (port <= n)
		port = n + 1;

	return port;
}

int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port)
{
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);

	if (test_bit(port - 1, actv_ports.ports))
		return port -
			find_first_bit(actv_ports.ports, dev->caps.num_ports);

	return -1;
}
struct mlx4_slaves_pport mlx4_phys_to_slaves_pport(struct mlx4_dev *dev,
						   int port)
{
	unsigned i;
	struct mlx4_slaves_pport slaves_pport;

	bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);

	if (port <= 0 || port > dev->caps.num_ports)
		return slaves_pport;

	for (i = 0; i < dev->persist->num_vfs + 1; i++) {
		struct mlx4_active_ports actv_ports =
			mlx4_get_active_ports(dev, i);
		if (test_bit(port - 1, actv_ports.ports))
			set_bit(i, slaves_pport.slaves);
	}

	return slaves_pport;
}

struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
		struct mlx4_dev *dev,
		const struct mlx4_active_ports *crit_ports)
{
	unsigned i;
	struct mlx4_slaves_pport slaves_pport;

	bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);

	for (i = 0; i < dev->persist->num_vfs + 1; i++) {
		struct mlx4_active_ports actv_ports =
			mlx4_get_active_ports(dev, i);
		if (bitmap_equal(crit_ports->ports, actv_ports.ports,
				 dev->caps.num_ports))
			set_bit(i, slaves_pport.slaves);
	}

	return slaves_pport;
}

static int mlx4_slaves_closest_port(struct mlx4_dev *dev, int slave, int port)
{
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
	int min_port = find_first_bit(actv_ports.ports, dev->caps.num_ports)
			+ 1;
	int max_port = min_port +
		bitmap_weight(actv_ports.ports, dev->caps.num_ports);

	/* Clamp the requested port into the slave's active port range */
	if (port < min_port)
		port = min_port;
	else if (port >= max_port)
		port = max_port - 1;

	return port;
}
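
/* Program per-priority VPP rate limiting for one vport: start from the values
 * queried for vport 0 and enable a rate limit only on the priorities this
 * port's QoS manager marks as available.
 */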
static int mlx4_set_vport_qos(struct mlx4_priv *priv, int slave, int port,
			      int max_tx_rate)
{
	int i;
	int err;
	struct mlx4_qos_manager *port_qos;
	struct mlx4_dev *dev = &priv->dev;
	struct mlx4_vport_qos_param vpp_qos[MLX4_NUM_UP];

	port_qos = &priv->mfunc.master.qos_ctl[port];
	memset(vpp_qos, 0, sizeof(struct mlx4_vport_qos_param) * MLX4_NUM_UP);

	if (slave > port_qos->num_of_qos_vfs) {
		mlx4_info(dev, "No available VPP resources for this VF\n");
		return -EINVAL;
	}

	/* Query for default QoS values from Vport 0 is needed */
	err = mlx4_SET_VPORT_QOS_get(dev, port, 0, vpp_qos);
	if (err) {
		mlx4_info(dev, "Failed to query Vport 0 QoS values\n");
		return err;
	}

	for (i = 0; i < MLX4_NUM_UP; i++) {
		if (test_bit(i, port_qos->priority_bm) && max_tx_rate) {
			vpp_qos[i].max_avg_bw = max_tx_rate;
			vpp_qos[i].enable = 1;
		} else {
			/* A zero rate keeps the value queried from Vport 0 */
			vpp_qos[i].enable = 0;
		}
	}

	err = mlx4_SET_VPORT_QOS_set(dev, port, slave, vpp_qos);
	if (err) {
		mlx4_info(dev, "Failed to set Vport %d QoS values\n", slave);
		return err;
	}

	return 0;
}
static bool mlx4_is_vf_vst_and_prio_qos(struct mlx4_dev *dev, int port,
					struct mlx4_vport_state *vf_admin)
{
	struct mlx4_qos_manager *info;
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (!mlx4_is_master(dev) ||
	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP))
		return false;

	info = &priv->mfunc.master.qos_ctl[port];

	if (vf_admin->default_vlan != MLX4_VGT &&
	    test_bit(vf_admin->default_qos, info->priority_bm))
		return true;

	return false;
}

static bool mlx4_valid_vf_state_change(struct mlx4_dev *dev, int port,
				       struct mlx4_vport_state *vf_admin,
				       int vlan, int qos)
{
	struct mlx4_vport_state dummy_admin = {0};

	if (!mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin) ||
	    !vf_admin->tx_rate)
		return true;

	dummy_admin.default_qos = qos;
	dummy_admin.default_vlan = vlan;

	/* The VF wants to move to another VST state that is still valid with
	 * the current rate limit: either a different default vlan in VST or
	 * another supported QoS priority.
	 */
	if (mlx4_is_vf_vst_and_prio_qos(dev, port, &dummy_admin))
		return true;

	mlx4_info(dev, "Cannot change VF state to %s while rate is set\n",
		  (vlan == MLX4_VGT) ? "VGT" : "VST");

	if (vlan != MLX4_VGT)
		mlx4_info(dev, "VST priority %d not supported for QoS\n", qos);

	mlx4_info(dev, "Please set rate to 0 prior to this VF state change\n");

	return false;
}
int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u8 *mac)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *s_info;
	int slave;

	if (!mlx4_is_master(dev))
		return -EPROTONOSUPPORT;

	if (is_multicast_ether_addr(mac))
		return -EINVAL;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	port = mlx4_slaves_closest_port(dev, slave, port);
	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];

	if (s_info->spoofchk && is_zero_ether_addr(mac)) {
		mlx4_info(dev, "MAC invalidation is not allowed when spoofchk is on\n");
		return -EPERM;
	}

	s_info->mac = ether_addr_to_u64(mac);
	mlx4_info(dev, "default mac on vf %d port %d to %llX will take effect only after vf restart\n",
		  vf, port, s_info->mac);

	return 0;
}
int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos,
		     __be16 proto)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *vf_admin;
	struct mlx4_vport_oper_state *vf_oper;
	struct mlx4_slave_state *slave_state;
	int slave;

	if ((!mlx4_is_master(dev)) ||
	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VLAN_CONTROL))
		return -EPROTONOSUPPORT;

	if ((vlan > 4095) || (qos > 7))
		return -EINVAL;

	if (proto == htons(ETH_P_8021AD) &&
	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SVLAN_BY_QP))
		return -EPROTONOSUPPORT;

	if (proto != htons(ETH_P_8021Q) &&
	    proto != htons(ETH_P_8021AD))
		return -EINVAL;

	if ((proto == htons(ETH_P_8021AD)) &&
	    ((vlan == 0) || (vlan == MLX4_VGT)))
		return -EINVAL;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	slave_state = &priv->mfunc.master.slave_state[slave];
	if ((proto == htons(ETH_P_8021AD)) && (slave_state->active) &&
	    (!slave_state->vst_qinq_supported)) {
		mlx4_err(dev, "vf %d does not support VST QinQ mode\n", vf);
		return -EPROTONOSUPPORT;
	}

	port = mlx4_slaves_closest_port(dev, slave, port);
	vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
	vf_oper = &priv->mfunc.master.vf_oper[slave].vport[port];

	if (!mlx4_valid_vf_state_change(dev, port, vf_admin, vlan, qos))
		return -EPERM;

	if (vlan == 0)
		vf_admin->default_vlan = MLX4_VGT;
	else
		vf_admin->default_vlan = vlan;
	vf_admin->default_qos = qos;
	vf_admin->vlan_proto = proto;

	/* If rate was configured prior to VST, we saved the configured rate
	 * in vf_admin->rate and now, if priority supported we enforce the QoS
	 */
	if (mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin) &&
	    vf_admin->tx_rate)
		vf_admin->qos_vport = slave;

	/* Try to activate the new state without a VF restart; this is not
	 * supported while moving to VST QinQ mode.
	 */
	if ((proto == htons(ETH_P_8021AD) &&
	     vf_oper->state.vlan_proto != proto) ||
	    mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
		mlx4_info(dev,
			  "updating vf %d port %d config will take effect on next VF restart\n",
			  vf, port);

	return 0;
}
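
/* Backend for the PF's ndo_set_vf_rate path: only a maximum TX rate is
 * supported (a non-zero min_tx_rate is rejected). The rate is stored in the
 * VF admin state and, when the VF is in VST mode with a QoS-capable priority,
 * enforced immediately through the VPP mechanism; otherwise it is applied
 * once the VF enters such a state.
 */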
int mlx4_set_vf_rate(struct mlx4_dev *dev, int port, int vf, int min_tx_rate,
		     int max_tx_rate)
{
	int err;
	int slave;
	struct mlx4_vport_state *vf_admin;
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (!mlx4_is_master(dev) ||
	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP))
		return -EPROTONOSUPPORT;

	if (min_tx_rate) {
		mlx4_info(dev, "Minimum BW share not supported\n");
		return -EPROTONOSUPPORT;
	}

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	port = mlx4_slaves_closest_port(dev, slave, port);
	vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];

	err = mlx4_set_vport_qos(priv, slave, port, max_tx_rate);
	if (err) {
		mlx4_info(dev, "vf %d failed to set rate %d\n", vf,
			  max_tx_rate);
		return err;
	}

	vf_admin->tx_rate = max_tx_rate;
	/* If the VF is not in a supported mode (VST with a supported
	 * priority), do not touch its QPs' vport configuration; just save the
	 * rate so it is enforced once the VF moves to a supported mode.
	 */
	if (!mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin)) {
		mlx4_info(dev,
			  "rate set for VF %d when not in valid state\n", vf);

		if (vf_admin->default_vlan != MLX4_VGT)
			mlx4_info(dev, "VST priority not supported by QoS\n");
		else
			mlx4_info(dev, "VF in VGT mode (needed VST)\n");

		mlx4_info(dev,
			  "rate %d will take effect when VF moves to a valid state\n",
			  max_tx_rate);
		return 0;
	}

	/* A rate of 0 assigns the default vport for the VF's QPs */
	vf_admin->qos_vport = max_tx_rate ? slave : MLX4_VPP_DEFAULT_VPORT;

	if (priv->mfunc.master.slave_state[slave].active &&
	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)
		mlx4_master_immediate_activate_vlan_qos(priv, slave, port);

	return 0;
}
/* mlx4_get_slave_default_vlan -
 * return true if the slave is in VST mode (has a default vlan);
 * if so, also return the vlan and qos values (when not NULL).
 */
bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave,
				 u16 *vlan, u8 *qos)
{
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_priv *priv;

	priv = mlx4_priv(dev);
	port = mlx4_slaves_closest_port(dev, slave, port);
	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];

	if (MLX4_VGT != vp_oper->state.default_vlan) {
		if (vlan)
			*vlan = vp_oper->state.default_vlan;
		if (qos)
			*qos = vp_oper->state.default_qos;
		return true;
	}

	return false;
}
int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *s_info;
	int slave;
	u8 mac[ETH_ALEN];

	if ((!mlx4_is_master(dev)) ||
	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FSM))
		return -EPROTONOSUPPORT;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	port = mlx4_slaves_closest_port(dev, slave, port);
	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];

	u64_to_ether_addr(s_info->mac, mac);
	if (setting && !is_valid_ether_addr(mac)) {
		mlx4_info(dev, "Illegal MAC with spoofchk\n");
		return -EPERM;
	}

	s_info->spoofchk = setting;

	return 0;
}
int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *s_info;
	int slave;

	if (!mlx4_is_master(dev))
		return -EPROTONOSUPPORT;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
	ivf->vf = vf;

	/* Copy the admin MAC (stored as a u64) into the ifla byte array */
	ivf->mac[0] = ((s_info->mac >> (5*8)) & 0xff);
	ivf->mac[1] = ((s_info->mac >> (4*8)) & 0xff);
	ivf->mac[2] = ((s_info->mac >> (3*8)) & 0xff);
	ivf->mac[3] = ((s_info->mac >> (2*8)) & 0xff);
	ivf->mac[4] = ((s_info->mac >> (1*8)) & 0xff);
	ivf->mac[5] = ((s_info->mac)  & 0xff);

	ivf->vlan	= s_info->default_vlan;
	ivf->qos	= s_info->default_qos;
	ivf->vlan_proto	= s_info->vlan_proto;

	if (mlx4_is_vf_vst_and_prio_qos(dev, port, s_info))
		ivf->max_tx_rate = s_info->tx_rate;
	else
		ivf->max_tx_rate = 0;

	ivf->min_tx_rate	= 0;
	ivf->spoofchk		= s_info->spoofchk;
	ivf->linkstate		= s_info->link_state;

	return 0;
}
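
/* Force a VF's logical link to auto/up/down: record the requested state in
 * the VF admin struct and emit a synthetic port-state-change EQE so the VF
 * observes the transition without a restart.
 */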
int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *s_info;
	int slave;
	u8 link_stat_event;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	port = mlx4_slaves_closest_port(dev, slave, port);
	switch (link_state) {
	case IFLA_VF_LINK_STATE_AUTO:
		/* get current link state */
		if (!priv->sense.do_sense_port[port])
			link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
		else
			link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
		break;
	default:
		mlx4_warn(dev, "unknown value for link_state %02x on slave %d port %d\n",
			  link_state, slave, port);
		return -EINVAL;
	}

	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
	s_info->link_state = link_state;

	/* send event */
	mlx4_gen_port_state_change_eqe(dev, slave, port, link_stat_event);

	if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
		mlx4_dbg(dev,
			 "updating vf %d port %d no link state HW enforcement\n",
			 vf, port);

	return 0;
}
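
/* Read (and optionally reset) a HW counter via QUERY_IF_STAT and accumulate
 * the basic rx/tx frame and byte counts into *counter_stats, which is kept in
 * the big-endian form returned by the firmware.
 */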
int mlx4_get_counter_stats(struct mlx4_dev *dev, int counter_index,
			   struct mlx4_counter *counter_stats, int reset)
{
	struct mlx4_cmd_mailbox *mailbox = NULL;
	struct mlx4_counter *tmp_counter;
	int err;
	u32 if_stat_in_mod;

	if (!counter_stats)
		return -EINVAL;

	if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
		return 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memset(mailbox->buf, 0, sizeof(struct mlx4_counter));
	if_stat_in_mod = counter_index;
	if (reset)
		if_stat_in_mod |= MLX4_QUERY_IF_STAT_RESET;
	err = mlx4_cmd_box(dev, 0, mailbox->dma,
			   if_stat_in_mod, 0,
			   MLX4_CMD_QUERY_IF_STAT,
			   MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_NATIVE);
	if (err) {
		mlx4_dbg(dev, "%s: failed to read statistics for counter index %d\n",
			 __func__, counter_index);
		goto if_stat_out;
	}
	tmp_counter = (struct mlx4_counter *)mailbox->buf;
	counter_stats->counter_mode = tmp_counter->counter_mode;
	if (counter_stats->counter_mode == 0) {
		counter_stats->rx_frames =
			cpu_to_be64(be64_to_cpu(counter_stats->rx_frames) +
				    be64_to_cpu(tmp_counter->rx_frames));
		counter_stats->tx_frames =
			cpu_to_be64(be64_to_cpu(counter_stats->tx_frames) +
				    be64_to_cpu(tmp_counter->tx_frames));
		counter_stats->rx_bytes =
			cpu_to_be64(be64_to_cpu(counter_stats->rx_bytes) +
				    be64_to_cpu(tmp_counter->rx_bytes));
		counter_stats->tx_bytes =
			cpu_to_be64(be64_to_cpu(counter_stats->tx_bytes) +
				    be64_to_cpu(tmp_counter->tx_bytes));
	}

if_stat_out:
	mlx4_free_cmd_mailbox(dev, mailbox);

	return err;
}
int mlx4_get_vf_stats(struct mlx4_dev *dev, int port, int vf_idx,
		      struct ifla_vf_stats *vf_stats)
{
	struct mlx4_counter tmp_vf_stats;
	int slave;
	int err = 0;

	if (!vf_stats)
		return -EINVAL;

	if (!mlx4_is_master(dev))
		return -EPROTONOSUPPORT;

	slave = mlx4_get_slave_indx(dev, vf_idx);
	if (slave < 0)
		return -EINVAL;

	port = mlx4_slaves_closest_port(dev, slave, port);
	err = mlx4_calc_vf_counters(dev, slave, port, &tmp_vf_stats);
	if (!err && tmp_vf_stats.counter_mode == 0) {
		vf_stats->rx_packets = be64_to_cpu(tmp_vf_stats.rx_frames);
		vf_stats->tx_packets = be64_to_cpu(tmp_vf_stats.tx_frames);
		vf_stats->rx_bytes = be64_to_cpu(tmp_vf_stats.rx_bytes);
		vf_stats->tx_bytes = be64_to_cpu(tmp_vf_stats.tx_bytes);
	}

	return err;
}
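
/* Helpers controlling whether a VF may use the SMI (subnet management)
 * interface: vf_oper reflects the currently active setting, vf_admin the
 * requested value that takes effect when the VF (re)initializes.
 */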
int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (slave < 1 || slave >= dev->num_slaves ||
	    port < 1 || port > MLX4_MAX_PORTS)
		return 0;

	return priv->mfunc.master.vf_oper[slave].smi_enabled[port] ==
		MLX4_VF_SMI_ENABLED;
}

int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (slave == mlx4_master_func_num(dev))
		return 1;

	if (slave < 1 || slave >= dev->num_slaves ||
	    port < 1 || port > MLX4_MAX_PORTS)
		return 0;

	return priv->mfunc.master.vf_admin[slave].enable_smi[port] ==
		MLX4_VF_SMI_ENABLED;
}

int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
				 int enabled)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
			&priv->dev, slave);
	int min_port = find_first_bit(actv_ports.ports,
				      priv->dev.caps.num_ports) + 1;
	int max_port = min_port - 1 +
		bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);

	if (slave == mlx4_master_func_num(dev))
		return 0;

	if (slave < 1 || slave >= dev->num_slaves ||
	    port < 1 || port > MLX4_MAX_PORTS ||
	    enabled < 0 || enabled > 1)
		return -EINVAL;

	if (min_port == max_port && dev->caps.num_ports > 1) {
		mlx4_info(dev, "SMI access disallowed for single ported VFs\n");
		return -EPROTONOSUPPORT;
	}

	priv->mfunc.master.vf_admin[slave].enable_smi[port] = enabled;

	return 0;
}