Lines matching references to dev (refs:dev)

86 static u64 get_async_ev_mask(struct mlx4_dev *dev)  in get_async_ev_mask()  argument
89 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV) in get_async_ev_mask()
91 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT) in get_async_ev_mask()
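The fragments above show get_async_ev_mask() building the asynchronous-event mask conditionally from the device capability flags. Below is a minimal, self-contained sketch of that pattern; the flag bits, event bits and struct are illustrative stand-ins, not the mlx4 definitions.

/* Hedged sketch: stand-in types and bit positions chosen for illustration only;
 * the real definitions live in the mlx4 headers. */
#include <stdint.h>
#include <stdio.h>

#define CAP_FLAG_PORT_MNG_CHG_EV   (1ULL << 0)   /* hypothetical capability bit */
#define CAP_FLAG2_RECOVERABLE_ERR  (1ULL << 1)   /* hypothetical capability bit */

#define EV_MASK_BASE               0x1ULL        /* always-on async events (placeholder) */
#define EV_MASK_PORT_MNG_CHG       (1ULL << 59)  /* placeholder event bit */
#define EV_MASK_RECOVERABLE_ERR    (1ULL << 61)  /* placeholder event bit */

struct caps { uint64_t flags, flags2; };

/* Build the async event mask: start from the base mask and add optional
 * events only when the device reports the matching capability. */
static uint64_t get_async_ev_mask(const struct caps *caps)
{
    uint64_t mask = EV_MASK_BASE;

    if (caps->flags & CAP_FLAG_PORT_MNG_CHG_EV)
        mask |= EV_MASK_PORT_MNG_CHG;
    if (caps->flags2 & CAP_FLAG2_RECOVERABLE_ERR)
        mask |= EV_MASK_RECOVERABLE_ERR;
    return mask;
}

int main(void)
{
    struct caps c = { .flags = CAP_FLAG_PORT_MNG_CHG_EV, .flags2 = 0 };
    printf("mask = 0x%llx\n", (unsigned long long)get_async_ev_mask(&c));
    return 0;
}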
144 struct mlx4_dev *dev = &priv->dev; in mlx4_gen_slave_eqe() local
156 mlx4_is_bonded(dev)) { in mlx4_gen_slave_eqe()
159 if (!mlx4_QUERY_PORT(dev, 1, &port_cap) && port_cap.link_state) in mlx4_gen_slave_eqe()
162 if (!mlx4_QUERY_PORT(dev, 2, &port_cap) && port_cap.link_state) in mlx4_gen_slave_eqe()
167 for (i = 0; i <= dev->persist->num_vfs; i++) { in mlx4_gen_slave_eqe()
172 slave_port = mlx4_phys_to_slave_port(dev, i, phys_port); in mlx4_gen_slave_eqe()
177 if (mlx4_GEN_EQE(dev, i, eqe)) in mlx4_gen_slave_eqe()
178 mlx4_warn(dev, "Failed to generate event for slave %d\n", in mlx4_gen_slave_eqe()
184 if (mlx4_GEN_EQE(dev, slave, eqe)) in mlx4_gen_slave_eqe()
185 mlx4_warn(dev, "Failed to generate event for slave %d\n", in mlx4_gen_slave_eqe()
194 static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe) in slave_event() argument
196 struct mlx4_priv *priv = mlx4_priv(dev); in slave_event()
205 …mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. No free EQE on slave events queue\… in slave_event()
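The warning quoted above implies that slave_event() copies each EQE into a per-slave software event queue that can fill up. The sketch below models only that producer side with a small fixed-size ring; the queue size, struct eqe layout and field names are illustrative, not the driver's.

/* Hedged sketch of a per-slave event ring: SLAVE_EVENT_Q_SIZE, struct eqe and
 * struct slave_event_q are stand-ins, not the mlx4 definitions. */
#include <stdio.h>

#define SLAVE_EVENT_Q_SIZE 8

struct eqe { unsigned char type, subtype; unsigned char data[30]; };

struct slave_event_q {
    struct eqe ring[SLAVE_EVENT_Q_SIZE];
    unsigned int prod, cons;             /* free-running producer/consumer counters */
};

/* Returns 0 on success, -1 when there is no free slot (the point at which the
 * driver prints the "No free EQE on slave events queue" warning). */
static int slave_event(struct slave_event_q *q, const struct eqe *eqe)
{
    if (q->prod - q->cons >= SLAVE_EVENT_Q_SIZE)
        return -1;                       /* queue full: event is dropped */
    q->ring[q->prod % SLAVE_EVENT_Q_SIZE] = *eqe;
    q->prod++;                           /* publish only after the copy */
    return 0;
}

int main(void)
{
    struct slave_event_q q = { 0 };
    struct eqe e = { .type = 0x0a };
    printf("queued: %s\n", slave_event(&q, &e) ? "no" : "yes");
    return 0;
}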
223 static void mlx4_slave_event(struct mlx4_dev *dev, int slave, in mlx4_slave_event() argument
226 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_slave_event()
228 if (slave < 0 || slave > dev->persist->num_vfs || in mlx4_slave_event()
229 slave == dev->caps.function || in mlx4_slave_event()
233 slave_event(dev, slave, eqe); in mlx4_slave_event()
240 struct mlx4_dev *dev = &priv->dev; in mlx4_set_eq_affinity_hint() local
249 mlx4_warn(dev, "irq_update_affinity_hint failed, err %d\n", hint_err); in mlx4_set_eq_affinity_hint()
253 int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port) in mlx4_gen_pkey_eqe() argument
257 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_gen_pkey_eqe()
267 eqe.event.port_mgmt_change.port = mlx4_phys_to_slave_port(dev, slave, port); in mlx4_gen_pkey_eqe()
269 return mlx4_GEN_EQE(dev, slave, &eqe); in mlx4_gen_pkey_eqe()
273 int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port) in mlx4_gen_guid_change_eqe() argument
278 if (dev->persist->num_vfs < slave) in mlx4_gen_guid_change_eqe()
284 eqe.event.port_mgmt_change.port = mlx4_phys_to_slave_port(dev, slave, port); in mlx4_gen_guid_change_eqe()
286 return mlx4_GEN_EQE(dev, slave, &eqe); in mlx4_gen_guid_change_eqe()
290 int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port, in mlx4_gen_port_state_change_eqe() argument
294 u8 slave_port = mlx4_phys_to_slave_port(dev, slave, port); in mlx4_gen_port_state_change_eqe()
297 if (dev->persist->num_vfs < slave) in mlx4_gen_port_state_change_eqe()
305 mlx4_dbg(dev, "%s: sending: %d to slave: %d on port: %d\n", __func__, in mlx4_gen_port_state_change_eqe()
307 return mlx4_GEN_EQE(dev, slave, &eqe); in mlx4_gen_port_state_change_eqe()
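mlx4_gen_pkey_eqe(), mlx4_gen_guid_change_eqe() and mlx4_gen_port_state_change_eqe() all follow the same shape in the lines above: validate the slave number, fill a port-management-change EQE on the stack, translate the physical port into the slave's port numbering, then hand the EQE to mlx4_GEN_EQE(). A compressed sketch of that shape follows; the event code, struct layout and helpers are hypothetical stand-ins.

/* Hedged sketch: event codes, struct layout and helpers are illustrative
 * stand-ins for the mlx4 originals. */
#include <stdio.h>
#include <string.h>

#define EVENT_TYPE_PORT_MGMT_CHANGE 0x3a     /* placeholder event code */

struct eqe {
    unsigned char type, subtype;
    struct { unsigned char port; } port_mgmt_change;
};

static int num_vfs = 2;                      /* stand-in for dev->persist->num_vfs */

static unsigned char phys_to_slave_port(int slave, unsigned char port)
{
    (void)slave;                             /* identity mapping is enough for the sketch */
    return port;
}

static int gen_eqe(int slave, const struct eqe *eqe)
{
    printf("EQE type %#x to slave %d, port %u\n",
           eqe->type, slave, eqe->port_mgmt_change.port);
    return 0;
}

/* Same pattern as the three mlx4_gen_*_eqe() helpers: bounds-check the slave,
 * build the EQE, translate the port into the slave's view, send it. */
static int gen_port_mgmt_eqe(int slave, unsigned char port, unsigned char subtype)
{
    struct eqe eqe;

    if (slave < 0 || slave > num_vfs)
        return -1;

    memset(&eqe, 0, sizeof(eqe));
    eqe.type = EVENT_TYPE_PORT_MGMT_CHANGE;
    eqe.subtype = subtype;
    eqe.port_mgmt_change.port = phys_to_slave_port(slave, port);
    return gen_eqe(slave, &eqe);
}

int main(void) { return gen_port_mgmt_eqe(1, 2, 0x01); }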
311 enum slave_port_state mlx4_get_slave_port_state(struct mlx4_dev *dev, int slave, u8 port) in mlx4_get_slave_port_state() argument
313 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_get_slave_port_state()
315 struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave); in mlx4_get_slave_port_state()
317 if (slave >= dev->num_slaves || port > dev->caps.num_ports || in mlx4_get_slave_port_state()
327 static int mlx4_set_slave_port_state(struct mlx4_dev *dev, int slave, u8 port, in mlx4_set_slave_port_state() argument
330 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_set_slave_port_state()
332 struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave); in mlx4_set_slave_port_state()
334 if (slave >= dev->num_slaves || port > dev->caps.num_ports || in mlx4_set_slave_port_state()
345 static void set_all_slave_state(struct mlx4_dev *dev, u8 port, int event) in set_all_slave_state() argument
349 struct mlx4_slaves_pport slaves_pport = mlx4_phys_to_slaves_pport(dev, in set_all_slave_state()
352 for (i = 0; i < dev->persist->num_vfs + 1; i++) in set_all_slave_state()
354 set_and_calc_slave_port_state(dev, i, port, in set_all_slave_state()
366 int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave, in set_and_calc_slave_port_state() argument
370 struct mlx4_priv *priv = mlx4_priv(dev); in set_and_calc_slave_port_state()
374 struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave); in set_and_calc_slave_port_state()
376 mlx4_get_slave_port_state(dev, slave, port); in set_and_calc_slave_port_state()
380 if (slave >= dev->num_slaves || port > dev->caps.num_ports || in set_and_calc_slave_port_state()
393 mlx4_set_slave_port_state(dev, slave, port, in set_and_calc_slave_port_state()
398 mlx4_set_slave_port_state(dev, slave, port, in set_and_calc_slave_port_state()
401 mlx4_set_slave_port_state(dev, slave, port, in set_and_calc_slave_port_state()
408 mlx4_set_slave_port_state(dev, slave, port, in set_and_calc_slave_port_state()
413 mlx4_set_slave_port_state(dev, slave, port, in set_and_calc_slave_port_state()
423 ret = mlx4_get_slave_port_state(dev, slave, port); in set_and_calc_slave_port_state()
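set_and_calc_slave_port_state() combines the incoming administrative event with the stored per-slave port state (mlx4_get_slave_port_state()) and writes the new state back through mlx4_set_slave_port_state(), as the repeated calls above suggest. The sketch below models the idea as a tiny state machine; the state and event names, and the exact transitions, are a simplified guess for illustration, not a copy of the driver's tables.

/* Hedged sketch of a slave-port state machine: names and transitions are
 * illustrative; the driver keeps its own enum slave_port_state and events. */
#include <stdio.h>

enum port_state { PORT_DOWN, PORT_PENDING_UP, PORT_UP };
enum port_event { EV_DEV_PORT_UP, EV_DEV_PORT_DOWN, EV_GID_VALID, EV_GID_INVALID };

/* Combine the current stored state with an incoming event and return the new
 * state; the caller would store it back (mlx4_set_slave_port_state() in the
 * listing) and may generate a port-change EQE when the state actually flips. */
static enum port_state next_state(enum port_state cur, enum port_event ev)
{
    switch (ev) {
    case EV_DEV_PORT_DOWN:
    case EV_GID_INVALID:
        return PORT_DOWN;
    case EV_DEV_PORT_UP:
        return (cur == PORT_DOWN) ? PORT_PENDING_UP : cur;
    case EV_GID_VALID:
        return (cur == PORT_PENDING_UP) ? PORT_UP : cur;
    }
    return cur;
}

int main(void)
{
    enum port_state s = PORT_DOWN;
    s = next_state(s, EV_DEV_PORT_UP);
    s = next_state(s, EV_GID_VALID);
    printf("final state = %d (2 == PORT_UP)\n", s);
    return 0;
}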
432 int mlx4_gen_slaves_port_mgt_ev(struct mlx4_dev *dev, u8 port, int attr) in mlx4_gen_slaves_port_mgt_ev() argument
444 slave_event(dev, ALL_SLAVES, &eqe); in mlx4_gen_slaves_port_mgt_ev()
458 struct mlx4_dev *dev = &priv->dev; in mlx4_master_handle_slave_flr() local
464 mlx4_dbg(dev, "mlx4_handle_slave_flr\n"); in mlx4_master_handle_slave_flr()
466 for (i = 0 ; i < dev->num_slaves; i++) { in mlx4_master_handle_slave_flr()
469 mlx4_dbg(dev, "mlx4_handle_slave_flr: clean slave: %d\n", in mlx4_master_handle_slave_flr()
476 if (dev->persist->interface_state & in mlx4_master_handle_slave_flr()
478 mlx4_delete_all_resources_for_slave(dev, i); in mlx4_master_handle_slave_flr()
485 err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE, in mlx4_master_handle_slave_flr()
488 mlx4_warn(dev, "Failed to notify FW on FLR done (slave:%d)\n", in mlx4_master_handle_slave_flr()
494 static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) in mlx4_eq_int() argument
496 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_eq_int()
510 int eqe_size = dev->caps.eqe_size; in mlx4_eq_int()
512 while ((eqe = next_eqe_sw(eq, dev->caps.eqe_factor, eqe_size))) { in mlx4_eq_int()
522 mlx4_cq_completion(dev, cqn); in mlx4_eq_int()
533 mlx4_dbg(dev, "event %d arrived\n", eqe->type); in mlx4_eq_int()
534 if (mlx4_is_master(dev)) { in mlx4_eq_int()
536 ret = mlx4_get_slave_from_resource_id(dev, in mlx4_eq_int()
541 mlx4_dbg(dev, "QP event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n", in mlx4_eq_int()
547 if (!ret && slave != dev->caps.function) { in mlx4_eq_int()
548 mlx4_slave_event(dev, slave, eqe); in mlx4_eq_int()
553 mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & in mlx4_eq_int()
558 mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT. srq_no=0x%x, eq 0x%x\n", in mlx4_eq_int()
563 if (mlx4_is_master(dev)) { in mlx4_eq_int()
565 ret = mlx4_get_slave_from_resource_id(dev, in mlx4_eq_int()
571 mlx4_warn(dev, "SRQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n", in mlx4_eq_int()
578 mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n", in mlx4_eq_int()
583 if (!ret && slave != dev->caps.function) { in mlx4_eq_int()
586 mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n", in mlx4_eq_int()
589 mlx4_slave_event(dev, slave, eqe); in mlx4_eq_int()
593 mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & in mlx4_eq_int()
598 mlx4_cmd_event(dev, in mlx4_eq_int()
607 slaves_port = mlx4_phys_to_slaves_pport(dev, port); in mlx4_eq_int()
610 dev, MLX4_DEV_EVENT_PORT_DOWN, &port); in mlx4_eq_int()
611 mlx4_priv(dev)->sense.do_sense_port[port] = 1; in mlx4_eq_int()
612 if (!mlx4_is_master(dev)) in mlx4_eq_int()
614 for (i = 0; i < dev->persist->num_vfs + 1; in mlx4_eq_int()
616 int reported_port = mlx4_is_bonded(dev) ? 1 : mlx4_phys_to_slave_port(dev, i, port); in mlx4_eq_int()
618 if (!test_bit(i, slaves_port.slaves) && !mlx4_is_bonded(dev)) in mlx4_eq_int()
620 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) { in mlx4_eq_int()
621 if (i == mlx4_master_func_num(dev)) in mlx4_eq_int()
623 mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n", in mlx4_eq_int()
631 mlx4_slave_event(dev, i, eqe); in mlx4_eq_int()
634 set_and_calc_slave_port_state(dev, i, port, in mlx4_eq_int()
639 if (i == mlx4_master_func_num(dev)) in mlx4_eq_int()
644 | (mlx4_phys_to_slave_port(dev, i, port) << 28)); in mlx4_eq_int()
645 mlx4_slave_event(dev, i, eqe); in mlx4_eq_int()
650 mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP, in mlx4_eq_int()
653 mlx4_priv(dev)->sense.do_sense_port[port] = 0; in mlx4_eq_int()
655 if (!mlx4_is_master(dev)) in mlx4_eq_int()
657 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) in mlx4_eq_int()
659 i < dev->persist->num_vfs + 1; in mlx4_eq_int()
661 int reported_port = mlx4_is_bonded(dev) ? 1 : mlx4_phys_to_slave_port(dev, i, port); in mlx4_eq_int()
663 if (!test_bit(i, slaves_port.slaves) && !mlx4_is_bonded(dev)) in mlx4_eq_int()
665 if (i == mlx4_master_func_num(dev)) in mlx4_eq_int()
673 mlx4_slave_event(dev, i, eqe); in mlx4_eq_int()
680 set_all_slave_state(dev, port, MLX4_DEV_EVENT_PORT_UP); in mlx4_eq_int()
686 mlx4_warn(dev, "CQ %s on CQN %06x\n", in mlx4_eq_int()
690 if (mlx4_is_master(dev)) { in mlx4_eq_int()
691 ret = mlx4_get_slave_from_resource_id(dev, in mlx4_eq_int()
696 mlx4_dbg(dev, "CQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n", in mlx4_eq_int()
702 if (!ret && slave != dev->caps.function) { in mlx4_eq_int()
703 mlx4_slave_event(dev, slave, eqe); in mlx4_eq_int()
707 mlx4_cq_event(dev, in mlx4_eq_int()
714 mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn); in mlx4_eq_int()
726 if (!mlx4_is_master(dev)) { in mlx4_eq_int()
727 mlx4_warn(dev, "Received comm channel event for non master device\n"); in mlx4_eq_int()
739 if (!mlx4_is_master(dev)) { in mlx4_eq_int()
740 mlx4_warn(dev, "Non-master function received FLR event\n"); in mlx4_eq_int()
744 mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave); in mlx4_eq_int()
746 if (flr_slave >= dev->num_slaves) { in mlx4_eq_int()
747 mlx4_warn(dev, in mlx4_eq_int()
761 mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN, in mlx4_eq_int()
769 if (mlx4_is_master(dev)) in mlx4_eq_int()
770 for (i = 0; i < dev->num_slaves; i++) { in mlx4_eq_int()
771 mlx4_dbg(dev, "%s: Sending MLX4_FATAL_WARNING_SUBTYPE_WARMING to slave: %d\n", in mlx4_eq_int()
773 if (i == dev->caps.function) in mlx4_eq_int()
775 mlx4_slave_event(dev, i, eqe); in mlx4_eq_int()
777 …mlx4_err(dev, "Temperature Threshold was reached! Threshold: %d celsius degrees; Current Temperatu… in mlx4_eq_int()
781 …mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), subtype %02x on EQ %d at index %u. owner=%x,… in mlx4_eq_int()
792 dev, MLX4_DEV_EVENT_PORT_MGMT_CHANGE, eqe); in mlx4_eq_int()
798 mlx4_warn(dev, "Bad cable detected on port %u\n", in mlx4_eq_int()
802 mlx4_warn(dev, "Unsupported cable detected\n"); in mlx4_eq_int()
805 mlx4_dbg(dev, in mlx4_eq_int()
818 …mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ow… in mlx4_eq_int()
851 struct mlx4_dev *dev = dev_ptr; in mlx4_interrupt() local
852 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_interrupt()
858 for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) in mlx4_interrupt()
859 work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]); in mlx4_interrupt()
867 struct mlx4_dev *dev = eq->dev; in mlx4_msi_x_interrupt() local
869 mlx4_eq_int(dev, eq); in mlx4_msi_x_interrupt()
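The two entry points above show the interrupt split: mlx4_interrupt() (legacy INTx) cannot tell which EQ fired, so it polls every completion EQ plus the async EQ and reports whether any work was found, while mlx4_msi_x_interrupt() services exactly the one EQ bound to its vector. A hedged stand-in sketch of that split:

/* Hedged sketch of the two interrupt entry points; all names are stand-ins. */
#include <stdio.h>

#define NUM_EQS 3

static int eq_int(int eq)                 /* stand-in for mlx4_eq_int() */
{
    printf("polled EQ %d\n", eq);
    return 1;                             /* pretend some work was found */
}

/* MSI-X path: the vector identifies exactly one EQ. */
static void msi_x_interrupt(int eq)
{
    eq_int(eq);
}

/* Legacy path: one shared interrupt, so poll every EQ and report whether any
 * of them had work (the driver returns IRQ_HANDLED only in that case). */
static int legacy_interrupt(void)
{
    int work = 0;
    for (int i = 0; i < NUM_EQS; i++)
        work |= eq_int(i);
    return work;
}

int main(void)
{
    msi_x_interrupt(1);
    return legacy_interrupt() ? 0 : 1;
}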
875 int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave, in mlx4_MAP_EQ_wrapper() argument
881 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_MAP_EQ_wrapper()
890 if (slave == dev->caps.function) in mlx4_MAP_EQ_wrapper()
891 err = mlx4_cmd(dev, in_param, (in_modifier & 0x80000000) | eqn, in mlx4_MAP_EQ_wrapper()
902 static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap, in mlx4_MAP_EQ() argument
905 return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num, in mlx4_MAP_EQ()
910 static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, in mlx4_SW2HW_EQ() argument
913 return mlx4_cmd(dev, mailbox->dma, eq_num, 0, in mlx4_SW2HW_EQ()
918 static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, int eq_num) in mlx4_HW2SW_EQ() argument
920 return mlx4_cmd(dev, 0, eq_num, 1, MLX4_CMD_HW2SW_EQ, in mlx4_HW2SW_EQ()
924 static int mlx4_num_eq_uar(struct mlx4_dev *dev) in mlx4_num_eq_uar() argument
931 return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs) / 4 - in mlx4_num_eq_uar()
932 dev->caps.reserved_eqs / 4 + 1; in mlx4_num_eq_uar()
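The /4 arithmetic above (and the eq->eqn / 4 indexing in mlx4_get_eq_uar() just below) suggests that one UAR page carries the doorbells of four EQs, with the first dev->caps.reserved_eqs EQ numbers belonging to firmware. mlx4_num_eq_uar() therefore counts the pages from the one containing the first non-reserved EQ up to the one containing the last completion EQ. A worked example of that arithmetic, with made-up capability values:

/* Hedged sketch of the UAR-page count: 4 EQ doorbells per UAR page assumed;
 * the reserved_eqs and num_comp_vectors values below are made up. */
#include <stdio.h>

static int num_eq_uar(int num_comp_vectors, int reserved_eqs)
{
    /* EQs owned by this function are numbered reserved_eqs ..
     * reserved_eqs + num_comp_vectors (the +1 covers the async EQ);
     * pages are indexed by eqn / 4, hence the trailing "+ 1". */
    return (num_comp_vectors + 1 + reserved_eqs) / 4 - reserved_eqs / 4 + 1;
}

int main(void)
{
    /* e.g. 10 reserved EQs and 16 completion vectors:
     * last eqn = 10 + 16 = 26 -> page 6; first page = 10 / 4 = 2; 6 - 2 + 1 = 5 */
    printf("UAR pages needed: %d\n", num_eq_uar(16, 10));
    return 0;
}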
935 static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq) in mlx4_get_eq_uar() argument
937 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_get_eq_uar()
940 index = eq->eqn / 4 - dev->caps.reserved_eqs / 4; in mlx4_get_eq_uar()
945 pci_resource_start(dev->persist->pdev, 2) + in mlx4_get_eq_uar()
946 ((eq->eqn / 4) << (dev->uar_page_shift)), in mlx4_get_eq_uar()
947 (1 << (dev->uar_page_shift))); in mlx4_get_eq_uar()
949 mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n", in mlx4_get_eq_uar()
958 static void mlx4_unmap_uar(struct mlx4_dev *dev) in mlx4_unmap_uar() argument
960 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_unmap_uar()
963 for (i = 0; i < mlx4_num_eq_uar(dev); ++i) in mlx4_unmap_uar()
970 static int mlx4_create_eq(struct mlx4_dev *dev, int nent, in mlx4_create_eq() argument
973 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_create_eq()
983 eq->dev = dev; in mlx4_create_eq()
988 npages = PAGE_ALIGN(eq->nent * dev->caps.eqe_size) / PAGE_SIZE; in mlx4_create_eq()
1002 mailbox = mlx4_alloc_cmd_mailbox(dev); in mlx4_create_eq()
1008 eq->page_list[i].buf = dma_alloc_coherent(&dev->persist-> in mlx4_create_eq()
1009 pdev->dev, in mlx4_create_eq()
1023 eq->doorbell = mlx4_get_eq_uar(dev, eq); in mlx4_create_eq()
1029 err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt); in mlx4_create_eq()
1033 err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list); in mlx4_create_eq()
1043 mtt_addr = mlx4_mtt_addr(dev, &eq->mtt); in mlx4_create_eq()
1047 err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn); in mlx4_create_eq()
1049 mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err); in mlx4_create_eq()
1054 mlx4_free_cmd_mailbox(dev, mailbox); in mlx4_create_eq()
1066 mlx4_mtt_cleanup(dev, &eq->mtt); in mlx4_create_eq()
1074 dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE, in mlx4_create_eq()
1078 mlx4_free_cmd_mailbox(dev, mailbox); in mlx4_create_eq()
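The mlx4_create_eq() lines show the creation order: allocate the EQE pages with dma_alloc_coherent(), record their DMA addresses, map a doorbell UAR, build an MTT for the pages (mlx4_mtt_init()/mlx4_write_mtt()), fill the EQ context in a command mailbox, and hand ownership to firmware with SW2HW_EQ; on failure the error labels unwind in reverse. The sketch below reproduces only that ordering and unwind idiom; every helper is a stub standing in for the real allocation or command.

/* Hedged sketch of the create/unwind ordering in mlx4_create_eq(). */
#include <stdio.h>

static int alloc_eqe_pages(void)   { return 0; }
static void free_eqe_pages(void)   { }
static int alloc_mailbox(void)     { return 0; }
static void free_mailbox(void)     { }
static int init_mtt(void)          { return 0; }
static void cleanup_mtt(void)      { }
static int write_mtt(void)         { return 0; }
static int sw2hw_eq(void)          { return 0; }   /* firmware takes ownership */

static int create_eq(void)
{
    int err;

    if ((err = alloc_eqe_pages()))
        return err;
    if ((err = alloc_mailbox()))
        goto err_pages;
    if ((err = init_mtt()))
        goto err_mailbox;
    if ((err = write_mtt()) || (err = sw2hw_eq()))
        goto err_mtt;                   /* a SW2HW_EQ failure also unwinds the MTT */

    free_mailbox();                     /* the mailbox is only needed for the command */
    return 0;

err_mtt:
    cleanup_mtt();
err_mailbox:
    free_mailbox();
err_pages:
    free_eqe_pages();
    return err;
}

int main(void) { printf("create_eq: %d\n", create_eq()); return 0; }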
1088 static void mlx4_free_eq(struct mlx4_dev *dev, in mlx4_free_eq() argument
1091 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_free_eq()
1097 int npages = PAGE_ALIGN(dev->caps.eqe_size * eq->nent) / PAGE_SIZE; in mlx4_free_eq()
1099 err = mlx4_HW2SW_EQ(dev, eq->eqn); in mlx4_free_eq()
1101 mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err); in mlx4_free_eq()
1106 mlx4_mtt_cleanup(dev, &eq->mtt); in mlx4_free_eq()
1108 dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE, in mlx4_free_eq()
1116 static void mlx4_free_irqs(struct mlx4_dev *dev) in mlx4_free_irqs() argument
1118 struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table; in mlx4_free_irqs()
1122 free_irq(dev->persist->pdev->irq, dev); in mlx4_free_irqs()
1124 for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) in mlx4_free_irqs()
1135 static int mlx4_map_clr_int(struct mlx4_dev *dev) in mlx4_map_clr_int() argument
1137 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_map_clr_int()
1139 priv->clr_base = ioremap(pci_resource_start(dev->persist->pdev, in mlx4_map_clr_int()
1143 mlx4_err(dev, "Couldn't map interrupt clear register, aborting\n"); in mlx4_map_clr_int()
1150 static void mlx4_unmap_clr_int(struct mlx4_dev *dev) in mlx4_unmap_clr_int() argument
1152 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_unmap_clr_int()
1157 int mlx4_alloc_eq_table(struct mlx4_dev *dev) in mlx4_alloc_eq_table() argument
1159 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_alloc_eq_table()
1161 priv->eq_table.eq = kcalloc(dev->caps.num_eqs - dev->caps.reserved_eqs, in mlx4_alloc_eq_table()
1169 void mlx4_free_eq_table(struct mlx4_dev *dev) in mlx4_free_eq_table() argument
1171 kfree(mlx4_priv(dev)->eq_table.eq); in mlx4_free_eq_table()
1174 int mlx4_init_eq_table(struct mlx4_dev *dev) in mlx4_init_eq_table() argument
1176 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_init_eq_table()
1180 priv->eq_table.uar_map = kcalloc(mlx4_num_eq_uar(dev), in mlx4_init_eq_table()
1189 roundup_pow_of_two(dev->caps.num_eqs), in mlx4_init_eq_table()
1190 dev->caps.num_eqs - 1, in mlx4_init_eq_table()
1191 dev->caps.reserved_eqs, in mlx4_init_eq_table()
1192 roundup_pow_of_two(dev->caps.num_eqs) - in mlx4_init_eq_table()
1193 dev->caps.num_eqs); in mlx4_init_eq_table()
1197 for (i = 0; i < mlx4_num_eq_uar(dev); ++i) in mlx4_init_eq_table()
1200 if (!mlx4_is_slave(dev)) { in mlx4_init_eq_table()
1201 err = mlx4_map_clr_int(dev); in mlx4_init_eq_table()
1213 (dev->caps.num_comp_vectors + 1), in mlx4_init_eq_table()
1220 for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) { in mlx4_init_eq_table()
1222 err = mlx4_create_eq(dev, in mlx4_init_eq_table()
1229 dev->caps.num_ports) + 1; in mlx4_init_eq_table()
1231 if (port <= dev->caps.num_ports) { in mlx4_init_eq_table()
1233 &mlx4_priv(dev)->port[port]; in mlx4_init_eq_table()
1237 mlx4_get_eqs_per_port(dev, port)); in mlx4_init_eq_table()
1239 mlx4_warn(dev, "Failed to allocate cpu rmap\n"); in mlx4_init_eq_table()
1248 mlx4_warn(dev, "Failed adding irq rmap\n"); in mlx4_init_eq_table()
1251 err = mlx4_create_eq(dev, dev->quotas.cq + in mlx4_init_eq_table()
1253 (dev->flags & MLX4_FLAG_MSI_X) ? in mlx4_init_eq_table()
1261 if (dev->flags & MLX4_FLAG_MSI_X) { in mlx4_init_eq_table()
1268 pci_name(dev->persist->pdev)); in mlx4_init_eq_table()
1283 pci_name(dev->persist->pdev)); in mlx4_init_eq_table()
1284 err = request_irq(dev->persist->pdev->irq, mlx4_interrupt, in mlx4_init_eq_table()
1285 IRQF_SHARED, priv->eq_table.irq_names, dev); in mlx4_init_eq_table()
1292 err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0, in mlx4_init_eq_table()
1295 mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n", in mlx4_init_eq_table()
1305 mlx4_free_eq(dev, &priv->eq_table.eq[--i]); in mlx4_init_eq_table()
1307 for (i = 1; i <= dev->caps.num_ports; i++) { in mlx4_init_eq_table()
1308 if (mlx4_priv(dev)->port[i].rmap) { in mlx4_init_eq_table()
1309 free_irq_cpu_rmap(mlx4_priv(dev)->port[i].rmap); in mlx4_init_eq_table()
1310 mlx4_priv(dev)->port[i].rmap = NULL; in mlx4_init_eq_table()
1314 mlx4_free_irqs(dev); in mlx4_init_eq_table()
1317 if (!mlx4_is_slave(dev)) in mlx4_init_eq_table()
1318 mlx4_unmap_clr_int(dev); in mlx4_init_eq_table()
1321 mlx4_unmap_uar(dev); in mlx4_init_eq_table()
1330 void mlx4_cleanup_eq_table(struct mlx4_dev *dev) in mlx4_cleanup_eq_table() argument
1332 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_cleanup_eq_table()
1335 mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 1, in mlx4_cleanup_eq_table()
1339 for (i = 1; i <= dev->caps.num_ports; i++) { in mlx4_cleanup_eq_table()
1340 if (mlx4_priv(dev)->port[i].rmap) { in mlx4_cleanup_eq_table()
1341 free_irq_cpu_rmap(mlx4_priv(dev)->port[i].rmap); in mlx4_cleanup_eq_table()
1342 mlx4_priv(dev)->port[i].rmap = NULL; in mlx4_cleanup_eq_table()
1346 mlx4_free_irqs(dev); in mlx4_cleanup_eq_table()
1348 for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) in mlx4_cleanup_eq_table()
1349 mlx4_free_eq(dev, &priv->eq_table.eq[i]); in mlx4_cleanup_eq_table()
1351 if (!mlx4_is_slave(dev)) in mlx4_cleanup_eq_table()
1352 mlx4_unmap_clr_int(dev); in mlx4_cleanup_eq_table()
1354 mlx4_unmap_uar(dev); in mlx4_cleanup_eq_table()
1363 int mlx4_test_async(struct mlx4_dev *dev) in mlx4_test_async() argument
1365 return mlx4_NOP(dev); in mlx4_test_async()
1373 int mlx4_test_interrupt(struct mlx4_dev *dev, int vector) in mlx4_test_interrupt() argument
1375 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_test_interrupt()
1379 mlx4_cmd_use_polling(dev); in mlx4_test_interrupt()
1382 err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0, in mlx4_test_interrupt()
1385 mlx4_warn(dev, "Failed mapping eq for interrupt test\n"); in mlx4_test_interrupt()
1390 mlx4_cmd_use_events(dev); in mlx4_test_interrupt()
1391 err = mlx4_NOP(dev); in mlx4_test_interrupt()
1394 mlx4_cmd_use_polling(dev); in mlx4_test_interrupt()
1396 mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0, in mlx4_test_interrupt()
1398 mlx4_cmd_use_events(dev); in mlx4_test_interrupt()
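The mlx4_test_interrupt() fragments trace a self-test sequence: switch the command interface to polled mode, remap the asynchronous events onto the EQ being tested with MAP_EQ, go back to event mode and issue a NOP command whose completion must now arrive as an interrupt on that EQ, then restore the mapping to the normal async EQ and re-enable event mode regardless of the outcome. A compressed sketch of that sequence with stub commands (every call below is a named stand-in, not the real mlx4 command):

/* Hedged sketch of the interrupt self-test sequence. */
#include <stdio.h>

static void cmd_use_polling(void) { puts("commands: polled mode"); }
static void cmd_use_events(void)  { puts("commands: event (interrupt) mode"); }
static int  map_async_events(int eqn) { printf("async events -> EQ %d\n", eqn); return 0; }
static int  nop(void)             { puts("NOP issued, waiting for its interrupt"); return 0; }

static int test_interrupt(int test_eqn, int async_eqn)
{
    int err;

    cmd_use_polling();                    /* don't rely on interrupts while remapping */
    err = map_async_events(test_eqn);     /* route async events to the EQ under test */
    if (!err) {
        cmd_use_events();                 /* the NOP completion must now interrupt */
        err = nop();
        cmd_use_polling();                /* back to polled mode before restoring */
    }
    map_async_events(async_eqn);          /* restore the normal async EQ mapping */
    cmd_use_events();
    return err;
}

int main(void) { return test_interrupt(2, 0); }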
1404 bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector) in mlx4_is_eq_vector_valid() argument
1406 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_is_eq_vector_valid()
1409 if (vector < 0 || (vector >= dev->caps.num_comp_vectors + 1) || in mlx4_is_eq_vector_valid()
1417 u32 mlx4_get_eqs_per_port(struct mlx4_dev *dev, u8 port) in mlx4_get_eqs_per_port() argument
1419 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_get_eqs_per_port()
1423 for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) in mlx4_get_eqs_per_port()
1431 int mlx4_is_eq_shared(struct mlx4_dev *dev, int vector) in mlx4_is_eq_shared() argument
1433 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_is_eq_shared()
1436 if (vector <= 0 || (vector >= dev->caps.num_comp_vectors + 1)) in mlx4_is_eq_shared()
1440 dev->caps.num_ports) > 1); in mlx4_is_eq_shared()
1444 struct cpu_rmap *mlx4_get_cpu_rmap(struct mlx4_dev *dev, int port) in mlx4_get_cpu_rmap() argument
1446 return mlx4_priv(dev)->port[port].rmap; in mlx4_get_cpu_rmap()
1450 int mlx4_assign_eq(struct mlx4_dev *dev, u8 port, int *vector) in mlx4_assign_eq() argument
1452 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_assign_eq()
1460 if (requested_vector < (dev->caps.num_comp_vectors + 1) && in mlx4_assign_eq()
1470 requested_vector += mlx4_get_eqs_per_port(dev, i++)) in mlx4_assign_eq()
1474 if (requested_vector < dev->caps.num_comp_vectors + 1 && in mlx4_assign_eq()
1483 for (i = 0; min_ref_count_val && i < dev->caps.num_comp_vectors + 1; in mlx4_assign_eq()
1503 dev->flags & MLX4_FLAG_MSI_X) { in mlx4_assign_eq()
1508 *prequested_vector, dev_name(&dev->persist->pdev->dev)); in mlx4_assign_eq()
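The mlx4_assign_eq() lines suggest the vector-assignment policy: honor an explicitly requested completion vector when it is valid and serves the given port, otherwise scan for the EQ with the smallest reference count among those serving the port (the min_ref_count_val loop), and under MSI-X the interrupt appears to be requested lazily the first time the vector is used. The sketch below models only the selection and reference counting; the table sizes and fields are stand-ins.

/* Hedged sketch of completion-vector assignment. */
#include <stdio.h>

#define NUM_EQS 4
#define NUM_PORTS 2

struct eq_slot {
    int ref_count;                       /* how many CQs currently use this EQ */
    int ports[NUM_PORTS];                /* which ports this EQ may serve */
};

static struct eq_slot eqs[NUM_EQS] = {
    { 3, { 1, 0 } }, { 1, { 1, 0 } }, { 0, { 0, 1 } }, { 2, { 1, 1 } },
};

static int assign_eq(int port, int requested)
{
    int best;

    if (requested >= 0 && requested < NUM_EQS && eqs[requested].ports[port - 1]) {
        best = requested;                       /* honor an explicit, valid request */
    } else {
        best = -1;                              /* otherwise pick the least-loaded EQ */
        for (int i = 0; i < NUM_EQS; i++) {
            if (!eqs[i].ports[port - 1])
                continue;
            if (best < 0 || eqs[i].ref_count < eqs[best].ref_count)
                best = i;
        }
    }
    if (best >= 0)
        eqs[best].ref_count++;                  /* the caller's CQ now uses this EQ */
    return best;
}

int main(void)
{
    printf("port 1 -> eq %d\n", assign_eq(1, -1));   /* expect eq 1 (ref_count 1) */
    printf("port 2 -> eq %d\n", assign_eq(2, -1));   /* expect eq 2 (ref_count 0) */
    return 0;
}

mlx4_release_eq(), listed at the end of this section, would be the counterpart that drops the reference count taken here.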
1542 int mlx4_eq_get_irq(struct mlx4_dev *dev, int cq_vec) in mlx4_eq_get_irq() argument
1544 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_eq_get_irq()
1550 void mlx4_release_eq(struct mlx4_dev *dev, int vec) in mlx4_release_eq() argument
1552 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_release_eq()