Lines matching refs: dev

84 static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)  in mlx5_cmd_destroy_eq()  argument
90 return mlx5_cmd_exec_in(dev, destroy_eq, in); in mlx5_cmd_destroy_eq()
139 dev_dbg_ratelimited(eq->dev->device, in mlx5_eq_comp_int()
206 struct mlx5_core_dev *dev; in mlx5_eq_async_int() local
212 dev = eq->dev; in mlx5_eq_async_int()
213 eqt = dev->priv.eq_table; in mlx5_eq_async_int()
243 void mlx5_cmd_eq_recover(struct mlx5_core_dev *dev) in mlx5_cmd_eq_recover() argument
245 struct mlx5_eq_async *eq = &dev->priv.eq_table->cmd_eq; in mlx5_cmd_eq_recover()
250 mlx5_core_warn(dev, "Recovered %d EQEs on cmd_eq\n", eqes); in mlx5_cmd_eq_recover()
265 create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, in create_map_eq() argument
272 struct mlx5_priv *priv = &dev->priv; in create_map_eq()
288 err = mlx5_frag_buf_alloc_node(dev, wq_get_byte_sz(log_eq_size, log_eq_stride), in create_map_eq()
289 &eq->frag_buf, dev->priv.numa_node); in create_map_eq()
312 if (!param->mask[0] && MLX5_CAP_GEN(dev, log_max_uctx)) in create_map_eq()
326 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); in create_map_eq()
332 eq->irqn = pci_irq_vector(dev->pdev, vecidx); in create_map_eq()
333 eq->dev = dev; in create_map_eq()
336 err = mlx5_debug_eq_add(dev, eq); in create_map_eq()
344 mlx5_cmd_destroy_eq(dev, eq->eqn); in create_map_eq()
350 mlx5_frag_buf_free(dev, &eq->frag_buf); in create_map_eq()
364 int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq, in mlx5_eq_enable() argument
385 void mlx5_eq_disable(struct mlx5_core_dev *dev, struct mlx5_eq *eq, in mlx5_eq_disable() argument
392 static int destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq) in destroy_unmap_eq() argument
396 mlx5_debug_eq_remove(dev, eq); in destroy_unmap_eq()
398 err = mlx5_cmd_destroy_eq(dev, eq->eqn); in destroy_unmap_eq()
400 mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n", in destroy_unmap_eq()
403 mlx5_frag_buf_free(dev, &eq->frag_buf); in destroy_unmap_eq()
429 mlx5_core_dbg(eq->dev, "cq 0x%x not found in eq 0x%x tree\n", in mlx5_eq_del_cq()
435 mlx5_core_dbg(eq->dev, "corruption on cqn 0x%x in eq 0x%x\n", in mlx5_eq_del_cq()
439 int mlx5_eq_table_init(struct mlx5_core_dev *dev) in mlx5_eq_table_init() argument
445 dev->priv.numa_node); in mlx5_eq_table_init()
449 dev->priv.eq_table = eq_table; in mlx5_eq_table_init()
451 mlx5_eq_debugfs_init(dev); in mlx5_eq_table_init()
457 eq_table->irq_table = mlx5_irq_table_get(dev); in mlx5_eq_table_init()
466 void mlx5_eq_table_cleanup(struct mlx5_core_dev *dev) in mlx5_eq_table_cleanup() argument
468 struct mlx5_eq_table *table = dev->priv.eq_table; in mlx5_eq_table_cleanup()
470 mlx5_eq_debugfs_cleanup(dev); in mlx5_eq_table_cleanup()
478 static int create_async_eq(struct mlx5_core_dev *dev, in create_async_eq() argument
481 struct mlx5_eq_table *eq_table = dev->priv.eq_table; in create_async_eq()
485 err = create_map_eq(dev, eq, param); in create_async_eq()
490 static int destroy_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq) in destroy_async_eq() argument
492 struct mlx5_eq_table *eq_table = dev->priv.eq_table; in destroy_async_eq()
496 err = destroy_unmap_eq(dev, eq); in destroy_async_eq()
517 mlx5_core_warn(eq->dev, "CQ error on CQN 0x%x, syndrome 0x%x\n", in cq_err_event_notifier()
522 mlx5_core_warn(eq->dev, "Async event for bogus CQ 0x%x\n", cqn); in cq_err_event_notifier()
534 static void gather_user_async_events(struct mlx5_core_dev *dev, u64 mask[4]) in gather_user_async_events() argument
541 MLX5_CAP_DEV_EVENT(dev, user_affiliated_events); in gather_user_async_events()
543 MLX5_CAP_DEV_EVENT(dev, user_unaffiliated_events); in gather_user_async_events()
550 static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4]) in gather_async_events_mask() argument
554 if (MLX5_VPORT_MANAGER(dev)) in gather_async_events_mask()
557 if (MLX5_CAP_GEN(dev, general_notification_event)) in gather_async_events_mask()
560 if (MLX5_CAP_GEN(dev, port_module_event)) in gather_async_events_mask()
563 mlx5_core_dbg(dev, "port_module_event is not set\n"); in gather_async_events_mask()
565 if (MLX5_PPS_CAP(dev)) in gather_async_events_mask()
568 if (MLX5_CAP_GEN(dev, fpga)) in gather_async_events_mask()
571 if (MLX5_CAP_GEN_MAX(dev, dct)) in gather_async_events_mask()
574 if (MLX5_CAP_GEN(dev, temp_warn_event)) in gather_async_events_mask()
577 if (MLX5_CAP_MCAM_REG(dev, tracer_registers)) in gather_async_events_mask()
580 if (MLX5_CAP_GEN(dev, max_num_of_monitor_counters)) in gather_async_events_mask()
583 if (mlx5_eswitch_is_funcs_handler(dev)) in gather_async_events_mask()
587 if (MLX5_CAP_GEN_MAX(dev, vhca_state)) in gather_async_events_mask()
590 if (MLX5_CAP_MACSEC(dev, log_max_macsec_offload)) in gather_async_events_mask()
593 if (mlx5_ipsec_device_caps(dev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD) in gather_async_events_mask()
599 if (MLX5_CAP_GEN(dev, event_cap)) in gather_async_events_mask()
600 gather_user_async_events(dev, mask); in gather_async_events_mask()
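gather_async_events_mask() builds a 256-bit subscription bitmap, one bit per MLX5_EVENT_TYPE_*, split across the four u64 words of mask[4]; each capability check above ORs in the bit for an event the device can actually deliver, and word 0 is what ends up as event_bitmask_63_0 in the CREATE_EQ command. A minimal sketch of the bit layout (the helper name is illustrative, not from the driver):

    /* Event type N lands in bit (N % 64) of word (N / 64). */
    static void example_mask_set(u64 mask[4], u8 event_type)
    {
            mask[event_type / 64] |= 1ull << (event_type % 64);
    }
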
604 setup_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq_async *eq, in setup_async_eq() argument
612 err = create_async_eq(dev, &eq->core, param); in setup_async_eq()
614 mlx5_core_warn(dev, "failed to create %s EQ %d\n", name, err); in setup_async_eq()
617 err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb); in setup_async_eq()
619 mlx5_core_warn(dev, "failed to enable %s EQ %d\n", name, err); in setup_async_eq()
620 destroy_async_eq(dev, &eq->core); in setup_async_eq()
625 static void cleanup_async_eq(struct mlx5_core_dev *dev, in cleanup_async_eq() argument
630 mlx5_eq_disable(dev, &eq->core, &eq->irq_nb); in cleanup_async_eq()
631 err = destroy_async_eq(dev, &eq->core); in cleanup_async_eq()
633 mlx5_core_err(dev, "failed to destroy %s eq, err(%d)\n", in cleanup_async_eq()
637 static u16 async_eq_depth_devlink_param_get(struct mlx5_core_dev *dev) in async_eq_depth_devlink_param_get() argument
639 struct devlink *devlink = priv_to_devlink(dev); in async_eq_depth_devlink_param_get()
648 mlx5_core_dbg(dev, "Failed to get param. using default. err = %d\n", err); in async_eq_depth_devlink_param_get()
652 static int create_async_eqs(struct mlx5_core_dev *dev) in create_async_eqs() argument
654 struct mlx5_eq_table *table = dev->priv.eq_table; in create_async_eqs()
661 table->ctrl_irq = mlx5_ctrl_irq_request(dev); in create_async_eqs()
666 mlx5_eq_notifier_register(dev, &table->cq_err_nb); in create_async_eqs()
673 mlx5_cmd_allowed_opcode(dev, MLX5_CMD_OP_CREATE_EQ); in create_async_eqs()
674 err = setup_async_eq(dev, &table->cmd_eq, &param, "cmd"); in create_async_eqs()
678 mlx5_cmd_use_events(dev); in create_async_eqs()
679 mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL); in create_async_eqs()
683 .nent = async_eq_depth_devlink_param_get(dev), in create_async_eqs()
686 gather_async_events_mask(dev, param.mask); in create_async_eqs()
687 err = setup_async_eq(dev, &table->async_eq, &param, "async"); in create_async_eqs()
697 err = setup_async_eq(dev, &table->pages_eq, &param, "pages"); in create_async_eqs()
704 cleanup_async_eq(dev, &table->async_eq, "async"); in create_async_eqs()
706 mlx5_cmd_use_polling(dev); in create_async_eqs()
707 cleanup_async_eq(dev, &table->cmd_eq, "cmd"); in create_async_eqs()
709 mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL); in create_async_eqs()
710 mlx5_eq_notifier_unregister(dev, &table->cq_err_nb); in create_async_eqs()
715 static void destroy_async_eqs(struct mlx5_core_dev *dev) in destroy_async_eqs() argument
717 struct mlx5_eq_table *table = dev->priv.eq_table; in destroy_async_eqs()
719 cleanup_async_eq(dev, &table->pages_eq, "pages"); in destroy_async_eqs()
720 cleanup_async_eq(dev, &table->async_eq, "async"); in destroy_async_eqs()
721 mlx5_cmd_allowed_opcode(dev, MLX5_CMD_OP_DESTROY_EQ); in destroy_async_eqs()
722 mlx5_cmd_use_polling(dev); in destroy_async_eqs()
723 cleanup_async_eq(dev, &table->cmd_eq, "cmd"); in destroy_async_eqs()
724 mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL); in destroy_async_eqs()
725 mlx5_eq_notifier_unregister(dev, &table->cq_err_nb); in destroy_async_eqs()
729 struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev) in mlx5_get_async_eq() argument
731 return &dev->priv.eq_table->async_eq.core; in mlx5_get_async_eq()
734 void mlx5_eq_synchronize_async_irq(struct mlx5_core_dev *dev) in mlx5_eq_synchronize_async_irq() argument
736 synchronize_irq(dev->priv.eq_table->async_eq.core.irqn); in mlx5_eq_synchronize_async_irq()
739 void mlx5_eq_synchronize_cmd_irq(struct mlx5_core_dev *dev) in mlx5_eq_synchronize_cmd_irq() argument
741 synchronize_irq(dev->priv.eq_table->cmd_eq.core.irqn); in mlx5_eq_synchronize_cmd_irq()
748 mlx5_eq_create_generic(struct mlx5_core_dev *dev, in mlx5_eq_create_generic() argument
752 dev->priv.numa_node); in mlx5_eq_create_generic()
758 param->irq = dev->priv.eq_table->ctrl_irq; in mlx5_eq_create_generic()
759 err = create_async_eq(dev, eq, param); in mlx5_eq_create_generic()
769 int mlx5_eq_destroy_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq) in mlx5_eq_destroy_generic() argument
776 err = destroy_async_eq(dev, eq); in mlx5_eq_destroy_generic()
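mlx5_eq_create_generic()/mlx5_eq_destroy_generic() are the exported entry points for consumers that need a private EQ served from the control IRQ (note line 758 above: the core fills in param->irq itself). A minimal sketch of that lifecycle, loosely modeled on the mlx5_ib page-fault EQ; the function name, event choice, and EQ depth are illustrative:

    #include <linux/mlx5/driver.h>
    #include <linux/mlx5/eq.h>

    static int example_eq_lifecycle(struct mlx5_core_dev *mdev)
    {
            struct mlx5_eq_param param = {
                    .nent = 64,             /* illustrative EQ depth */
            };
            struct mlx5_eq *eq;

            param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT;

            eq = mlx5_eq_create_generic(mdev, &param);
            if (IS_ERR(eq))
                    return PTR_ERR(eq);

            /* Poll EQEs with mlx5_eq_get_eqe() and re-arm with
             * mlx5_eq_update_ci() from the consumer's handler. */

            return mlx5_eq_destroy_generic(mdev, eq);
    }
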
818 static void comp_irq_release_pci(struct mlx5_core_dev *dev, u16 vecidx) in comp_irq_release_pci() argument
820 struct mlx5_eq_table *table = dev->priv.eq_table; in comp_irq_release_pci()
855 static struct cpu_rmap *mlx5_eq_table_get_pci_rmap(struct mlx5_core_dev *dev) in mlx5_eq_table_get_pci_rmap() argument
859 if (mlx5_core_is_sf(dev)) in mlx5_eq_table_get_pci_rmap()
860 return dev->priv.parent_mdev->priv.eq_table->rmap; in mlx5_eq_table_get_pci_rmap()
862 return dev->priv.eq_table->rmap; in mlx5_eq_table_get_pci_rmap()
868 static int comp_irq_request_pci(struct mlx5_core_dev *dev, u16 vecidx) in comp_irq_request_pci() argument
870 struct mlx5_eq_table *table = dev->priv.eq_table; in comp_irq_request_pci()
875 rmap = mlx5_eq_table_get_pci_rmap(dev); in comp_irq_request_pci()
876 cpu = mlx5_cpumask_default_spread(dev->priv.numa_node, vecidx); in comp_irq_request_pci()
877 irq = mlx5_irq_request_vector(dev, cpu, vecidx, &rmap); in comp_irq_request_pci()
884 static void comp_irq_release_sf(struct mlx5_core_dev *dev, u16 vecidx) in comp_irq_release_sf() argument
886 struct mlx5_eq_table *table = dev->priv.eq_table; in comp_irq_release_sf()
897 mlx5_irq_affinity_irq_release(dev, irq); in comp_irq_release_sf()
900 static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx) in comp_irq_request_sf() argument
902 struct mlx5_eq_table *table = dev->priv.eq_table; in comp_irq_request_sf()
903 struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev); in comp_irq_request_sf()
909 return comp_irq_request_pci(dev, vecidx); in comp_irq_request_sf()
919 mlx5_core_dbg(pool->dev, "IRQ %u mapped to cpu %*pbl, %u EQs on this irq\n", in comp_irq_request_sf()
920 pci_irq_vector(dev->pdev, mlx5_irq_get_index(irq)), in comp_irq_request_sf()
927 static void comp_irq_release(struct mlx5_core_dev *dev, u16 vecidx) in comp_irq_release() argument
929 mlx5_core_is_sf(dev) ? comp_irq_release_sf(dev, vecidx) : in comp_irq_release()
930 comp_irq_release_pci(dev, vecidx); in comp_irq_release()
933 static int comp_irq_request(struct mlx5_core_dev *dev, u16 vecidx) in comp_irq_request() argument
935 return mlx5_core_is_sf(dev) ? comp_irq_request_sf(dev, vecidx) : in comp_irq_request()
936 comp_irq_request_pci(dev, vecidx); in comp_irq_request()
973 static void destroy_comp_eq(struct mlx5_core_dev *dev, struct mlx5_eq_comp *eq, u16 vecidx) in destroy_comp_eq() argument
975 struct mlx5_eq_table *table = dev->priv.eq_table; in destroy_comp_eq()
978 mlx5_eq_disable(dev, &eq->core, &eq->irq_nb); in destroy_comp_eq()
979 if (destroy_unmap_eq(dev, &eq->core)) in destroy_comp_eq()
980 mlx5_core_warn(dev, "failed to destroy comp EQ 0x%x\n", in destroy_comp_eq()
984 comp_irq_release(dev, vecidx); in destroy_comp_eq()
988 static u16 comp_eq_depth_devlink_param_get(struct mlx5_core_dev *dev) in comp_eq_depth_devlink_param_get() argument
990 struct devlink *devlink = priv_to_devlink(dev); in comp_eq_depth_devlink_param_get()
999 mlx5_core_dbg(dev, "Failed to get param. using default. err = %d\n", err); in comp_eq_depth_devlink_param_get()
1004 static int create_comp_eq(struct mlx5_core_dev *dev, u16 vecidx) in create_comp_eq() argument
1006 struct mlx5_eq_table *table = dev->priv.eq_table; in create_comp_eq()
1015 mlx5_core_err(dev, "maximum number of vectors is allocated, %d\n", in create_comp_eq()
1020 err = comp_irq_request(dev, vecidx); in create_comp_eq()
1024 nent = comp_eq_depth_devlink_param_get(dev); in create_comp_eq()
1026 eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, dev->priv.numa_node); in create_comp_eq()
1044 err = create_map_eq(dev, &eq->core, &param); in create_comp_eq()
1047 err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb); in create_comp_eq()
1049 destroy_unmap_eq(dev, &eq->core); in create_comp_eq()
1053 mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->core.eqn); in create_comp_eq()
1062 mlx5_eq_disable(dev, &eq->core, &eq->irq_nb); in create_comp_eq()
1066 comp_irq_release(dev, vecidx); in create_comp_eq()
1070 int mlx5_comp_eqn_get(struct mlx5_core_dev *dev, u16 vecidx, int *eqn) in mlx5_comp_eqn_get() argument
1072 struct mlx5_eq_table *table = dev->priv.eq_table; in mlx5_comp_eqn_get()
1083 ret = create_comp_eq(dev, vecidx); in mlx5_comp_eqn_get()
1096 int mlx5_comp_irqn_get(struct mlx5_core_dev *dev, int vector, unsigned int *irqn) in mlx5_comp_irqn_get() argument
1098 struct mlx5_eq_table *table = dev->priv.eq_table; in mlx5_comp_irqn_get()
1104 err = mlx5_comp_eqn_get(dev, vector, &eqn); in mlx5_comp_irqn_get()
1113 unsigned int mlx5_comp_vectors_max(struct mlx5_core_dev *dev) in mlx5_comp_vectors_max() argument
1115 return dev->priv.eq_table->max_comp_eqs; in mlx5_comp_vectors_max()
1120 mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector) in mlx5_comp_irq_get_affinity_mask() argument
1122 struct mlx5_eq_table *table = dev->priv.eq_table; in mlx5_comp_irq_get_affinity_mask()
1132 int mlx5_comp_vector_get_cpu(struct mlx5_core_dev *dev, int vector) in mlx5_comp_vector_get_cpu() argument
1137 mask = mlx5_comp_irq_get_affinity_mask(dev, vector); in mlx5_comp_vector_get_cpu()
1141 cpu = mlx5_cpumask_default_spread(dev->priv.numa_node, vector); in mlx5_comp_vector_get_cpu()
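mlx5_comp_eqn_get() is the lookup that CQ creators perform: it lazily creates the completion EQ for a vector (via create_comp_eq() above) and hands back its EQN. A hedged sketch of the usual pattern, as ULPs such as mlx5_ib and mlx5_vdpa use it; the helper name is illustrative and the cqc field name follows the current mlx5_ifc layout:

    /* Resolve completion vector 0 to an EQN and aim a new CQ at it. */
    static int example_pick_eqn(struct mlx5_core_dev *mdev, void *cqc)
    {
            int eqn, err;

            err = mlx5_comp_eqn_get(mdev, 0, &eqn);
            if (err)
                    return err;

            MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
            return 0;
    }
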
1148 struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev) in mlx5_eq_table_get_rmap() argument
1150 return dev->priv.eq_table->rmap; in mlx5_eq_table_get_rmap()
1154 struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn) in mlx5_eqn2comp_eq() argument
1156 struct mlx5_eq_table *table = dev->priv.eq_table; in mlx5_eqn2comp_eq()
1168 void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev) in mlx5_core_eq_free_irqs() argument
1170 mlx5_irq_table_free_irqs(dev); in mlx5_core_eq_free_irqs()
1179 static int get_num_eqs(struct mlx5_core_dev *dev) in get_num_eqs() argument
1181 struct mlx5_eq_table *eq_table = dev->priv.eq_table; in get_num_eqs()
1190 if (!mlx5_core_is_eth_enabled(dev) && mlx5_eth_supported(dev)) in get_num_eqs()
1193 max_dev_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ? in get_num_eqs()
1194 MLX5_CAP_GEN(dev, max_num_eqs) : in get_num_eqs()
1195 1 << MLX5_CAP_GEN(dev, log_max_eq); in get_num_eqs()
1199 if (mlx5_core_is_sf(dev)) { in get_num_eqs()
1208 int mlx5_eq_table_create(struct mlx5_core_dev *dev) in mlx5_eq_table_create() argument
1210 struct mlx5_eq_table *eq_table = dev->priv.eq_table; in mlx5_eq_table_create()
1213 eq_table->max_comp_eqs = get_num_eqs(dev); in mlx5_eq_table_create()
1214 err = create_async_eqs(dev); in mlx5_eq_table_create()
1216 mlx5_core_err(dev, "Failed to create async EQs\n"); in mlx5_eq_table_create()
1220 err = alloc_rmap(dev); in mlx5_eq_table_create()
1222 mlx5_core_err(dev, "Failed to allocate rmap\n"); in mlx5_eq_table_create()
1229 destroy_async_eqs(dev); in mlx5_eq_table_create()
1234 void mlx5_eq_table_destroy(struct mlx5_core_dev *dev) in mlx5_eq_table_destroy() argument
1236 struct mlx5_eq_table *table = dev->priv.eq_table; in mlx5_eq_table_destroy()
1241 destroy_comp_eq(dev, eq, index); in mlx5_eq_table_destroy()
1243 free_rmap(dev); in mlx5_eq_table_destroy()
1244 destroy_async_eqs(dev); in mlx5_eq_table_destroy()
1247 int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb) in mlx5_eq_notifier_register() argument
1249 struct mlx5_eq_table *eqt = dev->priv.eq_table; in mlx5_eq_notifier_register()
1255 int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb) in mlx5_eq_notifier_unregister() argument
1257 struct mlx5_eq_table *eqt = dev->priv.eq_table; in mlx5_eq_notifier_unregister()
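
mlx5_eq_notifier_register()/mlx5_eq_notifier_unregister() let other mlx5 components subscribe to a single async event type on the async EQ, the same way lib/clock.c hooks PPS events. A minimal sketch using the MLX5_NB_INIT() helper from linux/mlx5/driver.h; the handler body and the PORT_CHANGE event choice are illustrative:

    static int example_port_event(struct notifier_block *nb,
                                  unsigned long type, void *data)
    {
            struct mlx5_eqe *eqe = data;   /* payload is the raw EQE */

            /* ... inspect eqe->data for the event-specific details ... */
            return NOTIFY_OK;
    }

    static struct mlx5_nb example_nb;

    static void example_subscribe(struct mlx5_core_dev *mdev)
    {
            MLX5_NB_INIT(&example_nb, example_port_event, PORT_CHANGE);
            mlx5_eq_notifier_register(mdev, &example_nb);
            /* ... and on teardown: */
            mlx5_eq_notifier_unregister(mdev, &example_nb);
    }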