/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2018 Mellanox Technologies. */

#ifndef MLX5_CORE_EQ_H
#define MLX5_CORE_EQ_H

#define MLX5_NUM_CMD_EQE   (32)
#define MLX5_NUM_ASYNC_EQE (0x1000)
#define MLX5_NUM_SPARE_EQE (0x80)

struct mlx5_eq;
struct mlx5_core_dev;

struct mlx5_eq_param {
	u8		irq_index;
	int		nent;
	u64		mask[4];
	cpumask_var_t	affinity;
};

struct mlx5_eq *
mlx5_eq_create_generic(struct mlx5_core_dev *dev, struct mlx5_eq_param *param);
int
mlx5_eq_destroy_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		   struct notifier_block *nb);
void mlx5_eq_disable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		     struct notifier_block *nb);

struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc);
void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm);

/* The HCA will think the queue has overflowed if we don't tell it that
 * we have been processing events.  EQs are created with
 * MLX5_NUM_SPARE_EQE extra entries, so the consumer index must be
 * handed back to the hardware at least that often.
 *
 * mlx5_eq_update_cc() must be called for every EQE processed in the
 * EQ's IRQ handler.
 */
static inline u32 mlx5_eq_update_cc(struct mlx5_eq *eq, u32 cc)
{
	if (unlikely(cc >= MLX5_NUM_SPARE_EQE)) {
		mlx5_eq_update_ci(eq, cc, 0);
		cc = 0;
	}
	return cc;
}

struct mlx5_nb {
	struct notifier_block nb;
	u8 event_type;
};

#define mlx5_nb_cof(ptr, type, member) \
	(container_of(container_of(ptr, struct mlx5_nb, nb), type, member))

#define MLX5_NB_INIT(name, handler, event) do {		\
	(name)->nb.notifier_call = handler;			\
	(name)->event_type = MLX5_EVENT_TYPE_##event;		\
} while (0)

#endif /* MLX5_CORE_EQ_H */
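
/* Usage sketch (illustrative only, not part of this header): the polling
 * pattern the EQE helpers above are built for, modelled on the generic EQ
 * consumers in the mlx5 driver.  my_eq_irq_handler() and my_handle_eqe()
 * are hypothetical names; irqreturn_t/IRQ_HANDLED come from
 * <linux/interrupt.h>.  Guarded with #if 0 so it is never compiled.
 */
#if 0
static irqreturn_t my_eq_irq_handler(int irq, void *eq_ptr)
{
	struct mlx5_eq *eq = eq_ptr;
	struct mlx5_eqe *eqe;
	u32 cc = 0;

	/* Drain every EQE the HCA has posted since the EQ was last armed. */
	while ((eqe = mlx5_eq_get_eqe(eq, cc))) {
		my_handle_eqe(eq, eqe);	/* consume one event */
		/* Fold cc into the consumer index before the
		 * MLX5_NUM_SPARE_EQE headroom is exhausted, as the
		 * comment above mlx5_eq_update_cc() requires.
		 */
		cc = mlx5_eq_update_cc(eq, ++cc);
	}

	/* Final consumer-index update; arm the EQ for the next interrupt. */
	mlx5_eq_update_ci(eq, cc, true);

	return IRQ_HANDLED;
}
#endif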
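
/* Usage sketch (illustrative only): embedding struct mlx5_nb in a private
 * context, initializing it with MLX5_NB_INIT(), and recovering the
 * container inside the notifier callback with mlx5_nb_cof().
 * struct my_events and my_cq_err_handler() are hypothetical names;
 * the CQ_ERROR argument expands to MLX5_EVENT_TYPE_CQ_ERROR.
 */
#if 0
struct my_events {
	struct mlx5_nb cq_err_nb;
	/* driver-private state ... */
};

static int my_cq_err_handler(struct notifier_block *nb,
			     unsigned long type, void *data)
{
	/* Walk from the notifier_block back out to the containing context:
	 * nb -> struct mlx5_nb -> struct my_events.
	 */
	struct my_events *ev = mlx5_nb_cof(nb, struct my_events, cq_err_nb);
	struct mlx5_eqe *eqe = data;

	/* ... handle the CQ error EQE using ev and eqe ... */
	return NOTIFY_OK;
}

static void my_events_init(struct my_events *ev)
{
	/* Sets ev->cq_err_nb.nb.notifier_call and ev->cq_err_nb.event_type. */
	MLX5_NB_INIT(&ev->cq_err_nb, my_cq_err_handler, CQ_ERROR);
}
#endif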