xref: /openbmc/linux/include/linux/mlx5/eq.h (revision e4e3f24b)
17701707cSSaeed Mahameed /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
27701707cSSaeed Mahameed /* Copyright (c) 2018 Mellanox Technologies. */
37701707cSSaeed Mahameed 
47701707cSSaeed Mahameed #ifndef MLX5_CORE_EQ_H
57701707cSSaeed Mahameed #define MLX5_CORE_EQ_H
67701707cSSaeed Mahameed 
/* IRQ vector 0 is reserved; completion vectors start at this base index. */
#define MLX5_IRQ_VEC_COMP_BASE 1

/* Default EQ depths: command EQs are small, async EQs need headroom. */
#define MLX5_NUM_CMD_EQE   (32)
#define MLX5_NUM_ASYNC_EQE (0x1000)
/* Extra entries allocated per EQ so the consumer index may lag behind the
 * hardware producer without triggering an overflow (see mlx5_eq_update_cc()).
 */
#define MLX5_NUM_SPARE_EQE (0x80)

struct mlx5_eq;
struct mlx5_core_dev;
147701707cSSaeed Mahameed 
/* Creation parameters for a generic EQ, consumed by mlx5_eq_create_generic(). */
struct mlx5_eq_param {
	u8             irq_index;	/* IRQ vector index to bind the EQ to */
	int            nent;		/* number of EQ entries to allocate */
	u64            mask[4];		/* event mask; presumably one bit per event type — verify against users */
	cpumask_var_t  affinity;	/* requested CPU affinity for the EQ's IRQ */
};
217701707cSSaeed Mahameed 
/* Create a generic EQ on @dev per @param; returns the EQ or an ERR_PTR. */
struct mlx5_eq *
mlx5_eq_create_generic(struct mlx5_core_dev *dev, struct mlx5_eq_param *param);
/* Destroy an EQ previously created with mlx5_eq_create_generic(). */
int
mlx5_eq_destroy_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
/* Attach @nb to @eq's notifier chain and arm event delivery. */
int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		   struct notifier_block *nb);
/* Detach @nb from @eq; counterpart of mlx5_eq_enable(). */
void mlx5_eq_disable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		     struct notifier_block *nb);

/* Peek the next EQE given @cc events already consumed; NULL if none ready. */
struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc);
/* Publish the consumer index to hardware; re-arm the EQ when @arm is true. */
void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm);
337701707cSSaeed Mahameed 
34d5d284b8SSaeed Mahameed /* The HCA will think the queue has overflowed if we
35d5d284b8SSaeed Mahameed  * don't tell it we've been processing events.  We
36d5d284b8SSaeed Mahameed  * create EQs with MLX5_NUM_SPARE_EQE extra entries,
37d5d284b8SSaeed Mahameed  * so we must update our consumer index at
38d5d284b8SSaeed Mahameed  * least that often.
39d5d284b8SSaeed Mahameed  *
40d5d284b8SSaeed Mahameed  * mlx5_eq_update_cc must be called on every EQE @EQ irq handler
41d5d284b8SSaeed Mahameed  */
42d5d284b8SSaeed Mahameed static inline u32 mlx5_eq_update_cc(struct mlx5_eq *eq, u32 cc)
43d5d284b8SSaeed Mahameed {
44d5d284b8SSaeed Mahameed 	if (unlikely(cc >= MLX5_NUM_SPARE_EQE)) {
45d5d284b8SSaeed Mahameed 		mlx5_eq_update_ci(eq, cc, 0);
46d5d284b8SSaeed Mahameed 		cc = 0;
47d5d284b8SSaeed Mahameed 	}
48d5d284b8SSaeed Mahameed 	return cc;
49d5d284b8SSaeed Mahameed }
50d5d284b8SSaeed Mahameed 
/* Notifier block tagged with the mlx5 event type it subscribes to;
 * initialize with MLX5_NB_INIT() and recover the container with mlx5_nb_cof().
 */
struct mlx5_nb {
	struct notifier_block nb;	/* embedded kernel notifier block */
	u8 event_type;			/* MLX5_EVENT_TYPE_* this notifier handles */
};
550f597ed4SSaeed Mahameed 
/* From a notifier_block pointer @ptr, walk out to the enclosing mlx5_nb and
 * then to the structure of @type that embeds it as @member.
 */
#define mlx5_nb_cof(ptr, type, member) \
	(container_of(container_of(ptr, struct mlx5_nb, nb), type, member))

/* Initialize an mlx5_nb: set its callback and the MLX5_EVENT_TYPE_##event
 * it listens for.  do/while(0) keeps the macro safe as a single statement.
 */
#define MLX5_NB_INIT(name, handler, event) do {              \
	(name)->nb.notifier_call = handler;                  \
	(name)->event_type = MLX5_EVENT_TYPE_##event;        \
} while (0)
630f597ed4SSaeed Mahameed 
647701707cSSaeed Mahameed #endif /* MLX5_CORE_EQ_H */
65