/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2018 Mellanox Technologies. */
37701707cSSaeed Mahameed
47701707cSSaeed Mahameed #ifndef MLX5_CORE_EQ_H
57701707cSSaeed Mahameed #define MLX5_CORE_EQ_H
67701707cSSaeed Mahameed
7d5d284b8SSaeed Mahameed #define MLX5_NUM_CMD_EQE (32)
86b367174SJakub Kicinski #define MLX5_NUM_ASYNC_EQE (0x1000)
9d5d284b8SSaeed Mahameed #define MLX5_NUM_SPARE_EQE (0x80)
10d5d284b8SSaeed Mahameed
117701707cSSaeed Mahameed struct mlx5_eq;
12*79b60ca8SShay Drory struct mlx5_irq;
130f597ed4SSaeed Mahameed struct mlx5_core_dev;
147701707cSSaeed Mahameed
/* Creation parameters for a generic EQ, consumed by
 * mlx5_eq_create_generic().
 */
struct mlx5_eq_param {
	int nent;		/* number of EQ entries to allocate */
	u64 mask[4];		/* presumably a bitmask of event types routed to
				 * this EQ — confirm against the EQC layout
				 */
	struct mlx5_irq *irq;	/* IRQ this EQ is attached to */
};
207701707cSSaeed Mahameed
217701707cSSaeed Mahameed struct mlx5_eq *
2224163189SYuval Avnery mlx5_eq_create_generic(struct mlx5_core_dev *dev, struct mlx5_eq_param *param);
237701707cSSaeed Mahameed int
247701707cSSaeed Mahameed mlx5_eq_destroy_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
251f8a7beeSYuval Avnery int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
261f8a7beeSYuval Avnery struct notifier_block *nb);
271f8a7beeSYuval Avnery void mlx5_eq_disable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
281f8a7beeSYuval Avnery struct notifier_block *nb);
297701707cSSaeed Mahameed
307701707cSSaeed Mahameed struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc);
317701707cSSaeed Mahameed void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm);
327701707cSSaeed Mahameed
33d5d284b8SSaeed Mahameed /* The HCA will think the queue has overflowed if we
34d5d284b8SSaeed Mahameed * don't tell it we've been processing events. We
35d5d284b8SSaeed Mahameed * create EQs with MLX5_NUM_SPARE_EQE extra entries,
36d5d284b8SSaeed Mahameed * so we must update our consumer index at
37d5d284b8SSaeed Mahameed * least that often.
38d5d284b8SSaeed Mahameed *
39d5d284b8SSaeed Mahameed * mlx5_eq_update_cc must be called on every EQE @EQ irq handler
40d5d284b8SSaeed Mahameed */
mlx5_eq_update_cc(struct mlx5_eq * eq,u32 cc)41d5d284b8SSaeed Mahameed static inline u32 mlx5_eq_update_cc(struct mlx5_eq *eq, u32 cc)
42d5d284b8SSaeed Mahameed {
43d5d284b8SSaeed Mahameed if (unlikely(cc >= MLX5_NUM_SPARE_EQE)) {
44d5d284b8SSaeed Mahameed mlx5_eq_update_ci(eq, cc, 0);
45d5d284b8SSaeed Mahameed cc = 0;
46d5d284b8SSaeed Mahameed }
47d5d284b8SSaeed Mahameed return cc;
48d5d284b8SSaeed Mahameed }
49d5d284b8SSaeed Mahameed
/* A notifier_block paired with the single HW event type its handler
 * subscribes to.
 */
struct mlx5_nb {
	struct notifier_block nb;
	u8 event_type;		/* one of the MLX5_EVENT_TYPE_* values */
};
540f597ed4SSaeed Mahameed
/* mlx5_nb_cof - "container of" for handlers registered via struct mlx5_nb.
 * @ptr:    pointer to the embedded notifier_block (as passed to the callback)
 * @type:   type of the outer structure that embeds a struct mlx5_nb
 * @member: name of the struct mlx5_nb member inside @type
 *
 * Two hops: notifier_block -> enclosing mlx5_nb -> enclosing @type.
 */
#define mlx5_nb_cof(ptr, type, member) \
	(container_of(container_of(ptr, struct mlx5_nb, nb), type, member))
570f597ed4SSaeed Mahameed
/* MLX5_NB_INIT - initialize a struct mlx5_nb.
 * @name:    pointer to the struct mlx5_nb to initialize
 * @handler: notifier callback to install
 * @event:   event suffix; expands to MLX5_EVENT_TYPE_<event>
 */
#define MLX5_NB_INIT(name, handler, event) do {              \
	(name)->nb.notifier_call = handler;                  \
	(name)->event_type = MLX5_EVENT_TYPE_##event;        \
} while (0)
620f597ed4SSaeed Mahameed
637701707cSSaeed Mahameed #endif /* MLX5_CORE_EQ_H */
64