/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2018 Mellanox Technologies */

#ifndef __LIB_MLX5_EQ_H__
#define __LIB_MLX5_EQ_H__
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eq.h>
#include <linux/mlx5/cq.h>

#define MLX5_MAX_IRQ_NAME   (32)
#define MLX5_EQE_SIZE       (sizeof(struct mlx5_eqe))

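/*
 * Deferred completion context: CQs that reported events on this EQ are
 * queued on process_list and their completion handlers are run from
 * mlx5_cq_tasklet_cb() outside the hard IRQ path.
 */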
struct mlx5_eq_tasklet {
	struct list_head      list;
	struct list_head      process_list;
	struct tasklet_struct task;
	spinlock_t            lock; /* lock completion tasklet list */
};

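/* Radix tree mapping CQ numbers to the CQs served by this EQ. */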
struct mlx5_cq_table {
	spinlock_t              lock;	/* protect radix tree */
	struct radix_tree_root  tree;
};

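/*
 * Core EQ state shared by async and completion EQs: the EQE fragment
 * buffer, the software consumer index and the doorbell through which
 * that index is reported back to the device.
 */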
struct mlx5_eq {
	struct mlx5_core_dev    *dev;
	struct mlx5_cq_table    cq_table;
	__be32 __iomem	        *doorbell;
	u32                     cons_index;
	struct mlx5_frag_buf    buf;
	int                     size;
	unsigned int            vecidx;
	unsigned int            irqn;
	u8                      eqn;
	int                     nent;
	struct mlx5_rsc_debug   *dbg;
};

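/*
 * Completion EQ: the shared core plus the tasklet context used to batch
 * CQ completions. Keeping 'core' first allows converting between the two
 * representations where the generic EQ code only sees a struct mlx5_eq.
 */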
struct mlx5_eq_comp {
	struct mlx5_eq          core; /* Must be first */
	struct mlx5_eq_tasklet  tasklet_ctx;
	struct list_head        list;
};

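/* Return the EQE at index @entry within the EQ's fragmented buffer. */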
static inline struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
{
	return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE);
}

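/*
 * Return the EQE at the current consumer index if its ownership bit shows
 * it has already been written by the device, or NULL when the queue is
 * empty. The expected owner value flips each time cons_index wraps the EQ.
 */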
static inline struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));

	return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;
}

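/*
 * Publish the consumer index through the EQ doorbell. With @arm set the
 * write goes to the first doorbell word, which also re-arms the EQ to
 * raise further interrupts; otherwise only the consumer index is updated
 * via the second word (offset 8).
 */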
static inline void eq_update_ci(struct mlx5_eq *eq, int arm)
{
	__be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
	u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);

	__raw_writel((__force u32)cpu_to_be32(val), addr);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}

int mlx5_eq_table_init(struct mlx5_core_dev *dev);
void mlx5_eq_table_cleanup(struct mlx5_core_dev *dev);
int mlx5_eq_table_create(struct mlx5_core_dev *dev);
void mlx5_eq_table_destroy(struct mlx5_core_dev *dev);

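/* CQ attach/detach and per-vector completion EQ helpers. */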
int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn);
struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev);
void mlx5_cq_tasklet_cb(unsigned long data);
struct cpumask *mlx5_eq_comp_cpumask(struct mlx5_core_dev *dev, int ix);

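/*
 * Recovery/teardown helpers: poll a completion EQ with its interrupt
 * disabled, and wait for any in-flight async/command EQ interrupt
 * handlers to finish.
 */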
u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq);
void mlx5_eq_synchronize_async_irq(struct mlx5_core_dev *dev);
void mlx5_eq_synchronize_cmd_irq(struct mlx5_core_dev *dev);

int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);

/* This function should only be called after mlx5_cmd_force_teardown_hca */
void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev);

#ifdef CONFIG_RFS_ACCEL
struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev);
#endif

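/*
 * Register/unregister a notifier for async EQ events; the mlx5_nb wrapper
 * carries the event type of interest.
 */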
int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb);

#endif