/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */

/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
/*          Kai Shen <kaishen@linux.alibaba.com> */
/* Copyright (c) 2020-2022, Alibaba Group. */

#ifndef __ERDMA_H__
#define __ERDMA_H__

#include <linux/bitfield.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/xarray.h>
#include <rdma/ib_verbs.h>

#include "erdma_hw.h"

#define DRV_MODULE_NAME "erdma"
#define ERDMA_NODE_DESC "Elastic RDMA (iWARP) stack"

struct erdma_eq {
	void *qbuf;
	dma_addr_t qbuf_dma_addr;

	spinlock_t lock;

	u32 depth;

	u16 ci;
	u16 rsvd;

	atomic64_t event_num;
	atomic64_t notify_num;

	u64 __iomem *db_addr;
	u64 *db_record;
};

struct erdma_cmdq_sq {
	void *qbuf;
	dma_addr_t qbuf_dma_addr;

	spinlock_t lock;

	u32 depth;
	u16 ci;
	u16 pi;

	u16 wqebb_cnt;

	u64 *db_record;
};

struct erdma_cmdq_cq {
	void *qbuf;
	dma_addr_t qbuf_dma_addr;

	spinlock_t lock;

	u32 depth;
	u32 ci;
	u32 cmdsn;

	u64 *db_record;

	atomic64_t armed_num;
};

enum {
	ERDMA_CMD_STATUS_INIT,
	ERDMA_CMD_STATUS_ISSUED,
	ERDMA_CMD_STATUS_FINISHED,
	ERDMA_CMD_STATUS_TIMEOUT
};

struct erdma_comp_wait {
	struct completion wait_event;
	u32 cmd_status;
	u32 ctx_id;
	u16 sq_pi;
	u8 comp_status;
	u8 rsvd;
	u32 comp_data[4];
};

enum {
	ERDMA_CMDQ_STATE_OK_BIT = 0,
	ERDMA_CMDQ_STATE_TIMEOUT_BIT = 1,
	ERDMA_CMDQ_STATE_CTX_ERR_BIT = 2,
};

#define ERDMA_CMDQ_TIMEOUT_MS 15000
#define ERDMA_REG_ACCESS_WAIT_MS 20
#define ERDMA_WAIT_DEV_DONE_CNT 500

struct erdma_cmdq {
	unsigned long *comp_wait_bitmap;
	struct erdma_comp_wait *wait_pool;
	spinlock_t lock;

	bool use_event;

	struct erdma_cmdq_sq sq;
	struct erdma_cmdq_cq cq;
	struct erdma_eq eq;

	unsigned long state;

	struct semaphore credits;
	u16 max_outstandings;
};

#define COMPROMISE_CC ERDMA_CC_CUBIC
enum erdma_cc_alg {
	ERDMA_CC_NEWRENO = 0,
	ERDMA_CC_CUBIC,
	ERDMA_CC_HPCC_RTT,
	ERDMA_CC_HPCC_ECN,
	ERDMA_CC_HPCC_INT,
	ERDMA_CC_METHODS_NUM
};

struct erdma_devattr {
	u32 fw_version;

	unsigned char peer_addr[ETH_ALEN];
	unsigned long cap_flags;

	int numa_node;
	enum erdma_cc_alg cc;
	u32 grp_num;
	u32 irq_num;

	bool disable_dwqe;
	u16 dwqe_pages;
	u16 dwqe_entries;

	u32 max_qp;
	u32 max_send_wr;
	u32 max_recv_wr;
	u32 max_ord;
	u32 max_ird;

	u32 max_send_sge;
	u32 max_recv_sge;
	u32 max_sge_rd;
	u32 max_cq;
	u32 max_cqe;
	u64 max_mr_size;
	u32 max_mr;
	u32 max_pd;
	u32 max_mw;
	u32 local_dma_key;
};

#define ERDMA_IRQNAME_SIZE 50

struct erdma_irq {
	char name[ERDMA_IRQNAME_SIZE];
	u32 msix_vector;
	cpumask_t affinity_hint_mask;
};

struct erdma_eq_cb {
	bool ready;
	void *dev; /* All EQs use this field to get the erdma_dev struct */
	struct erdma_irq irq;
	struct erdma_eq eq;
	struct tasklet_struct tasklet;
};

struct erdma_resource_cb {
	unsigned long *bitmap;
	spinlock_t lock;
	u32 next_alloc_idx;
	u32 max_cap;
};

enum {
	ERDMA_RES_TYPE_PD = 0,
	ERDMA_RES_TYPE_STAG_IDX = 1,
	ERDMA_RES_CNT = 2,
};
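
/*
 * A minimal sketch of how a next-fit index allocator built on top of
 * erdma_resource_cb might look: scan for a free bit starting at
 * next_alloc_idx, wrap around once, and remember where to resume. The
 * helper name and exact policy are assumptions for illustration, not the
 * driver's actual implementation.
 *
 *	static int erdma_alloc_idx_sketch(struct erdma_resource_cb *res_cb)
 *	{
 *		unsigned long flags;
 *		u32 idx;
 *
 *		spin_lock_irqsave(&res_cb->lock, flags);
 *		idx = find_next_zero_bit(res_cb->bitmap, res_cb->max_cap,
 *					 res_cb->next_alloc_idx);
 *		if (idx >= res_cb->max_cap)
 *			idx = find_first_zero_bit(res_cb->bitmap, res_cb->max_cap);
 *		if (idx >= res_cb->max_cap) {
 *			spin_unlock_irqrestore(&res_cb->lock, flags);
 *			return -ENOSPC;
 *		}
 *		set_bit(idx, res_cb->bitmap);
 *		res_cb->next_alloc_idx = idx + 1;
 *		spin_unlock_irqrestore(&res_cb->lock, flags);
 *
 *		return idx;
 *	}
 */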

#define ERDMA_EXTRA_BUFFER_SIZE ERDMA_DB_SIZE
#define WARPPED_BUFSIZE(size) ((size) + ERDMA_EXTRA_BUFFER_SIZE)

struct erdma_dev {
	struct ib_device ibdev;
	struct net_device *netdev;
	struct pci_dev *pdev;
	struct notifier_block netdev_nb;

	resource_size_t func_bar_addr;
	resource_size_t func_bar_len;
	u8 __iomem *func_bar;

	struct erdma_devattr attrs;
	/* physical port state (only one port per device) */
	enum ib_port_state state;
	u32 mtu;

	/* cmdq and aeq use the same msix vector */
	struct erdma_irq comm_irq;
	struct erdma_cmdq cmdq;
	struct erdma_eq aeq;
	struct erdma_eq_cb ceqs[ERDMA_NUM_MSIX_VEC - 1];

	spinlock_t lock;
	struct erdma_resource_cb res_cb[ERDMA_RES_CNT];
	struct xarray qp_xa;
	struct xarray cq_xa;

	u32 next_alloc_qpn;
	u32 next_alloc_cqn;

	spinlock_t db_bitmap_lock;
	/* We provide at most 64 uContexts, each of which has one SQ doorbell page. */
	DECLARE_BITMAP(sdb_page, ERDMA_DWQE_TYPE0_CNT);
	/*
	 * We provide at most 496 uContexts, each of which has one normal SQ
	 * doorbell and one directWQE doorbell.
	 */
	DECLARE_BITMAP(sdb_entry, ERDMA_DWQE_TYPE1_CNT);

	atomic_t num_ctx;
	struct list_head cep_list;
};
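
/*
 * Sketch of how a user context might claim one of the SQ doorbell pages
 * tracked in the bitmaps above, under db_bitmap_lock. The helper name is a
 * hypothetical illustration; the real allocation policy (type0 pages vs.
 * type1 entries, dwqe handling) lives in the verbs code.
 *
 *	static int erdma_claim_sdb_page_sketch(struct erdma_dev *dev)
 *	{
 *		int page_idx;
 *
 *		spin_lock(&dev->db_bitmap_lock);
 *		page_idx = find_first_zero_bit(dev->sdb_page, ERDMA_DWQE_TYPE0_CNT);
 *		if (page_idx < ERDMA_DWQE_TYPE0_CNT)
 *			set_bit(page_idx, dev->sdb_page);
 *		spin_unlock(&dev->db_bitmap_lock);
 *
 *		return page_idx < ERDMA_DWQE_TYPE0_CNT ? page_idx : -ENOSPC;
 *	}
 */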

static inline void *get_queue_entry(void *qbuf, u32 idx, u32 depth, u32 shift)
{
	idx &= (depth - 1);

	return qbuf + (idx << shift);
}
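
/*
 * Example use of get_queue_entry(): the queue depth is a power of two, so
 * the index is wrapped with a simple mask, and each entry occupies
 * (1 << shift) bytes. The entry type, buffer name and shift value below
 * are hypothetical, for illustration only.
 *
 *	// Fetch the 32-byte entry at consumer index 'ci' (shift of 5).
 *	struct erdma_cqe *cqe = get_queue_entry(cq_qbuf, ci, cq_depth, 5);
 */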

static inline struct erdma_dev *to_edev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct erdma_dev, ibdev);
}

static inline u32 erdma_reg_read32(struct erdma_dev *dev, u32 reg)
{
	return readl(dev->func_bar + reg);
}

static inline u64 erdma_reg_read64(struct erdma_dev *dev, u32 reg)
{
	return readq(dev->func_bar + reg);
}

static inline void erdma_reg_write32(struct erdma_dev *dev, u32 reg, u32 value)
{
	writel(value, dev->func_bar + reg);
}

static inline void erdma_reg_write64(struct erdma_dev *dev, u32 reg, u64 value)
{
	writeq(value, dev->func_bar + reg);
}

static inline u32 erdma_reg_read32_filed(struct erdma_dev *dev, u32 reg,
					 u32 field_mask)
{
	u32 val = erdma_reg_read32(dev, reg);

	return FIELD_GET(field_mask, val);
}
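
/*
 * Example: pulling one field out of a 32-bit device register. The register
 * offset and field mask names below are placeholders; the real definitions
 * come from erdma_hw.h.
 *
 *	u32 fw_major = erdma_reg_read32_filed(dev, ERDMA_EXAMPLE_VERSION_REG,
 *					      ERDMA_EXAMPLE_MAJOR_VER_MASK);
 */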

int erdma_cmdq_init(struct erdma_dev *dev);
void erdma_finish_cmdq_init(struct erdma_dev *dev);
void erdma_cmdq_destroy(struct erdma_dev *dev);

void erdma_cmdq_build_reqhdr(u64 *hdr, u32 mod, u32 op);
int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, void *req, u32 req_size,
			u64 *resp0, u64 *resp1);
void erdma_cmdq_completion_handler(struct erdma_cmdq *cmdq);
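
/*
 * Typical synchronous command flow, sketched: build the request header with
 * erdma_cmdq_build_reqhdr(), fill in the module-specific payload, then post
 * it with erdma_post_cmd_wait() and block until the device completes it (or
 * the ERDMA_CMDQ_TIMEOUT_MS timeout fires). The request structure, submodule
 * and opcode names below are hypothetical placeholders.
 *
 *	struct erdma_cmdq_example_req req;
 *	u64 resp0, resp1;
 *	int err;
 *
 *	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_EXAMPLE,
 *				CMDQ_OPCODE_EXAMPLE);
 *	req.example_field = value;
 *	err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req),
 *				  &resp0, &resp1);
 */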

int erdma_ceqs_init(struct erdma_dev *dev);
void erdma_ceqs_uninit(struct erdma_dev *dev);
void notify_eq(struct erdma_eq *eq);
void *get_next_valid_eqe(struct erdma_eq *eq);
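
/*
 * Sketch of the expected event-queue polling pattern: drain valid EQEs
 * while advancing the consumer index, then re-arm the queue with
 * notify_eq(). This is an illustrative outline, not the driver's exact
 * handler.
 *
 *	void *eqe;
 *
 *	while ((eqe = get_next_valid_eqe(eq)) != NULL) {
 *		eq->ci++;
 *		// ... handle the event described by 'eqe' ...
 *	}
 *	notify_eq(eq);
 */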

int erdma_aeq_init(struct erdma_dev *dev);
void erdma_aeq_destroy(struct erdma_dev *dev);

void erdma_aeq_event_handler(struct erdma_dev *dev);
void erdma_ceq_completion_handler(struct erdma_eq_cb *ceq_cb);

#endif