/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */

/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
/*          Kai Shen <kaishen@linux.alibaba.com> */
/* Copyright (c) 2020-2022, Alibaba Group. */

#ifndef __ERDMA_VERBS_H__
#define __ERDMA_VERBS_H__

#include "erdma.h"

/* RDMA capability limits. */
#define ERDMA_MAX_PD (128 * 1024)
#define ERDMA_MAX_SEND_WR 8192
#define ERDMA_MAX_ORD 128
#define ERDMA_MAX_IRD 128
#define ERDMA_MAX_SGE_RD 1
#define ERDMA_MAX_CONTEXT (128 * 1024)
#define ERDMA_MAX_SEND_SGE 6
#define ERDMA_MAX_RECV_SGE 1
#define ERDMA_MAX_INLINE (sizeof(struct erdma_sge) * (ERDMA_MAX_SEND_SGE))
#define ERDMA_MAX_FRMR_PA 512

enum {
	ERDMA_MMAP_IO_NC = 0, /* no cache */
};

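/*
 * Driver-private mmap entry: wraps the core rdma_user_mmap_entry and
 * carries the address to be mapped plus its caching attribute.
 */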
struct erdma_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	u64 address;
	u8 mmap_flag;
};

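/*
 * Per-process user context: records the send doorbell allocated to the
 * context (type, index, page and offset), the SQ/RQ/CQ doorbell
 * addresses, the mmap entries exported for them, and the pinned
 * doorbell-record pages shared with userspace.
 */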
struct erdma_ucontext {
	struct ib_ucontext ibucontext;

	u32 sdb_type;
	u32 sdb_idx;
	u32 sdb_page_idx;
	u32 sdb_page_off;
	u64 sdb;
	u64 rdb;
	u64 cdb;

	struct rdma_user_mmap_entry *sq_db_mmap_entry;
	struct rdma_user_mmap_entry *rq_db_mmap_entry;
	struct rdma_user_mmap_entry *cq_db_mmap_entry;

	/* doorbell records */
	struct list_head dbrecords_page_list;
	struct mutex dbrecords_page_mutex;
};

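/* Protection domain, identified by a device-assigned PD number (pdn). */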
struct erdma_pd {
	struct ib_pd ibpd;
	u32 pdn;
};

/*
 * Memory region definitions.
 */
#define ERDMA_MAX_INLINE_MTT_ENTRIES 4
#define MTT_SIZE(mtt_cnt) ((mtt_cnt) << 3) /* each MTT entry takes 8 bytes. */
#define ERDMA_MR_MAX_MTT_CNT 524288
#define ERDMA_MTT_ENTRY_SIZE 8

/* MR types. */
#define ERDMA_MR_TYPE_NORMAL 0
#define ERDMA_MR_TYPE_FRMR 1
#define ERDMA_MR_TYPE_DMA 2

/* MTT layouts. */
#define ERDMA_MR_INLINE_MTT 0
#define ERDMA_MR_INDIRECT_MTT 1

/* MR access rights. */
#define ERDMA_MR_ACC_RA BIT(0)
#define ERDMA_MR_ACC_LR BIT(1)
#define ERDMA_MR_ACC_LW BIT(2)
#define ERDMA_MR_ACC_RR BIT(3)
#define ERDMA_MR_ACC_RW BIT(4)

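/* Translate ib_verbs access flags into the device's MR access bits. */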
static inline u8 to_erdma_access_flags(int access)
{
	return (access & IB_ACCESS_REMOTE_READ ? ERDMA_MR_ACC_RR : 0) |
	       (access & IB_ACCESS_LOCAL_WRITE ? ERDMA_MR_ACC_LW : 0) |
	       (access & IB_ACCESS_REMOTE_WRITE ? ERDMA_MR_ACC_RW : 0) |
	       (access & IB_ACCESS_REMOTE_ATOMIC ? ERDMA_MR_ACC_RA : 0);
}

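/*
 * Example (sketch): an MR registration path could derive the device
 * access bits from the verbs flags and always grant local read:
 *
 *	mr->access = ERDMA_MR_ACC_LR | to_erdma_access_flags(access);
 */

/*
 * Pinned memory behind a queue or MR, plus the memory translation table
 * (MTT) that describes it: small tables fit inline in mtt_entry[],
 * larger ones live in the separately allocated mtt_buf.
 */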
struct erdma_mem {
	struct ib_umem *umem;
	void *mtt_buf;
	u32 mtt_type;
	u32 page_size;
	u32 page_offset;
	u32 page_cnt;
	u32 mtt_nents;

	u64 va;
	u64 len;

	u64 mtt_entry[ERDMA_MAX_INLINE_MTT_ENTRIES];
};

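/* Memory region: the describing erdma_mem plus type and access bits. */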
struct erdma_mr {
	struct ib_mr ibmr;
	struct erdma_mem mem;
	u8 type;
	u8 access;
	u8 valid;
};

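/*
 * A pinned page of user memory holding doorbell records, refcounted so
 * that QPs and CQs of the same context can share it.
 */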
struct erdma_user_dbrecords_page {
	struct list_head list;
	struct ib_umem *umem;
	u64 va;
	int refcnt;
};

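/*
 * Userspace QP: SQ/RQ buffers are user memory described by MTTs, and
 * the doorbell records live in a shared doorbell-records page.
 */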
struct erdma_uqp {
	struct erdma_mem sq_mtt;
	struct erdma_mem rq_mtt;

	dma_addr_t sq_db_info_dma_addr;
	dma_addr_t rq_db_info_dma_addr;

	struct erdma_user_dbrecords_page *user_dbr_page;

	u32 rq_offset;
};

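/*
 * Kernel QP: producer/consumer indices, per-WQE wr_id tables, mapped
 * hardware doorbells, and DMA-mapped queue buffers and doorbell records.
 */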
struct erdma_kqp {
	u16 sq_pi;
	u16 sq_ci;

	u16 rq_pi;
	u16 rq_ci;

	u64 *swr_tbl;
	u64 *rwr_tbl;

	void __iomem *hw_sq_db;
	void __iomem *hw_rq_db;

	void *sq_buf;
	dma_addr_t sq_buf_dma_addr;

	void *rq_buf;
	dma_addr_t rq_buf_dma_addr;

	void *sq_db_info;
	void *rq_db_info;

	u8 sig_all;
};

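/* QP states, modeled after the iWARP verbs QP state machine. */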
enum erdma_qp_state {
	ERDMA_QP_STATE_IDLE = 0,
	ERDMA_QP_STATE_RTR = 1,
	ERDMA_QP_STATE_RTS = 2,
	ERDMA_QP_STATE_CLOSING = 3,
	ERDMA_QP_STATE_TERMINATE = 4,
	ERDMA_QP_STATE_ERROR = 5,
	ERDMA_QP_STATE_UNDEF = 7,
	ERDMA_QP_STATE_COUNT = 8
};

enum erdma_qp_attr_mask {
	ERDMA_QP_ATTR_STATE = (1 << 0),
	ERDMA_QP_ATTR_LLP_HANDLE = (1 << 2),
	ERDMA_QP_ATTR_ORD = (1 << 3),
	ERDMA_QP_ATTR_IRD = (1 << 4),
	ERDMA_QP_ATTR_SQ_SIZE = (1 << 5),
	ERDMA_QP_ATTR_RQ_SIZE = (1 << 6),
	ERDMA_QP_ATTR_MPA = (1 << 7)
};

enum erdma_qp_flags {
	ERDMA_QP_IN_FLUSHING = (1 << 0),
};

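/*
 * Software QP attributes: state, congestion control algorithm, queue
 * depths and read limits, plus the connection role (active/passive) and
 * private-data length used during connection setup.
 */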
struct erdma_qp_attrs {
	enum erdma_qp_state state;
	enum erdma_cc_alg cc; /* Congestion control algorithm */
	u32 sq_size;
	u32 rq_size;
	u32 orq_size;
	u32 irq_size;
	u32 max_send_sge;
	u32 max_recv_sge;
	u32 cookie;
#define ERDMA_QP_ACTIVE 0
#define ERDMA_QP_PASSIVE 1
	u8 qp_type;
	u8 pd_len;
};

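/*
 * A queue pair: refcounted, tied to a connection endpoint (cep), backed
 * by either a kernel or a user QP depending on who created it.
 */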
struct erdma_qp {
	struct ib_qp ibqp;
	struct kref ref;
	struct completion safe_free;
	struct erdma_dev *dev;
	struct erdma_cep *cep;
	struct rw_semaphore state_lock;

	unsigned long flags;
	struct delayed_work reflush_dwork;

	union {
		struct erdma_kqp kern_qp;
		struct erdma_uqp user_qp;
	};

	struct erdma_cq *scq;
	struct erdma_cq *rcq;

	struct erdma_qp_attrs attrs;
	spinlock_t lock;
};

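/* Kernel CQ: DMA queue buffer, consumer index, doorbell and db record. */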
struct erdma_kcq_info {
	void *qbuf;
	dma_addr_t qbuf_dma_addr;
	u32 ci;
	u32 cmdsn;
	u32 notify_cnt;

	spinlock_t lock;
	u8 __iomem *db;
	u64 *db_record;
};

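/* User CQ: queue buffer described by MTTs; db record in a shared page. */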
struct erdma_ucq_info {
	struct erdma_mem qbuf_mtt;
	struct erdma_user_dbrecords_page *user_dbr_page;
	dma_addr_t db_info_dma_addr;
};

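/* Completion queue, associated with one event queue (assoc_eqn). */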
struct erdma_cq {
	struct ib_cq ibcq;
	u32 cqn;

	u32 depth;
	u32 assoc_eqn;

	union {
		struct erdma_kcq_info kern_cq;
		struct erdma_ucq_info user_cq;
	};
};

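/* QPN/CQN lookups backed by the device's XArrays. */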
#define QP_ID(qp) ((qp)->ibqp.qp_num)

static inline struct erdma_qp *find_qp_by_qpn(struct erdma_dev *dev, int id)
{
	return (struct erdma_qp *)xa_load(&dev->qp_xa, id);
}

static inline struct erdma_cq *find_cq_by_cqn(struct erdma_dev *dev, int id)
{
	return (struct erdma_cq *)xa_load(&dev->cq_xa, id);
}

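/*
 * Example (sketch): an event handler could resolve the QP for an event
 * by its QPN and hold a reference while touching it:
 *
 *	struct erdma_qp *qp = find_qp_by_qpn(dev, qpn);
 *
 *	if (qp)
 *		erdma_qp_get(qp);
 */

/* QP reference counting, state transition and connection teardown. */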
void erdma_qp_get(struct erdma_qp *qp);
void erdma_qp_put(struct erdma_qp *qp);
int erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs,
			     enum erdma_qp_attr_mask mask);
void erdma_qp_llp_close(struct erdma_qp *qp);
void erdma_qp_cm_drop(struct erdma_qp *qp);

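/* container_of() wrappers from ib core objects to erdma objects. */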
static inline struct erdma_ucontext *to_ectx(struct ib_ucontext *ibctx)
{
	return container_of(ibctx, struct erdma_ucontext, ibucontext);
}

static inline struct erdma_pd *to_epd(struct ib_pd *pd)
{
	return container_of(pd, struct erdma_pd, ibpd);
}

static inline struct erdma_mr *to_emr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct erdma_mr, ibmr);
}

static inline struct erdma_qp *to_eqp(struct ib_qp *qp)
{
	return container_of(qp, struct erdma_qp, ibqp);
}

static inline struct erdma_cq *to_ecq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct erdma_cq, ibcq);
}

static inline struct erdma_user_mmap_entry *
to_emmap(struct rdma_user_mmap_entry *ibmmap)
{
	return container_of(ibmmap, struct erdma_user_mmap_entry, rdma_entry);
}

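/* Verbs entry points implemented by erdma and wired into ib_device_ops. */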
int erdma_alloc_ucontext(struct ib_ucontext *ibctx, struct ib_udata *data);
void erdma_dealloc_ucontext(struct ib_ucontext *ibctx);
int erdma_query_device(struct ib_device *dev, struct ib_device_attr *attr,
		       struct ib_udata *data);
int erdma_get_port_immutable(struct ib_device *dev, u32 port,
			     struct ib_port_immutable *ib_port_immutable);
int erdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		    struct ib_udata *data);
int erdma_query_port(struct ib_device *dev, u32 port,
		     struct ib_port_attr *attr);
int erdma_query_gid(struct ib_device *dev, u32 port, int idx,
		    union ib_gid *gid);
int erdma_alloc_pd(struct ib_pd *ibpd, struct ib_udata *data);
int erdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
		    struct ib_udata *data);
int erdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask,
		   struct ib_qp_init_attr *init_attr);
int erdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask,
		    struct ib_udata *data);
int erdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
int erdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
int erdma_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
struct ib_mr *erdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
				u64 virt, int access, struct ib_udata *udata);
struct ib_mr *erdma_get_dma_mr(struct ib_pd *ibpd, int rights);
int erdma_dereg_mr(struct ib_mr *ibmr, struct ib_udata *data);
int erdma_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma);
void erdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
void erdma_qp_get_ref(struct ib_qp *ibqp);
void erdma_qp_put_ref(struct ib_qp *ibqp);
struct ib_qp *erdma_get_ibqp(struct ib_device *dev, int id);
int erdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *send_wr,
		    const struct ib_send_wr **bad_send_wr);
int erdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *recv_wr,
		    const struct ib_recv_wr **bad_recv_wr);
int erdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
struct ib_mr *erdma_ib_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
				u32 max_num_sg);
int erdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		    unsigned int *sg_offset);
void erdma_port_event(struct erdma_dev *dev, enum ib_event_type reason);
void erdma_set_mtu(struct erdma_dev *dev, u32 mtu);

#endif /* __ERDMA_VERBS_H__ */