/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
/* Copyright (c) 2015 - 2021 Intel Corporation */
#ifndef IRDMA_VERBS_H
#define IRDMA_VERBS_H

#define IRDMA_MAX_SAVED_PHY_PGADDR	4
#define IRDMA_FLUSH_DELAY_MS		20

#define IRDMA_PKEY_TBL_SZ		1
#define IRDMA_DEFAULT_PKEY		0xFFFF

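/* per-process verbs context; the lists track user memory registered for QPs and CQs */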
struct irdma_ucontext {
	struct ib_ucontext ibucontext;
	struct irdma_device *iwdev;
	struct rdma_user_mmap_entry *db_mmap_entry;
	struct list_head cq_reg_mem_list;
	spinlock_t cq_reg_mem_list_lock; /* protect CQ memory list */
	struct list_head qp_reg_mem_list;
	spinlock_t qp_reg_mem_list_lock; /* protect QP memory list */
	int abi_ver;
	u8 legacy_mode : 1;
	u8 use_raw_attrs : 1;
};

struct irdma_pd {
	struct ib_pd ibpd;
	struct irdma_sc_pd sc_pd;
};

union irdma_sockaddr {
	struct sockaddr_in saddr_in;
	struct sockaddr_in6 saddr_in6;
};

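/* address vector: resolved MAC, source/destination GID sockaddrs and network type */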
struct irdma_av {
	u8 macaddr[16];
	struct rdma_ah_attr attrs;
	union irdma_sockaddr sgid_addr;
	union irdma_sockaddr dgid_addr;
	u8 net_type;
};

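/* address handle; parent_ah references the cached AH this handle shares, if any */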
struct irdma_ah {
	struct ib_ah ibah;
	struct irdma_sc_ah sc_ah;
	struct irdma_pd *pd;
	struct irdma_av av;
	u8 sgid_index;
	union ib_gid dgid;
	struct hlist_node list;
	refcount_t refcnt;
	struct irdma_ah *parent_ah; /* AH from cached list */
};

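/* PBLE reference: either an index into the PBLE pool or a DMA address */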
struct irdma_hmc_pble {
	union {
		u32 idx;
		dma_addr_t addr;
	};
};

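/* user memory describing a CQ ring and its shadow area */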
struct irdma_cq_mr {
	struct irdma_hmc_pble cq_pbl;
	dma_addr_t shadow;
	bool split;
};

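/* user memory describing a QP's SQ/RQ rings and shadow area */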
struct irdma_qp_mr {
	struct irdma_hmc_pble sq_pbl;
	struct irdma_hmc_pble rq_pbl;
	dma_addr_t shadow;
	struct page *sq_page;
};

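/* retired kernel CQ buffer, kept on the CQ's resize_list until it can be freed */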
struct irdma_cq_buf {
	struct irdma_dma_mem kmem_buf;
	struct irdma_cq_uk cq_uk;
	struct irdma_hw *hw;
	struct list_head list;
	struct work_struct work;
};

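/* physical buffer list info for a user QP/CQ registration or memory region */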
struct irdma_pbl {
	struct list_head list;
	union {
		struct irdma_qp_mr qp_mr;
		struct irdma_cq_mr cq_mr;
	};

	bool pbl_allocated:1;
	bool on_list:1;
	u64 user_base;
	struct irdma_pble_alloc pble_alloc;
	struct irdma_mr *iwmr;
};

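/* memory region or memory window wrapper around ib_mr/ib_mw */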
struct irdma_mr {
	union {
		struct ib_mr ibmr;
		struct ib_mw ibmw;
	};
	struct ib_umem *region;
	u16 type;
	u32 page_cnt;
	u64 page_size;
	u32 npages;
	u32 stag;
	u64 len;
	u64 pgaddrmem[IRDMA_MAX_SAVED_PHY_PGADDR];
	struct irdma_pbl iwpbl;
};

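/* completion queue state for both kernel- and user-mode CQs */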
struct irdma_cq {
	struct ib_cq ibcq;
	struct irdma_sc_cq sc_cq;
	u16 cq_head;
	u16 cq_size;
	u16 cq_num;
	bool user_mode;
	atomic_t armed;
	enum irdma_cmpl_notify last_notify;
	u32 polled_cmpls;
	u32 cq_mem_size;
	struct irdma_dma_mem kmem;
	struct irdma_dma_mem kmem_shadow;
	struct completion free_cq;
	refcount_t refcnt;
	spinlock_t lock; /* for poll cq */
	struct irdma_pbl *iwpbl;
	struct irdma_pbl *iwpbl_shadow;
	struct list_head resize_list;
	struct irdma_cq_poll_info cur_cqe;
	struct list_head cmpl_generated;
};

struct irdma_cmpl_gen {
	struct list_head list;
	struct irdma_cq_poll_info cpi;
};

struct disconn_work {
	struct work_struct work;
	struct irdma_qp *iwqp;
};

struct iw_cm_id;

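/* ring memory and work request ID tracking for kernel-mode QPs */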
struct irdma_qp_kmode {
	struct irdma_dma_mem dma_mem;
	struct irdma_sq_uk_wr_trk_info *sq_wrid_mem;
	u64 *rq_wrid_mem;
};

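/* queue pair state; the unions hold iWARP- or RoCEv2-specific offload info */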
struct irdma_qp {
	struct ib_qp ibqp;
	struct irdma_sc_qp sc_qp;
	struct irdma_device *iwdev;
	struct irdma_cq *iwscq;
	struct irdma_cq *iwrcq;
	struct irdma_pd *iwpd;
	struct rdma_user_mmap_entry *push_wqe_mmap_entry;
	struct rdma_user_mmap_entry *push_db_mmap_entry;
	struct irdma_qp_host_ctx_info ctx_info;
	union {
		struct irdma_iwarp_offload_info iwarp_info;
		struct irdma_roce_offload_info roce_info;
	};

	union {
		struct irdma_tcp_offload_info tcp_info;
		struct irdma_udp_offload_info udp_info;
	};

	struct irdma_ah roce_ah;
	struct list_head teardown_entry;
	refcount_t refcnt;
	struct iw_cm_id *cm_id;
	struct irdma_cm_node *cm_node;
	struct delayed_work dwork_flush;
	struct ib_mr *lsmm_mr;
	atomic_t hw_mod_qp_pend;
	enum ib_qp_state ibqp_state;
	u32 qp_mem_size;
	u32 last_aeq;
	int max_send_wr;
	int max_recv_wr;
	atomic_t close_timer_started;
	spinlock_t lock; /* serialize posting WRs to SQ/RQ */
	struct irdma_qp_context *iwqp_context;
	void *pbl_vbase;
	dma_addr_t pbl_pbase;
	struct page *page;
	u8 active_conn : 1;
	u8 user_mode : 1;
	u8 hte_added : 1;
	u8 flush_issued : 1;
	u8 sig_all : 1;
	u8 pau_mode : 1;
	u8 suspend_pending : 1;
	u8 rsvd : 1;
	u8 iwarp_state;
	u16 term_sq_flush_code;
	u16 term_rq_flush_code;
	u8 hw_iwarp_state;
	u8 hw_tcp_state;
	struct irdma_qp_kmode kqp;
	struct irdma_dma_mem host_ctx;
	struct timer_list terminate_timer;
	struct irdma_pbl *iwpbl;
	struct irdma_dma_mem q2_ctx_mem;
	struct irdma_dma_mem ietf_mem;
	struct completion free_qp;
	wait_queue_head_t waitq;
	wait_queue_head_t mod_qp_waitq;
	u8 rts_ae_rcvd;
};

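/* caching attribute for BAR pages mmapped to user space */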
enum irdma_mmap_flag {
	IRDMA_MMAP_IO_NC,
	IRDMA_MMAP_IO_WC,
};

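/* mmap entry tying a user mapping to its BAR offset and caching attribute */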
struct irdma_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	u64 bar_offset;
	u8 mmap_flag;
};

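/* decode the firmware major/minor version from the FW_INFO feature word */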
static inline u16 irdma_fw_major_ver(struct irdma_sc_dev *dev)
{
	return (u16)FIELD_GET(IRDMA_FW_VER_MAJOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
}

static inline u16 irdma_fw_minor_ver(struct irdma_sc_dev *dev)
{
	return (u16)FIELD_GET(IRDMA_FW_VER_MINOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
}

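/* translate a send queue completion's op_type into the ib_wc opcode */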
static inline void set_ib_wc_op_sq(struct irdma_cq_poll_info *cq_poll_info,
				   struct ib_wc *entry)
{
	switch (cq_poll_info->op_type) {
	case IRDMA_OP_TYPE_RDMA_WRITE:
	case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
		entry->opcode = IB_WC_RDMA_WRITE;
		break;
	case IRDMA_OP_TYPE_RDMA_READ_INV_STAG:
	case IRDMA_OP_TYPE_RDMA_READ:
		entry->opcode = IB_WC_RDMA_READ;
		break;
	case IRDMA_OP_TYPE_SEND_SOL:
	case IRDMA_OP_TYPE_SEND_SOL_INV:
	case IRDMA_OP_TYPE_SEND_INV:
	case IRDMA_OP_TYPE_SEND:
		entry->opcode = IB_WC_SEND;
		break;
	case IRDMA_OP_TYPE_FAST_REG_NSMR:
		entry->opcode = IB_WC_REG_MR;
		break;
	case IRDMA_OP_TYPE_INV_STAG:
		entry->opcode = IB_WC_LOCAL_INV;
		break;
	default:
		entry->status = IB_WC_GENERAL_ERR;
	}
}

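/* translate a receive queue completion into the ib_wc opcode, based on whether send-with-immediate is supported */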
static inline void set_ib_wc_op_rq(struct irdma_cq_poll_info *cq_poll_info,
				   struct ib_wc *entry, bool send_imm_support)
{
	/*
	 * iWARP does not support sendImm, so the presence of Imm data
	 * must be WriteImm.
	 */
	if (!send_imm_support) {
		entry->opcode = cq_poll_info->imm_valid ?
					IB_WC_RECV_RDMA_WITH_IMM :
					IB_WC_RECV;
		return;
	}

	switch (cq_poll_info->op_type) {
	case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE:
	case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE:
		entry->opcode = IB_WC_RECV_RDMA_WITH_IMM;
		break;
	default:
		entry->opcode = IB_WC_RECV;
	}
}

void irdma_mcast_mac(u32 *ip_addr, u8 *mac, bool ipv4);
int irdma_ib_register_device(struct irdma_device *iwdev);
void irdma_ib_unregister_device(struct irdma_device *iwdev);
void irdma_ib_dealloc_device(struct ib_device *ibdev);
void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event);
void irdma_generate_flush_completions(struct irdma_qp *iwqp);
void irdma_remove_cmpls_list(struct irdma_cq *iwcq);
int irdma_generated_cmpls(struct irdma_cq *iwcq, struct irdma_cq_poll_info *cq_poll_info);
#endif /* IRDMA_VERBS_H */