// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "osdep.h"
#include "defs.h"
#include "user.h"
#include "irdma.h"

/**
 * irdma_set_fragment - set fragment in wqe
 * @wqe: wqe for setting fragment
 * @offset: offset value
 * @sge: sge length and stag
 * @valid: wqe valid flag
 */
static void irdma_set_fragment(__le64 *wqe, u32 offset, struct ib_sge *sge,
			       u8 valid)
{
	if (sge) {
		set_64bit_val(wqe, offset,
			      FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr));
		set_64bit_val(wqe, offset + 8,
			      FIELD_PREP(IRDMAQPSQ_VALID, valid) |
			      FIELD_PREP(IRDMAQPSQ_FRAG_LEN, sge->length) |
			      FIELD_PREP(IRDMAQPSQ_FRAG_STAG, sge->lkey));
	} else {
		set_64bit_val(wqe, offset, 0);
		set_64bit_val(wqe, offset + 8,
			      FIELD_PREP(IRDMAQPSQ_VALID, valid));
	}
}

/**
 * irdma_set_fragment_gen_1 - set fragment in wqe
 * @wqe: wqe for setting fragment
 * @offset: offset value
 * @sge: sge length and stag
 * @valid: wqe valid flag
 */
static void irdma_set_fragment_gen_1(__le64 *wqe, u32 offset,
				     struct ib_sge *sge, u8 valid)
{
	if (sge) {
		set_64bit_val(wqe, offset,
			      FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr));
		set_64bit_val(wqe, offset + 8,
			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, sge->length) |
			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, sge->lkey));
	} else {
		set_64bit_val(wqe, offset, 0);
		set_64bit_val(wqe, offset + 8, 0);
	}
}

/**
 * irdma_nop_1 - insert a NOP wqe
 * @qp: hw qp ptr
 */
static int irdma_nop_1(struct irdma_qp_uk *qp)
{
	u64 hdr;
	__le64 *wqe;
	u32 wqe_idx;
	bool signaled = false;

	if (!qp->sq_ring.head)
		return -EINVAL;

	wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
	wqe = qp->sq_base[wqe_idx].elem;

	qp->sq_wrtrk_array[wqe_idx].quanta = IRDMA_QP_WQE_MIN_QUANTA;

	set_64bit_val(wqe, 0, 0);
	set_64bit_val(wqe, 8, 0);
	set_64bit_val(wqe, 16, 0);

	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	/* make sure WQE is written before valid bit is set */
	dma_wmb();

	set_64bit_val(wqe, 24, hdr);

	return 0;
}

/**
 * irdma_clr_wqes - clear next 128 sq entries
 * @qp: hw qp ptr
 * @qp_wqe_idx: wqe_idx
 */
void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx)
{
	struct irdma_qp_quanta *sq;
	u32 wqe_idx;

	if (!(qp_wqe_idx & 0x7F)) {
		wqe_idx = (qp_wqe_idx + 128) % qp->sq_ring.size;
		sq = qp->sq_base + wqe_idx;
		if (wqe_idx)
			memset(sq, qp->swqe_polarity ? 0 : 0xFF,
			       128 * sizeof(*sq));
		else
			memset(sq, qp->swqe_polarity ? 0xFF : 0,
			       128 * sizeof(*sq));
	}
}
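
/*
 * Note on irdma_clr_wqes(): filling the upcoming 128-slot window with the
 * inverse of the expected polarity pre-invalidates those WQEs, so hardware
 * never mistakes stale ring contents for newly posted work; the window that
 * wraps to index 0 uses the opposite fill because the polarity toggles there.
 */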

/**
 * irdma_uk_qp_post_wr - ring doorbell
 * @qp: hw qp ptr
 */
void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp)
{
	u64 temp;
	u32 hw_sq_tail;
	u32 sw_sq_head;

	/* valid bit is written and loads completed before reading shadow */
	mb();

	/* read the doorbell shadow area */
	get_64bit_val(qp->shadow_area, 0, &temp);

	hw_sq_tail = (u32)FIELD_GET(IRDMA_QP_DBSA_HW_SQ_TAIL, temp);
	sw_sq_head = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
	if (sw_sq_head != qp->initial_ring.head) {
		if (qp->push_dropped) {
			writel(qp->qp_id, qp->wqe_alloc_db);
			qp->push_dropped = false;
		} else if (sw_sq_head != hw_sq_tail) {
			if (sw_sq_head > qp->initial_ring.head) {
				if (hw_sq_tail >= qp->initial_ring.head &&
				    hw_sq_tail < sw_sq_head)
					writel(qp->qp_id, qp->wqe_alloc_db);
			} else {
				if (hw_sq_tail >= qp->initial_ring.head ||
				    hw_sq_tail < sw_sq_head)
					writel(qp->qp_id, qp->wqe_alloc_db);
			}
		}
	}

	qp->initial_ring.head = qp->sq_ring.head;
}
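
/*
 * Doorbell rationale, for reference: the doorbell is rung only when the
 * hardware tail read from the shadow area suggests hardware has consumed
 * everything posted previously and would otherwise sit idle; if the tail
 * shows hardware still working through older WQEs, it will reach the new
 * ones on its own. The two branches above cover the unwrapped and wrapped
 * positions of the software head relative to the last post.
 */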

/**
 * irdma_qp_ring_push_db - ring qp doorbell
 * @qp: hw qp ptr
 * @wqe_idx: wqe index
 */
static void irdma_qp_ring_push_db(struct irdma_qp_uk *qp, u32 wqe_idx)
{
	set_32bit_val(qp->push_db, 0,
		      FIELD_PREP(IRDMA_WQEALLOC_WQE_DESC_INDEX, wqe_idx >> 3) | qp->qp_id);
	qp->initial_ring.head = qp->sq_ring.head;
	qp->push_mode = true;
	qp->push_dropped = false;
}

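/**
 * irdma_qp_push_wqe - copy wqe to the push page and ring the push doorbell
 * @qp: hw qp ptr
 * @wqe: wqe to push
 * @quanta: size of wqe in quanta
 * @wqe_idx: wqe index
 * @post_sq: ring the regular doorbell instead when push cannot be used
 */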
void irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta,
		       u32 wqe_idx, bool post_sq)
{
	__le64 *push;

	if (IRDMA_RING_CURRENT_HEAD(qp->initial_ring) !=
		    IRDMA_RING_CURRENT_TAIL(qp->sq_ring) &&
	    !qp->push_mode) {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	} else {
		push = (__le64 *)((uintptr_t)qp->push_wqe +
				  (wqe_idx & 0x7) * 0x20);
		memcpy(push, wqe, quanta * IRDMA_QP_WQE_MIN_SIZE);
		irdma_qp_ring_push_db(qp, wqe_idx);
	}
}

/**
 * irdma_qp_get_next_send_wqe - pad with NOP if needed, return where next WR should go
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 * @quanta: size of WR in quanta
 * @total_size: size of WR in bytes
 * @info: info on WR
 */
__le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
				   u16 quanta, u32 total_size,
				   struct irdma_post_sq_info *info)
{
	__le64 *wqe;
	__le64 *wqe_0 = NULL;
	u32 nop_wqe_idx;
	u16 avail_quanta;
	u16 i;

	avail_quanta = qp->uk_attrs->max_hw_sq_chunk -
		       (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) %
		       qp->uk_attrs->max_hw_sq_chunk);
	if (quanta <= avail_quanta) {
		/* WR fits in current chunk */
		if (quanta > IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
			return NULL;
	} else {
		/* Need to pad with NOP */
		if (quanta + avail_quanta >
			IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
			return NULL;

		nop_wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
		for (i = 0; i < avail_quanta; i++) {
			irdma_nop_1(qp);
			IRDMA_RING_MOVE_HEAD_NOCHECK(qp->sq_ring);
		}
		if (qp->push_db && info->push_wqe)
			irdma_qp_push_wqe(qp, qp->sq_base[nop_wqe_idx].elem,
					  avail_quanta, nop_wqe_idx, true);
	}

	*wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
	if (!*wqe_idx)
		qp->swqe_polarity = !qp->swqe_polarity;

	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, quanta);

	wqe = qp->sq_base[*wqe_idx].elem;
	if (qp->uk_attrs->hw_rev == IRDMA_GEN_1 && quanta == 1 &&
	    (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) & 1)) {
		wqe_0 = qp->sq_base[IRDMA_RING_CURRENT_HEAD(qp->sq_ring)].elem;
		wqe_0[3] = cpu_to_le64(FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity ? 0 : 1));
	}
	qp->sq_wrtrk_array[*wqe_idx].wrid = info->wr_id;
	qp->sq_wrtrk_array[*wqe_idx].wr_len = total_size;
	qp->sq_wrtrk_array[*wqe_idx].quanta = quanta;

	return wqe;
}
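
/*
 * Padding example, assuming max_hw_sq_chunk = 4 (a hypothetical value): with
 * the ring head at slot 3 of a chunk, avail_quanta is 1, so a 3-quanta WR
 * first posts one NOP WQE to finish the chunk and then starts chunk-aligned
 * at the next slot. WRs never straddle a hardware chunk boundary.
 */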

/**
 * irdma_qp_get_next_recv_wqe - get next qp's rcv wqe
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 */
__le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx)
{
	__le64 *wqe;
	int ret_code;

	if (IRDMA_RING_FULL_ERR(qp->rq_ring))
		return NULL;

	IRDMA_ATOMIC_RING_MOVE_HEAD(qp->rq_ring, *wqe_idx, ret_code);
	if (ret_code)
		return NULL;

	if (!*wqe_idx)
		qp->rwqe_polarity = !qp->rwqe_polarity;
	/* rq_wqe_size_multiplier is the number of 32-byte quanta in one rq wqe */
	wqe = qp->rq_base[*wqe_idx * qp->rq_wqe_size_multiplier].elem;

	return wqe;
}

/**
 * irdma_uk_rdma_write - rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
			bool post_sq)
{
	u64 hdr;
	__le64 *wqe;
	struct irdma_rdma_write *op_info;
	u32 i, wqe_idx;
	u32 total_size = 0, byte_off;
	int ret_code;
	u32 frag_cnt, addl_frag_cnt;
	bool read_fence = false;
	u16 quanta;

	info->push_wqe = qp->push_db ? true : false;

	op_info = &info->op.rdma_write;
	if (op_info->num_lo_sges > qp->max_sq_frag_cnt)
		return -EINVAL;

	for (i = 0; i < op_info->num_lo_sges; i++)
		total_size += op_info->lo_sg_list[i].length;

	read_fence |= info->read_fence;

	if (info->imm_data_valid)
		frag_cnt = op_info->num_lo_sges + 1;
	else
		frag_cnt = op_info->num_lo_sges;
	addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
	ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
	if (ret_code)
		return ret_code;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
					 info);
	if (!wqe)
		return -ENOMEM;

	irdma_clr_wqes(qp, wqe_idx);

	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));

	if (info->imm_data_valid) {
		set_64bit_val(wqe, 0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
		i = 0;
	} else {
		qp->wqe_ops.iw_set_fragment(wqe, 0,
					    op_info->lo_sg_list,
					    qp->swqe_polarity);
		i = 1;
	}

	for (byte_off = 32; i < op_info->num_lo_sges; i++) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off,
					    &op_info->lo_sg_list[i],
					    qp->swqe_polarity);
		byte_off += 16;
	}

	/* if the fragment count is even, set the valid bit in the next, unused fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
	    frag_cnt) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->swqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}

	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
	      FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	      FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid) |
	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt) |
	      FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	      FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);
	if (info->push_wqe) {
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	} else {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	}

	return 0;
}
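
/*
 * Usage sketch (illustrative only; qp, the DMA addresses, lengths and keys
 * are hypothetical and assumed to come from the caller's setup):
 *
 *	struct irdma_post_sq_info info = {};
 *	struct ib_sge sge = { .addr = buf_dma, .length = len, .lkey = lkey };
 *
 *	info.wr_id = 1;
 *	info.op_type = IRDMA_OP_TYPE_RDMA_WRITE;
 *	info.signaled = true;
 *	info.op.rdma_write.lo_sg_list = &sge;
 *	info.op.rdma_write.num_lo_sges = 1;
 *	info.op.rdma_write.rem_addr.addr = rem_dma;
 *	info.op.rdma_write.rem_addr.lkey = rem_stag;
 *	err = irdma_uk_rdma_write(qp, &info, true);
 */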

/**
 * irdma_uk_rdma_read - rdma read command
 * @qp: hw qp ptr
 * @info: post sq information
 * @inv_stag: flag for inv_stag
 * @post_sq: flag to post sq
 */
int irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
		       bool inv_stag, bool post_sq)
{
	struct irdma_rdma_read *op_info;
	int ret_code;
	u32 i, byte_off, total_size = 0;
	bool local_fence = false;
	u32 addl_frag_cnt;
	__le64 *wqe;
	u32 wqe_idx;
	u16 quanta;
	u64 hdr;

	info->push_wqe = qp->push_db ? true : false;

	op_info = &info->op.rdma_read;
	if (qp->max_sq_frag_cnt < op_info->num_lo_sges)
		return -EINVAL;

	for (i = 0; i < op_info->num_lo_sges; i++)
		total_size += op_info->lo_sg_list[i].length;

	ret_code = irdma_fragcnt_to_quanta_sq(op_info->num_lo_sges, &quanta);
	if (ret_code)
		return ret_code;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
					 info);
	if (!wqe)
		return -ENOMEM;

	irdma_clr_wqes(qp, wqe_idx);

	addl_frag_cnt = op_info->num_lo_sges > 1 ?
			(op_info->num_lo_sges - 1) : 0;
	local_fence |= info->local_fence;

	qp->wqe_ops.iw_set_fragment(wqe, 0, op_info->lo_sg_list,
				    qp->swqe_polarity);
	for (i = 1, byte_off = 32; i < op_info->num_lo_sges; ++i) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off,
					    &op_info->lo_sg_list[i],
					    qp->swqe_polarity);
		byte_off += 16;
	}

	/* if the fragment count is even, set the valid bit in the next, unused fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 &&
	    !(op_info->num_lo_sges & 0x01) && op_info->num_lo_sges) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->swqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}
	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
	      FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	      FIELD_PREP(IRDMAQPSQ_OPCODE,
			 (inv_stag ? IRDMAQP_OP_RDMA_READ_LOC_INV : IRDMAQP_OP_RDMA_READ)) |
	      FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);
	if (info->push_wqe) {
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	} else {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	}

	return 0;
}

/**
 * irdma_uk_send - rdma send command
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
		  bool post_sq)
{
	__le64 *wqe;
	struct irdma_post_send *op_info;
	u64 hdr;
	u32 i, wqe_idx, total_size = 0, byte_off;
	int ret_code;
	u32 frag_cnt, addl_frag_cnt;
	bool read_fence = false;
	u16 quanta;

	info->push_wqe = qp->push_db ? true : false;

	op_info = &info->op.send;
	if (qp->max_sq_frag_cnt < op_info->num_sges)
		return -EINVAL;

	for (i = 0; i < op_info->num_sges; i++)
		total_size += op_info->sg_list[i].length;

	if (info->imm_data_valid)
		frag_cnt = op_info->num_sges + 1;
	else
		frag_cnt = op_info->num_sges;
	ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
	if (ret_code)
		return ret_code;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
					 info);
	if (!wqe)
		return -ENOMEM;

	irdma_clr_wqes(qp, wqe_idx);

	read_fence |= info->read_fence;
	addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
	if (info->imm_data_valid) {
		set_64bit_val(wqe, 0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
		i = 0;
	} else {
		qp->wqe_ops.iw_set_fragment(wqe, 0,
					    frag_cnt ? op_info->sg_list : NULL,
					    qp->swqe_polarity);
		i = 1;
	}

	for (byte_off = 32; i < op_info->num_sges; i++) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, &op_info->sg_list[i],
					    qp->swqe_polarity);
		byte_off += 16;
	}

	/* if the fragment count is even, set the valid bit in the next, unused fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
	    frag_cnt) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->swqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}

	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
		      FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));
	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
	      FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
	      FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
			 (info->imm_data_valid ? 1 : 0)) |
	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
	      FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	      FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	      FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
	      FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);
	if (info->push_wqe) {
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	} else {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	}

	return 0;
}

/**
 * irdma_set_mw_bind_wqe_gen_1 - set mw bind wqe
 * @wqe: wqe for setting fragment
 * @op_info: info for setting bind wqe values
 */
static void irdma_set_mw_bind_wqe_gen_1(__le64 *wqe,
					struct irdma_bind_window *op_info)
{
	set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
	set_64bit_val(wqe, 8,
		      FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mw_stag) |
		      FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mr_stag));
	set_64bit_val(wqe, 16, op_info->bind_len);
}

/**
 * irdma_copy_inline_data_gen_1 - Copy inline data to wqe
 * @wqe: pointer to wqe
 * @sge_list: table of sges holding the inline data
 * @num_sges: number of SGEs
 * @polarity: unused in GEN_1; kept for prototype compatibility
 */
static void irdma_copy_inline_data_gen_1(u8 *wqe, struct ib_sge *sge_list,
					 u32 num_sges, u8 polarity)
{
	u32 quanta_bytes_remaining = 16;
	int i;

	for (i = 0; i < num_sges; i++) {
		u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].addr;
		u32 sge_len = sge_list[i].length;

		while (sge_len) {
			u32 bytes_copied;

			bytes_copied = min(sge_len, quanta_bytes_remaining);
			memcpy(wqe, cur_sge, bytes_copied);
			wqe += bytes_copied;
			cur_sge += bytes_copied;
			quanta_bytes_remaining -= bytes_copied;
			sge_len -= bytes_copied;

			if (!quanta_bytes_remaining) {
				/* Remaining inline bytes reside after hdr */
				wqe += 16;
				quanta_bytes_remaining = 32;
			}
		}
	}
}
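
/*
 * GEN_1 inline layout, for reference: the first 32-byte quantum carries 16
 * data bytes (offsets 0-15) ahead of the header at offset 24, and every
 * following quantum is all data, which is why 16 bytes fit in a single
 * quantum (see irdma_inline_data_size_to_quanta_gen_1() below).
 */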

/**
 * irdma_inline_data_size_to_quanta_gen_1 - calculate quanta from inline data size
 * @data_size: data size for inline
 *
 * Gets the quanta based on inline and immediate data.
 */
static inline u16 irdma_inline_data_size_to_quanta_gen_1(u32 data_size)
{
	return data_size <= 16 ? IRDMA_QP_WQE_MIN_QUANTA : 2;
}

/**
 * irdma_set_mw_bind_wqe - set mw bind in wqe
 * @wqe: wqe for setting mw bind
 * @op_info: info for setting wqe values
 */
static void irdma_set_mw_bind_wqe(__le64 *wqe,
				  struct irdma_bind_window *op_info)
{
	set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
	set_64bit_val(wqe, 8,
		      FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mr_stag) |
		      FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mw_stag));
	set_64bit_val(wqe, 16, op_info->bind_len);
}

/**
 * irdma_copy_inline_data - Copy inline data to wqe
 * @wqe: pointer to wqe
 * @sge_list: table of sges holding the inline data
 * @num_sges: number of SGEs
 * @polarity: polarity of wqe valid bit
 */
static void irdma_copy_inline_data(u8 *wqe, struct ib_sge *sge_list,
				   u32 num_sges, u8 polarity)
{
	u8 inline_valid = polarity << IRDMA_INLINE_VALID_S;
	u32 quanta_bytes_remaining = 8;
	bool first_quanta = true;
	int i;

	wqe += 8;

	for (i = 0; i < num_sges; i++) {
		u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].addr;
		u32 sge_len = sge_list[i].length;

		while (sge_len) {
			u32 bytes_copied;

			bytes_copied = min(sge_len, quanta_bytes_remaining);
			memcpy(wqe, cur_sge, bytes_copied);
			wqe += bytes_copied;
			cur_sge += bytes_copied;
			quanta_bytes_remaining -= bytes_copied;
			sge_len -= bytes_copied;

			if (!quanta_bytes_remaining) {
				quanta_bytes_remaining = 31;

				/* Remaining inline bytes reside after hdr */
				if (first_quanta) {
					first_quanta = false;
					wqe += 16;
				} else {
					*wqe = inline_valid;
					wqe++;
				}
			}
		}
	}
	if (!first_quanta && quanta_bytes_remaining < 31)
		*(wqe + quanta_bytes_remaining) = inline_valid;
}
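
/*
 * Each inline quantum past the first ends with a one-byte valid marker
 * carrying the WQE polarity, so only 31 of its 32 bytes hold payload; the
 * final, possibly partial quantum gets its marker written after the copy
 * loop above.
 */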

/**
 * irdma_inline_data_size_to_quanta - calculate quanta from inline data size
 * @data_size: data size for inline
 *
 * Gets the quanta based on inline and immediate data.
 */
static u16 irdma_inline_data_size_to_quanta(u32 data_size)
{
	if (data_size <= 8)
		return IRDMA_QP_WQE_MIN_QUANTA;
	else if (data_size <= 39)
		return 2;
	else if (data_size <= 70)
		return 3;
	else if (data_size <= 101)
		return 4;
	else if (data_size <= 132)
		return 5;
	else if (data_size <= 163)
		return 6;
	else if (data_size <= 194)
		return 7;
	else
		return 8;
}
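
/*
 * The thresholds above follow from the inline layout used by
 * irdma_copy_inline_data(): the first 32-byte quantum holds 8 data bytes
 * (offsets 8-15, ahead of the header at offset 24) and every additional
 * quantum holds 31 data bytes plus its valid marker, so N quanta cover up
 * to 8 + 31 * (N - 1) bytes: 8, 39, 70, 101, 132, 163, 194.
 */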

/**
 * irdma_uk_inline_rdma_write - inline rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
			       struct irdma_post_sq_info *info, bool post_sq)
{
	__le64 *wqe;
	struct irdma_rdma_write *op_info;
	u64 hdr = 0;
	u32 wqe_idx;
	bool read_fence = false;
	u32 i, total_size = 0;
	u16 quanta;

	info->push_wqe = qp->push_db ? true : false;
	op_info = &info->op.rdma_write;

	if (unlikely(qp->max_sq_frag_cnt < op_info->num_lo_sges))
		return -EINVAL;

	for (i = 0; i < op_info->num_lo_sges; i++)
		total_size += op_info->lo_sg_list[i].length;

	if (unlikely(total_size > qp->max_inline_data))
		return -EINVAL;

	quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size);
	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
					 info);
	if (!wqe)
		return -ENOMEM;

	irdma_clr_wqes(qp, wqe_idx);

	read_fence |= info->read_fence;
	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));

	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
	      FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	      FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt ? 1 : 0) |
	      FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
	      FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid ? 1 : 0) |
	      FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe ? 1 : 0) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	if (info->imm_data_valid)
		set_64bit_val(wqe, 0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));

	qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->lo_sg_list,
					op_info->num_lo_sges,
					qp->swqe_polarity);
	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	if (info->push_wqe) {
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	} else {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	}

	return 0;
}

/**
 * irdma_uk_inline_send - inline send operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int irdma_uk_inline_send(struct irdma_qp_uk *qp,
			 struct irdma_post_sq_info *info, bool post_sq)
{
	__le64 *wqe;
	struct irdma_post_send *op_info;
	u64 hdr;
	u32 wqe_idx;
	bool read_fence = false;
	u32 i, total_size = 0;
	u16 quanta;

	info->push_wqe = qp->push_db ? true : false;
	op_info = &info->op.send;

	if (unlikely(qp->max_sq_frag_cnt < op_info->num_sges))
		return -EINVAL;

	for (i = 0; i < op_info->num_sges; i++)
		total_size += op_info->sg_list[i].length;

	if (unlikely(total_size > qp->max_inline_data))
		return -EINVAL;

	quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size);
	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
					 info);
	if (!wqe)
		return -ENOMEM;

	irdma_clr_wqes(qp, wqe_idx);

	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
		      FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));

	read_fence |= info->read_fence;
	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
	      FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
	      FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	      FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
	      FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
			 (info->imm_data_valid ? 1 : 0)) |
	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
	      FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
	      FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
	      FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	if (info->imm_data_valid)
		set_64bit_val(wqe, 0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
	qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->sg_list,
					op_info->num_sges, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	if (info->push_wqe) {
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	} else {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	}

	return 0;
}

/**
 * irdma_uk_stag_local_invalidate - stag invalidate operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
				   struct irdma_post_sq_info *info,
				   bool post_sq)
{
	__le64 *wqe;
	struct irdma_inv_local_stag *op_info;
	u64 hdr;
	u32 wqe_idx;
	bool local_fence = false;
	struct ib_sge sge = {};

	info->push_wqe = qp->push_db ? true : false;
	op_info = &info->op.inv_local_stag;
	local_fence = info->local_fence;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
					 0, info);
	if (!wqe)
		return -ENOMEM;

	irdma_clr_wqes(qp, wqe_idx);

	sge.lkey = op_info->target_stag;
	qp->wqe_ops.iw_set_fragment(wqe, 0, &sge, 0);

	set_64bit_val(wqe, 16, 0);

	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMA_OP_TYPE_INV_STAG) |
	      FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	if (info->push_wqe) {
		irdma_qp_push_wqe(qp, wqe, IRDMA_QP_WQE_MIN_QUANTA, wqe_idx,
				  post_sq);
	} else {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	}

	return 0;
}

/**
 * irdma_uk_post_receive - post receive wqe
 * @qp: hw qp ptr
 * @info: post rq information
 */
int irdma_uk_post_receive(struct irdma_qp_uk *qp,
			  struct irdma_post_rq_info *info)
{
	u32 wqe_idx, i, byte_off;
	u32 addl_frag_cnt;
	__le64 *wqe;
	u64 hdr;

	if (qp->max_rq_frag_cnt < info->num_sges)
		return -EINVAL;

	wqe = irdma_qp_get_next_recv_wqe(qp, &wqe_idx);
	if (!wqe)
		return -ENOMEM;

	qp->rq_wrid_array[wqe_idx] = info->wr_id;
	addl_frag_cnt = info->num_sges > 1 ? (info->num_sges - 1) : 0;
	qp->wqe_ops.iw_set_fragment(wqe, 0, info->sg_list,
				    qp->rwqe_polarity);

	for (i = 1, byte_off = 32; i < info->num_sges; i++) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, &info->sg_list[i],
					    qp->rwqe_polarity);
		byte_off += 16;
	}

	/* if the fragment count is even, set the valid bit in the next, unused fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(info->num_sges & 0x01) &&
	    info->num_sges) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->rwqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}

	set_64bit_val(wqe, 16, 0);
	hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->rwqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	return 0;
}
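
/*
 * Usage sketch (illustrative only; qp, buf_dma, len and lkey are
 * hypothetical and assumed to come from the caller's setup):
 *
 *	struct irdma_post_rq_info rq_info = {};
 *	struct ib_sge sge = { .addr = buf_dma, .length = len, .lkey = lkey };
 *
 *	rq_info.wr_id = 2;
 *	rq_info.sg_list = &sge;
 *	rq_info.num_sges = 1;
 *	err = irdma_uk_post_receive(qp, &rq_info);
 */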

/**
 * irdma_uk_cq_resize - reset the cq buffer info
 * @cq: cq to resize
 * @cq_base: new cq buffer addr
 * @cq_size: number of cqes
 */
void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int cq_size)
{
	cq->cq_base = cq_base;
	cq->cq_size = cq_size;
	IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
	cq->polarity = 1;
}

/**
 * irdma_uk_cq_set_resized_cnt - record the count of the resized buffers
 * @cq: cq to resize
 * @cq_cnt: the count of the resized cq buffers
 */
void irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *cq, u16 cq_cnt)
{
	u64 temp_val;
	u16 sw_cq_sel;
	u8 arm_next_se;
	u8 arm_next;
	u8 arm_seq_num;

	get_64bit_val(cq->shadow_area, 32, &temp_val);

	sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
	sw_cq_sel += cq_cnt;

	arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
	arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
	arm_next = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT, temp_val);

	temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
		   FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);

	set_64bit_val(cq->shadow_area, 32, temp_val);
}

/**
 * irdma_uk_cq_request_notification - cq notification request (doorbell)
 * @cq: hw cq
 * @cq_notify: notification type
 */
void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
				      enum irdma_cmpl_notify cq_notify)
{
	u64 temp_val;
	u16 sw_cq_sel;
	u8 arm_next_se = 0;
	u8 arm_next = 0;
	u8 arm_seq_num;

	get_64bit_val(cq->shadow_area, 32, &temp_val);
	arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
	arm_seq_num++;
	sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
	arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
	arm_next_se |= 1;
	if (cq_notify == IRDMA_CQ_COMPL_EVENT)
		arm_next = 1;
	temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
		   FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);

	set_64bit_val(cq->shadow_area, 32, temp_val);

	dma_wmb(); /* make sure the shadow area is updated before the doorbell is rung */

	writel(cq->cq_id, cq->cqe_alloc_db);
}

/**
 * irdma_uk_cq_poll_cmpl - get cq completion info
 * @cq: hw cq
 * @info: cq poll information returned
 */
int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
			  struct irdma_cq_poll_info *info)
{
	u64 comp_ctx, qword0, qword2, qword3;
	__le64 *cqe;
	struct irdma_qp_uk *qp;
	struct irdma_ring *pring = NULL;
	u32 wqe_idx;
	int ret_code;
	bool move_cq_head = true;
	u8 polarity;
	bool ext_valid;
	__le64 *ext_cqe;

	if (cq->avoid_mem_cflct)
		cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(cq);
	else
		cqe = IRDMA_GET_CURRENT_CQ_ELEM(cq);

	get_64bit_val(cqe, 24, &qword3);
	polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
	if (polarity != cq->polarity)
		return -ENOENT;

	/* Ensure CQE contents are read after valid bit is checked */
	dma_rmb();

	ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3);
	if (ext_valid) {
		u64 qword6, qword7;
		u32 peek_head;

		if (cq->avoid_mem_cflct) {
			ext_cqe = (__le64 *)((u8 *)cqe + 32);
			get_64bit_val(ext_cqe, 24, &qword7);
			polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
		} else {
			peek_head = (cq->cq_ring.head + 1) % cq->cq_ring.size;
			ext_cqe = cq->cq_base[peek_head].buf;
			get_64bit_val(ext_cqe, 24, &qword7);
			polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
			if (!peek_head)
				polarity ^= 1;
		}
		if (polarity != cq->polarity)
			return -ENOENT;

		/* Ensure ext CQE contents are read after ext valid bit is checked */
		dma_rmb();

		info->imm_valid = (bool)FIELD_GET(IRDMA_CQ_IMMVALID, qword7);
		if (info->imm_valid) {
			u64 qword4;

			get_64bit_val(ext_cqe, 0, &qword4);
			info->imm_data = (u32)FIELD_GET(IRDMA_CQ_IMMDATALOW32, qword4);
		}
		info->ud_smac_valid = (bool)FIELD_GET(IRDMA_CQ_UDSMACVALID, qword7);
		info->ud_vlan_valid = (bool)FIELD_GET(IRDMA_CQ_UDVLANVALID, qword7);
		if (info->ud_smac_valid || info->ud_vlan_valid) {
			get_64bit_val(ext_cqe, 16, &qword6);
			if (info->ud_vlan_valid)
				info->ud_vlan = (u16)FIELD_GET(IRDMA_CQ_UDVLAN, qword6);
			if (info->ud_smac_valid) {
				info->ud_smac[5] = qword6 & 0xFF;
				info->ud_smac[4] = (qword6 >> 8) & 0xFF;
				info->ud_smac[3] = (qword6 >> 16) & 0xFF;
				info->ud_smac[2] = (qword6 >> 24) & 0xFF;
				info->ud_smac[1] = (qword6 >> 32) & 0xFF;
				info->ud_smac[0] = (qword6 >> 40) & 0xFF;
			}
		}
	} else {
		info->imm_valid = false;
		info->ud_smac_valid = false;
		info->ud_vlan_valid = false;
	}

	info->q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
	info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
	info->push_dropped = (bool)FIELD_GET(IRDMACQ_PSHDROP, qword3);
	info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
	if (info->error) {
		info->major_err = FIELD_GET(IRDMA_CQ_MAJERR, qword3);
		info->minor_err = FIELD_GET(IRDMA_CQ_MINERR, qword3);
		if (info->major_err == IRDMA_FLUSH_MAJOR_ERR) {
			info->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
			/* Set the min error to standard flush error code for remaining cqes */
			if (info->minor_err != FLUSH_GENERAL_ERR) {
				qword3 &= ~IRDMA_CQ_MINERR;
				qword3 |= FIELD_PREP(IRDMA_CQ_MINERR, FLUSH_GENERAL_ERR);
				set_64bit_val(cqe, 24, qword3);
			}
		} else {
			info->comp_status = IRDMA_COMPL_STATUS_UNKNOWN;
		}
	} else {
		info->comp_status = IRDMA_COMPL_STATUS_SUCCESS;
	}

	get_64bit_val(cqe, 0, &qword0);
	get_64bit_val(cqe, 16, &qword2);

	info->tcp_seq_num_rtt = (u32)FIELD_GET(IRDMACQ_TCPSEQNUMRTT, qword0);
	info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2);
	info->ud_src_qpn = (u32)FIELD_GET(IRDMACQ_UDSRCQPN, qword2);

	get_64bit_val(cqe, 8, &comp_ctx);

	info->solicited_event = (bool)FIELD_GET(IRDMACQ_SOEVENT, qword3);
	qp = (struct irdma_qp_uk *)(unsigned long)comp_ctx;
	if (!qp || qp->destroy_pending) {
		ret_code = -EFAULT;
		goto exit;
	}
	wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
	info->qp_handle = (irdma_qp_handle)(unsigned long)qp;
	info->op_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);

	if (info->q_type == IRDMA_CQE_QTYPE_RQ) {
		u32 array_idx;

		array_idx = wqe_idx / qp->rq_wqe_size_multiplier;

		if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED ||
		    info->comp_status == IRDMA_COMPL_STATUS_UNKNOWN) {
			if (!IRDMA_RING_MORE_WORK(qp->rq_ring)) {
				ret_code = -ENOENT;
				goto exit;
			}

			info->wr_id = qp->rq_wrid_array[qp->rq_ring.tail];
			array_idx = qp->rq_ring.tail;
		} else {
			info->wr_id = qp->rq_wrid_array[array_idx];
		}

		info->bytes_xfered = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);

		if (qword3 & IRDMACQ_STAG) {
			info->stag_invalid_set = true;
			info->inv_stag = (u32)FIELD_GET(IRDMACQ_INVSTAG, qword2);
		} else {
			info->stag_invalid_set = false;
		}
		IRDMA_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
		if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) {
			qp->rq_flush_seen = true;
			if (!IRDMA_RING_MORE_WORK(qp->rq_ring))
				qp->rq_flush_complete = true;
			else
				move_cq_head = false;
		}
		pring = &qp->rq_ring;
	} else { /* q_type is IRDMA_CQE_QTYPE_SQ */
		if (qp->first_sq_wq) {
			if (wqe_idx + 1 >= qp->conn_wqes)
				qp->first_sq_wq = false;

			if (wqe_idx < qp->conn_wqes && qp->sq_ring.head == qp->sq_ring.tail) {
				IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
				IRDMA_RING_MOVE_TAIL(cq->cq_ring);
				set_64bit_val(cq->shadow_area, 0,
					      IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
				memset(info, 0,
				       sizeof(struct irdma_cq_poll_info));
				return irdma_uk_cq_poll_cmpl(cq, info);
			}
		}
		/* cease posting in push mode on push drop */
		if (info->push_dropped) {
			qp->push_mode = false;
			qp->push_dropped = true;
		}
		if (info->comp_status != IRDMA_COMPL_STATUS_FLUSHED) {
			info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
			if (!info->comp_status)
				info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;
			info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);
			IRDMA_RING_SET_TAIL(qp->sq_ring,
					    wqe_idx + qp->sq_wrtrk_array[wqe_idx].quanta);
		} else {
			if (!IRDMA_RING_MORE_WORK(qp->sq_ring)) {
				ret_code = -ENOENT;
				goto exit;
			}

			do {
				__le64 *sw_wqe;
				u64 wqe_qword;
				u32 tail;

				tail = qp->sq_ring.tail;
				sw_wqe = qp->sq_base[tail].elem;
				get_64bit_val(sw_wqe, 24,
					      &wqe_qword);
				info->op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE,
							      wqe_qword);
				IRDMA_RING_SET_TAIL(qp->sq_ring,
						    tail + qp->sq_wrtrk_array[tail].quanta);
				if (info->op_type != IRDMAQP_OP_NOP) {
					info->wr_id = qp->sq_wrtrk_array[tail].wrid;
					info->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len;
					break;
				}
			} while (1);
			if (info->op_type == IRDMA_OP_TYPE_BIND_MW &&
			    info->minor_err == FLUSH_PROT_ERR)
				info->minor_err = FLUSH_MW_BIND_ERR;
			qp->sq_flush_seen = true;
			if (!IRDMA_RING_MORE_WORK(qp->sq_ring))
				qp->sq_flush_complete = true;
		}
		pring = &qp->sq_ring;
	}

	ret_code = 0;

exit:
	if (!ret_code && info->comp_status == IRDMA_COMPL_STATUS_FLUSHED)
		if (pring && IRDMA_RING_MORE_WORK(*pring))
			move_cq_head = false;

	if (move_cq_head) {
		IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
		if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
			cq->polarity ^= 1;

		if (ext_valid && !cq->avoid_mem_cflct) {
			IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
			if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
				cq->polarity ^= 1;
		}

		IRDMA_RING_MOVE_TAIL(cq->cq_ring);
		if (!cq->avoid_mem_cflct && ext_valid)
			IRDMA_RING_MOVE_TAIL(cq->cq_ring);
		set_64bit_val(cq->shadow_area, 0,
			      IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
	} else {
		qword3 &= ~IRDMA_CQ_WQEIDX;
		qword3 |= FIELD_PREP(IRDMA_CQ_WQEIDX, pring->tail);
		set_64bit_val(cqe, 24, qword3);
	}

	return ret_code;
}

/**
 * irdma_qp_round_up - return qp wq depth rounded up to the next power of two
 * @wqdepth: wq depth in quanta to round up
 */
static int irdma_qp_round_up(u32 wqdepth)
{
	int scount = 1;

	for (wqdepth--; scount <= 16; scount *= 2)
		wqdepth |= wqdepth >> scount;

	return ++wqdepth;
}
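
/*
 * Worked example: 20 decrements to 19 (0b10011), the shifted ORs smear the
 * top bit down to give 0b11111 (31), and the increment yields 32. Exact
 * powers of two map to themselves thanks to the initial decrement.
 */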

/**
 * irdma_get_wqe_shift - get shift count for maximum wqe size
 * @uk_attrs: qp HW attributes
 * @sge: Maximum Scatter Gather Elements wqe
 * @inline_data: Maximum inline data size
 * @shift: Returns the shift needed based on sge
 *
 * Shift can be used to left shift the wqe size based on number of SGEs and
 * inline data size. For 1 SGE or inline data <= 8, shift = 0 (wqe size of 32
 * bytes). For 2 or 3 SGEs or inline data <= 39, shift = 1 (wqe size of 64
 * bytes). For 4-7 SGEs or inline data <= 101, shift = 2 (wqe size of 128
 * bytes). Otherwise, shift = 3 (wqe size of 256 bytes).
 */
void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
			 u32 inline_data, u8 *shift)
{
	*shift = 0;
	if (uk_attrs->hw_rev >= IRDMA_GEN_2) {
		if (sge > 1 || inline_data > 8) {
			if (sge < 4 && inline_data <= 39)
				*shift = 1;
			else if (sge < 8 && inline_data <= 101)
				*shift = 2;
			else
				*shift = 3;
		}
	} else if (sge > 1 || inline_data > 16) {
		*shift = (sge < 4 && inline_data <= 48) ? 1 : 2;
	}
}

/*
 * irdma_get_sqdepth - get SQ depth (quanta)
 * @uk_attrs: qp HW attributes
 * @sq_size: SQ size
 * @shift: shift which determines size of WQE
 * @sqdepth: depth of SQ
 */
int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
		      u32 *sqdepth)
{
	*sqdepth = irdma_qp_round_up((sq_size << shift) + IRDMA_SQ_RSVD);

	if (*sqdepth < (IRDMA_QP_SW_MIN_WQSIZE << shift))
		*sqdepth = IRDMA_QP_SW_MIN_WQSIZE << shift;
	else if (*sqdepth > uk_attrs->max_hw_wq_quanta)
		return -EINVAL;

	return 0;
}

/*
 * irdma_get_rqdepth - get RQ depth (quanta)
 * @uk_attrs: qp HW attributes
 * @rq_size: RQ size
 * @shift: shift which determines size of WQE
 * @rqdepth: depth of RQ
 */
int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
		      u32 *rqdepth)
{
	*rqdepth = irdma_qp_round_up((rq_size << shift) + IRDMA_RQ_RSVD);

	if (*rqdepth < (IRDMA_QP_SW_MIN_WQSIZE << shift))
		*rqdepth = IRDMA_QP_SW_MIN_WQSIZE << shift;
	else if (*rqdepth > uk_attrs->max_hw_rq_quanta)
		return -EINVAL;

	return 0;
}

static const struct irdma_wqe_uk_ops iw_wqe_uk_ops = {
	.iw_copy_inline_data = irdma_copy_inline_data,
	.iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta,
	.iw_set_fragment = irdma_set_fragment,
	.iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe,
};

static const struct irdma_wqe_uk_ops iw_wqe_uk_ops_gen_1 = {
	.iw_copy_inline_data = irdma_copy_inline_data_gen_1,
	.iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta_gen_1,
	.iw_set_fragment = irdma_set_fragment_gen_1,
	.iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe_gen_1,
};

/**
 * irdma_setup_connection_wqes - setup WQEs necessary to complete
 * connection.
 * @qp: hw qp (user and kernel)
 * @info: qp initialization info
 */
static void irdma_setup_connection_wqes(struct irdma_qp_uk *qp,
					struct irdma_qp_uk_init_info *info)
{
	u16 move_cnt = 1;

	if (!info->legacy_mode &&
	    (qp->uk_attrs->feature_flags & IRDMA_FEATURE_RTS_AE))
		move_cnt = 3;

	qp->conn_wqes = move_cnt;
	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, move_cnt);
	IRDMA_RING_MOVE_TAIL_BY_COUNT(qp->sq_ring, move_cnt);
	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->initial_ring, move_cnt);
}

/**
 * irdma_uk_qp_init - initialize shared qp
 * @qp: hw qp (user and kernel)
 * @info: qp initialization info
 *
 * Initializes the vars used in both user and kernel mode.
 * The size of the wqe depends on the maximum number of fragments
 * allowed. The size of the wqe times the number of wqes should then
 * equal the amount of memory allocated for the sq and rq.
 */
int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
{
	int ret_code = 0;
	u32 sq_ring_size;
	u8 sqshift, rqshift;

	qp->uk_attrs = info->uk_attrs;
	if (info->max_sq_frag_cnt > qp->uk_attrs->max_hw_wq_frags ||
	    info->max_rq_frag_cnt > qp->uk_attrs->max_hw_wq_frags)
		return -EINVAL;

	irdma_get_wqe_shift(qp->uk_attrs, info->max_rq_frag_cnt, 0, &rqshift);
	if (qp->uk_attrs->hw_rev == IRDMA_GEN_1) {
		irdma_get_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt,
				    info->max_inline_data, &sqshift);
		if (info->abi_ver > 4)
			rqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
	} else {
		irdma_get_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt + 1,
				    info->max_inline_data, &sqshift);
	}
	qp->qp_caps = info->qp_caps;
	qp->sq_base = info->sq;
	qp->rq_base = info->rq;
	qp->qp_type = info->type ? info->type : IRDMA_QP_TYPE_IWARP;
	qp->shadow_area = info->shadow_area;
	qp->sq_wrtrk_array = info->sq_wrtrk_array;

	qp->rq_wrid_array = info->rq_wrid_array;
	qp->wqe_alloc_db = info->wqe_alloc_db;
	qp->qp_id = info->qp_id;
	qp->sq_size = info->sq_size;
	qp->push_mode = false;
	qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
	sq_ring_size = qp->sq_size << sqshift;
	IRDMA_RING_INIT(qp->sq_ring, sq_ring_size);
	IRDMA_RING_INIT(qp->initial_ring, sq_ring_size);
	if (info->first_sq_wq) {
		irdma_setup_connection_wqes(qp, info);
		qp->swqe_polarity = 1;
		qp->first_sq_wq = true;
	} else {
		qp->swqe_polarity = 0;
	}
	qp->swqe_polarity_deferred = 1;
	qp->rwqe_polarity = 0;
	qp->rq_size = info->rq_size;
	qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
	qp->max_inline_data = info->max_inline_data;
	qp->rq_wqe_size = rqshift;
	IRDMA_RING_INIT(qp->rq_ring, qp->rq_size);
	qp->rq_wqe_size_multiplier = 1 << rqshift;
	if (qp->uk_attrs->hw_rev == IRDMA_GEN_1)
		qp->wqe_ops = iw_wqe_uk_ops_gen_1;
	else
		qp->wqe_ops = iw_wqe_uk_ops;
	return ret_code;
}

/**
 * irdma_uk_cq_init - initialize shared cq (user and kernel)
 * @cq: hw cq
 * @info: hw cq initialization info
 */
void irdma_uk_cq_init(struct irdma_cq_uk *cq,
		      struct irdma_cq_uk_init_info *info)
{
	cq->cq_base = info->cq_base;
	cq->cq_id = info->cq_id;
	cq->cq_size = info->cq_size;
	cq->cqe_alloc_db = info->cqe_alloc_db;
	cq->cq_ack_db = info->cq_ack_db;
	cq->shadow_area = info->shadow_area;
	cq->avoid_mem_cflct = info->avoid_mem_cflct;
	IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
	cq->polarity = 1;
}

/**
 * irdma_uk_clean_cq - clean cq entries
 * @q: completion context
 * @cq: cq to clean
 */
void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq)
{
	__le64 *cqe;
	u64 qword3, comp_ctx;
	u32 cq_head;
	u8 polarity, temp;

	cq_head = cq->cq_ring.head;
	temp = cq->polarity;
	do {
		if (cq->avoid_mem_cflct)
			cqe = ((struct irdma_extended_cqe *)(cq->cq_base))[cq_head].buf;
		else
			cqe = cq->cq_base[cq_head].buf;
		get_64bit_val(cqe, 24, &qword3);
		polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);

		if (polarity != temp)
			break;

		get_64bit_val(cqe, 8, &comp_ctx);
		if ((void *)(unsigned long)comp_ctx == q)
			set_64bit_val(cqe, 8, 0);

		cq_head = (cq_head + 1) % cq->cq_ring.size;
		if (!cq_head)
			temp ^= 1;
	} while (true);
}

/**
 * irdma_nop - post a nop
 * @qp: hw qp ptr
 * @wr_id: work request id
 * @signaled: signaled for completion
 * @post_sq: ring doorbell
 */
int irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq)
{
	__le64 *wqe;
	u64 hdr;
	u32 wqe_idx;
	struct irdma_post_sq_info info = {};

	info.push_wqe = false;
	info.wr_id = wr_id;
	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
					 0, &info);
	if (!wqe)
		return -ENOMEM;

	irdma_clr_wqes(qp, wqe_idx);

	set_64bit_val(wqe, 0, 0);
	set_64bit_val(wqe, 8, 0);
	set_64bit_val(wqe, 16, 0);

	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);
	if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_fragcnt_to_quanta_sq - calculate quanta based on fragment count for SQ
 * @frag_cnt: number of fragments
 * @quanta: quanta for frag_cnt
 */
int irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta)
{
	switch (frag_cnt) {
	case 0:
	case 1:
		*quanta = IRDMA_QP_WQE_MIN_QUANTA;
		break;
	case 2:
	case 3:
		*quanta = 2;
		break;
	case 4:
	case 5:
		*quanta = 3;
		break;
	case 6:
	case 7:
		*quanta = 4;
		break;
	case 8:
	case 9:
		*quanta = 5;
		break;
	case 10:
	case 11:
		*quanta = 6;
		break;
	case 12:
	case 13:
		*quanta = 7;
		break;
	case 14:
	case 15: /* when immediate data is present */
		*quanta = 8;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
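
/*
 * The mapping above reflects the SQ WQE layout: the first 32-byte quantum
 * holds one fragment alongside the header, and each additional quantum holds
 * two 16-byte fragments, i.e. quanta = 1 + DIV_ROUND_UP(frag_cnt - 1, 2),
 * capped at 8 quanta (15 fragments when immediate data occupies one slot).
 */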

/**
 * irdma_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ
 * @frag_cnt: number of fragments
 * @wqe_size: size in bytes given frag_cnt
 */
int irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size)
{
	switch (frag_cnt) {
	case 0:
	case 1:
		*wqe_size = 32;
		break;
	case 2:
	case 3:
		*wqe_size = 64;
		break;
	case 4:
	case 5:
	case 6:
	case 7:
		*wqe_size = 128;
		break;
	case 8:
	case 9:
	case 10:
	case 11:
	case 12:
	case 13:
	case 14:
		*wqe_size = 256;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
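
/*
 * RQ sizing follows the same layout with power-of-two WQE sizes: a 32-byte
 * WQE fits one fragment, 64 bytes fit three (1 + 2), 128 bytes fit seven
 * (1 + 6), and 256 bytes cover the 14-fragment limit enforced above.
 */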
1664