// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause

/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
/*          Kai Shen <kaishen@linux.alibaba.com> */
/* Copyright (c) 2020-2022, Alibaba Group */
/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Copyright (c) 2008-2019, IBM Corporation */

#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/types.h>

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>

#include "erdma.h"
#include "erdma_cm.h"
#include "erdma_verbs.h"

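/*
 * Called when the underlying LLP (TCP) connection is gone: move an active
 * QP towards CLOSING (or finish CLOSING -> IDLE) and drop the reference to
 * the connection endpoint, all under the QP state lock.
 */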
void erdma_qp_llp_close(struct erdma_qp *qp)
{
	struct erdma_qp_attrs qp_attrs;

	down_write(&qp->state_lock);

	switch (qp->attrs.state) {
	case ERDMA_QP_STATE_RTS:
	case ERDMA_QP_STATE_RTR:
	case ERDMA_QP_STATE_IDLE:
	case ERDMA_QP_STATE_TERMINATE:
		qp_attrs.state = ERDMA_QP_STATE_CLOSING;
		erdma_modify_qp_internal(qp, &qp_attrs, ERDMA_QP_ATTR_STATE);
		break;
	case ERDMA_QP_STATE_CLOSING:
		qp->attrs.state = ERDMA_QP_STATE_IDLE;
		break;
	default:
		break;
	}

	if (qp->cep) {
		erdma_cep_put(qp->cep);
		qp->cep = NULL;
	}

	up_write(&qp->state_lock);
}

struct ib_qp *erdma_get_ibqp(struct ib_device *ibdev, int id)
{
	struct erdma_qp *qp = find_qp_by_qpn(to_edev(ibdev), id);

	if (qp)
		return &qp->ibqp;

	return NULL;
}

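/*
 * Move the QP to RTS by posting a MODIFY_QP command to the device. The
 * caller must set both ERDMA_QP_ATTR_LLP_HANDLE and ERDMA_QP_ATTR_MPA in
 * @mask: the command carries the TCP 4-tuple and the current snd_nxt and
 * rcv_nxt sequence numbers of the underlying socket so the hardware can
 * continue the established connection. On the passive (server) side,
 * send_nxt is advanced past the MPA response that has yet to be sent.
 */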
static int erdma_modify_qp_state_to_rts(struct erdma_qp *qp,
					struct erdma_qp_attrs *attrs,
					enum erdma_qp_attr_mask mask)
{
	int ret;
	struct erdma_dev *dev = qp->dev;
	struct erdma_cmdq_modify_qp_req req;
	struct tcp_sock *tp;
	struct erdma_cep *cep = qp->cep;
	struct sockaddr_storage local_addr, remote_addr;

	if (!(mask & ERDMA_QP_ATTR_LLP_HANDLE))
		return -EINVAL;

	if (!(mask & ERDMA_QP_ATTR_MPA))
		return -EINVAL;

	ret = getname_local(cep->sock, &local_addr);
	if (ret < 0)
		return ret;

	ret = getname_peer(cep->sock, &remote_addr);
	if (ret < 0)
		return ret;

	qp->attrs.state = ERDMA_QP_STATE_RTS;

	tp = tcp_sk(qp->cep->sock->sk);

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
				CMDQ_OPCODE_MODIFY_QP);

	req.cfg = FIELD_PREP(ERDMA_CMD_MODIFY_QP_STATE_MASK, qp->attrs.state) |
		  FIELD_PREP(ERDMA_CMD_MODIFY_QP_CC_MASK, qp->attrs.cc) |
		  FIELD_PREP(ERDMA_CMD_MODIFY_QP_QPN_MASK, QP_ID(qp));

	req.cookie = be32_to_cpu(qp->cep->mpa.ext_data.cookie);
	req.dip = to_sockaddr_in(remote_addr).sin_addr.s_addr;
	req.sip = to_sockaddr_in(local_addr).sin_addr.s_addr;
	req.dport = to_sockaddr_in(remote_addr).sin_port;
	req.sport = to_sockaddr_in(local_addr).sin_port;

	req.send_nxt = tp->snd_nxt;
	/* Reserve TCP sequence space for the MPA response sent by the
	 * passive (server) side.
	 */
	if (qp->attrs.qp_type == ERDMA_QP_PASSIVE)
		req.send_nxt += MPA_DEFAULT_HDR_LEN + qp->attrs.pd_len;
	req.recv_nxt = tp->rcv_nxt;

	return erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
				   NULL);
}

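/*
 * Record the requested (non-RTS) state in software and notify the device
 * through a MODIFY_QP command.
 */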
static int erdma_modify_qp_state_to_stop(struct erdma_qp *qp,
					 struct erdma_qp_attrs *attrs,
					 enum erdma_qp_attr_mask mask)
{
	struct erdma_dev *dev = qp->dev;
	struct erdma_cmdq_modify_qp_req req;

	qp->attrs.state = attrs->state;

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
				CMDQ_OPCODE_MODIFY_QP);

	req.cfg = FIELD_PREP(ERDMA_CMD_MODIFY_QP_STATE_MASK, attrs->state) |
		  FIELD_PREP(ERDMA_CMD_MODIFY_QP_QPN_MASK, QP_ID(qp));

	return erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
				   NULL);
}

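/*
 * Software QP state machine. Only ERDMA_QP_ATTR_STATE transitions are
 * handled here; when the QP leaves RTS the CM connection is dropped as
 * well. Called with qp->state_lock held (see erdma_qp_llp_close()).
 */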
int erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs,
			     enum erdma_qp_attr_mask mask)
{
	int drop_conn, ret = 0;

	if (!mask)
		return 0;

	if (!(mask & ERDMA_QP_ATTR_STATE))
		return 0;

	switch (qp->attrs.state) {
	case ERDMA_QP_STATE_IDLE:
	case ERDMA_QP_STATE_RTR:
		if (attrs->state == ERDMA_QP_STATE_RTS) {
			ret = erdma_modify_qp_state_to_rts(qp, attrs, mask);
		} else if (attrs->state == ERDMA_QP_STATE_ERROR) {
			qp->attrs.state = ERDMA_QP_STATE_ERROR;
			if (qp->cep) {
				erdma_cep_put(qp->cep);
				qp->cep = NULL;
			}
			ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
		}
		break;
	case ERDMA_QP_STATE_RTS:
		drop_conn = 0;

		if (attrs->state == ERDMA_QP_STATE_CLOSING) {
			ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
			drop_conn = 1;
		} else if (attrs->state == ERDMA_QP_STATE_TERMINATE) {
			qp->attrs.state = ERDMA_QP_STATE_TERMINATE;
			ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
			drop_conn = 1;
		} else if (attrs->state == ERDMA_QP_STATE_ERROR) {
			ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
			qp->attrs.state = ERDMA_QP_STATE_ERROR;
			drop_conn = 1;
		}

		if (drop_conn)
			erdma_qp_cm_drop(qp);

		break;
	case ERDMA_QP_STATE_TERMINATE:
		if (attrs->state == ERDMA_QP_STATE_ERROR)
			qp->attrs.state = ERDMA_QP_STATE_ERROR;
		break;
	case ERDMA_QP_STATE_CLOSING:
		if (attrs->state == ERDMA_QP_STATE_IDLE) {
			qp->attrs.state = ERDMA_QP_STATE_IDLE;
		} else if (attrs->state == ERDMA_QP_STATE_ERROR) {
			ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
			qp->attrs.state = ERDMA_QP_STATE_ERROR;
		} else if (attrs->state != ERDMA_QP_STATE_CLOSING) {
			return -ECONNABORTED;
		}
		break;
	default:
		break;
	}

	return ret;
}

static void erdma_qp_safe_free(struct kref *ref)
{
	struct erdma_qp *qp = container_of(ref, struct erdma_qp, ref);

	complete(&qp->safe_free);
}

void erdma_qp_put(struct erdma_qp *qp)
{
	WARN_ON(kref_read(&qp->ref) < 1);
	kref_put(&qp->ref, erdma_qp_safe_free);
}

void erdma_qp_get(struct erdma_qp *qp)
{
	kref_get(&qp->ref);
}

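/*
 * Copy the payload referenced by the WR's SG list directly into the SQ ring
 * starting at @sgl_offset, wrapping across SQE building blocks (SQEBBs) as
 * needed. Returns the number of bytes copied, or -EINVAL if the total
 * exceeds ERDMA_MAX_INLINE.
 */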
static int fill_inline_data(struct erdma_qp *qp,
			    const struct ib_send_wr *send_wr, u16 wqe_idx,
			    u32 sgl_offset, __le32 *length_field)
{
	u32 remain_size, copy_size, data_off, bytes = 0;
	char *data;
	int i = 0;

	wqe_idx += (sgl_offset >> SQEBB_SHIFT);
	sgl_offset &= (SQEBB_SIZE - 1);
	data = get_queue_entry(qp->kern_qp.sq_buf, wqe_idx, qp->attrs.sq_size,
			       SQEBB_SHIFT);

	while (i < send_wr->num_sge) {
		bytes += send_wr->sg_list[i].length;
		if (bytes > (int)ERDMA_MAX_INLINE)
			return -EINVAL;

		remain_size = send_wr->sg_list[i].length;
		data_off = 0;

		while (1) {
			copy_size = min(remain_size, SQEBB_SIZE - sgl_offset);

			memcpy(data + sgl_offset,
			       (void *)(uintptr_t)send_wr->sg_list[i].addr +
				       data_off,
			       copy_size);
			remain_size -= copy_size;
			data_off += copy_size;
			sgl_offset += copy_size;
			wqe_idx += (sgl_offset >> SQEBB_SHIFT);
			sgl_offset &= (SQEBB_SIZE - 1);

			data = get_queue_entry(qp->kern_qp.sq_buf, wqe_idx,
					       qp->attrs.sq_size, SQEBB_SHIFT);
			if (!remain_size)
				break;
		}

		i++;
	}
	*length_field = cpu_to_le32(bytes);

	return bytes;
}

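/*
 * Copy the ib_sge descriptors themselves (not the payload) into the SQ ring
 * behind the SQE, wrapping across SQEBB boundaries. The summed SGE length is
 * written to @length_field.
 */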
static int fill_sgl(struct erdma_qp *qp, const struct ib_send_wr *send_wr,
		    u16 wqe_idx, u32 sgl_offset, __le32 *length_field)
{
	int i = 0;
	u32 bytes = 0;
	char *sgl;

	if (send_wr->num_sge > qp->dev->attrs.max_send_sge)
		return -EINVAL;

	if (sgl_offset & 0xF)
		return -EINVAL;

	while (i < send_wr->num_sge) {
		wqe_idx += (sgl_offset >> SQEBB_SHIFT);
		sgl_offset &= (SQEBB_SIZE - 1);
		sgl = get_queue_entry(qp->kern_qp.sq_buf, wqe_idx,
				      qp->attrs.sq_size, SQEBB_SHIFT);

		bytes += send_wr->sg_list[i].length;
		memcpy(sgl + sgl_offset, &send_wr->sg_list[i],
		       sizeof(struct ib_sge));

		sgl_offset += sizeof(struct ib_sge);
		i++;
	}

	*length_field = cpu_to_le32(bytes);
	return 0;
}

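/*
 * Translate one ib_send_wr into a hardware SQE at producer index *pi: fill
 * the opcode-specific fields, then append either inline data or an SGL, and
 * advance *pi by the number of SQEBBs the WQE occupies.
 */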
static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi,
			      const struct ib_send_wr *send_wr)
{
	u32 wqe_size, wqebb_cnt, hw_op, flags, sgl_offset;
	u32 idx = *pi & (qp->attrs.sq_size - 1);
	enum ib_wr_opcode op = send_wr->opcode;
	struct erdma_readreq_sqe *read_sqe;
	struct erdma_reg_mr_sqe *regmr_sge;
	struct erdma_write_sqe *write_sqe;
	struct erdma_send_sqe *send_sqe;
	struct ib_rdma_wr *rdma_wr;
	struct erdma_mr *mr;
	__le32 *length_field;
	u64 wqe_hdr, *entry;
	struct ib_sge *sge;
	u32 attrs;
	int ret;

	entry = get_queue_entry(qp->kern_qp.sq_buf, idx, qp->attrs.sq_size,
				SQEBB_SHIFT);

	/* Clear the SQE header section. */
	*entry = 0;

	qp->kern_qp.swr_tbl[idx] = send_wr->wr_id;
	flags = send_wr->send_flags;
	wqe_hdr = FIELD_PREP(
		ERDMA_SQE_HDR_CE_MASK,
		((flags & IB_SEND_SIGNALED) || qp->kern_qp.sig_all) ? 1 : 0);
	wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_SE_MASK,
			      flags & IB_SEND_SOLICITED ? 1 : 0);
	wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_FENCE_MASK,
			      flags & IB_SEND_FENCE ? 1 : 0);
	wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_INLINE_MASK,
			      flags & IB_SEND_INLINE ? 1 : 0);
	wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_QPN_MASK, QP_ID(qp));

	switch (op) {
	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		hw_op = ERDMA_OP_WRITE;
		if (op == IB_WR_RDMA_WRITE_WITH_IMM)
			hw_op = ERDMA_OP_WRITE_WITH_IMM;
		wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK, hw_op);
		rdma_wr = container_of(send_wr, struct ib_rdma_wr, wr);
		write_sqe = (struct erdma_write_sqe *)entry;

		write_sqe->imm_data = send_wr->ex.imm_data;
		write_sqe->sink_stag = cpu_to_le32(rdma_wr->rkey);
		write_sqe->sink_to_h =
			cpu_to_le32(upper_32_bits(rdma_wr->remote_addr));
		write_sqe->sink_to_l =
			cpu_to_le32(lower_32_bits(rdma_wr->remote_addr));

		length_field = &write_sqe->length;
		wqe_size = sizeof(struct erdma_write_sqe);
		sgl_offset = wqe_size;
		break;
	case IB_WR_RDMA_READ:
	case IB_WR_RDMA_READ_WITH_INV:
		read_sqe = (struct erdma_readreq_sqe *)entry;
		if (unlikely(send_wr->num_sge != 1))
			return -EINVAL;
		hw_op = ERDMA_OP_READ;
		if (op == IB_WR_RDMA_READ_WITH_INV) {
			hw_op = ERDMA_OP_READ_WITH_INV;
			read_sqe->invalid_stag =
				cpu_to_le32(send_wr->ex.invalidate_rkey);
		}

		wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK, hw_op);
		rdma_wr = container_of(send_wr, struct ib_rdma_wr, wr);
		read_sqe->length = cpu_to_le32(send_wr->sg_list[0].length);
		read_sqe->sink_stag = cpu_to_le32(send_wr->sg_list[0].lkey);
		read_sqe->sink_to_l =
			cpu_to_le32(lower_32_bits(send_wr->sg_list[0].addr));
		read_sqe->sink_to_h =
			cpu_to_le32(upper_32_bits(send_wr->sg_list[0].addr));

		sge = get_queue_entry(qp->kern_qp.sq_buf, idx + 1,
				      qp->attrs.sq_size, SQEBB_SHIFT);
		sge->addr = rdma_wr->remote_addr;
		sge->lkey = rdma_wr->rkey;
		sge->length = send_wr->sg_list[0].length;
		wqe_size = sizeof(struct erdma_readreq_sqe) +
			   send_wr->num_sge * sizeof(struct ib_sge);

		goto out;
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_SEND_WITH_INV:
		send_sqe = (struct erdma_send_sqe *)entry;
		hw_op = ERDMA_OP_SEND;
		if (op == IB_WR_SEND_WITH_IMM) {
			hw_op = ERDMA_OP_SEND_WITH_IMM;
			send_sqe->imm_data = send_wr->ex.imm_data;
		} else if (op == IB_WR_SEND_WITH_INV) {
			hw_op = ERDMA_OP_SEND_WITH_INV;
			send_sqe->invalid_stag =
				cpu_to_le32(send_wr->ex.invalidate_rkey);
		}
		wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK, hw_op);
		length_field = &send_sqe->length;
		wqe_size = sizeof(struct erdma_send_sqe);
		sgl_offset = wqe_size;

		break;
	case IB_WR_REG_MR:
		wqe_hdr |=
			FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK, ERDMA_OP_REG_MR);
		regmr_sge = (struct erdma_reg_mr_sqe *)entry;
		mr = to_emr(reg_wr(send_wr)->mr);

		mr->access = ERDMA_MR_ACC_LR |
			     to_erdma_access_flags(reg_wr(send_wr)->access);
		regmr_sge->addr = cpu_to_le64(mr->ibmr.iova);
		regmr_sge->length = cpu_to_le32(mr->ibmr.length);
		regmr_sge->stag = cpu_to_le32(reg_wr(send_wr)->key);
		attrs = FIELD_PREP(ERDMA_SQE_MR_MODE_MASK, 0) |
			FIELD_PREP(ERDMA_SQE_MR_ACCESS_MASK, mr->access) |
			FIELD_PREP(ERDMA_SQE_MR_MTT_CNT_MASK,
				   mr->mem.mtt_nents);

		if (mr->mem.mtt_nents < ERDMA_MAX_INLINE_MTT_ENTRIES) {
			attrs |= FIELD_PREP(ERDMA_SQE_MR_MTT_TYPE_MASK, 0);
			/* Copy SGLs to SQE content to accelerate */
			memcpy(get_queue_entry(qp->kern_qp.sq_buf, idx + 1,
					       qp->attrs.sq_size, SQEBB_SHIFT),
			       mr->mem.mtt_buf, MTT_SIZE(mr->mem.mtt_nents));
			wqe_size = sizeof(struct erdma_reg_mr_sqe) +
				   MTT_SIZE(mr->mem.mtt_nents);
		} else {
			attrs |= FIELD_PREP(ERDMA_SQE_MR_MTT_TYPE_MASK, 1);
			wqe_size = sizeof(struct erdma_reg_mr_sqe);
		}

		regmr_sge->attrs = cpu_to_le32(attrs);
		goto out;
	case IB_WR_LOCAL_INV:
		wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK,
				      ERDMA_OP_LOCAL_INV);
		regmr_sge = (struct erdma_reg_mr_sqe *)entry;
		regmr_sge->stag = cpu_to_le32(send_wr->ex.invalidate_rkey);
		wqe_size = sizeof(struct erdma_reg_mr_sqe);
		goto out;
	default:
		return -EOPNOTSUPP;
	}

	if (flags & IB_SEND_INLINE) {
		ret = fill_inline_data(qp, send_wr, idx, sgl_offset,
				       length_field);
		if (ret < 0)
			return -EINVAL;
		wqe_size += ret;
		wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_SGL_LEN_MASK, ret);
	} else {
		ret = fill_sgl(qp, send_wr, idx, sgl_offset, length_field);
		if (ret)
			return -EINVAL;
		wqe_size += send_wr->num_sge * sizeof(struct ib_sge);
		wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_SGL_LEN_MASK,
				      send_wr->num_sge);
	}

out:
	wqebb_cnt = SQEBB_COUNT(wqe_size);
	wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_WQEBB_CNT_MASK, wqebb_cnt - 1);
	*pi += wqebb_cnt;
	wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_WQEBB_INDEX_MASK, *pi);

	*entry = wqe_hdr;

	return 0;
}

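/*
 * Mirror the doorbell value into sq_db_info and ring the hardware SQ
 * doorbell with the QPN and the new producer index.
 */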
static void kick_sq_db(struct erdma_qp *qp, u16 pi)
{
	u64 db_data = FIELD_PREP(ERDMA_SQE_HDR_QPN_MASK, QP_ID(qp)) |
		      FIELD_PREP(ERDMA_SQE_HDR_WQEBB_INDEX_MASK, pi);

	*(u64 *)qp->kern_qp.sq_db_info = db_data;
	writeq(db_data, qp->kern_qp.hw_sq_db);
}

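/*
 * Post a chain of send WRs. Processing stops at the first WR that fails or
 * once the SQ is full (producer/consumer distance >= sq_size); @bad_send_wr
 * is set and the error returned.
 */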
int erdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *send_wr,
		    const struct ib_send_wr **bad_send_wr)
{
	struct erdma_qp *qp = to_eqp(ibqp);
	int ret = 0;
	const struct ib_send_wr *wr = send_wr;
	unsigned long flags;
	u16 sq_pi;

	if (!send_wr)
		return -EINVAL;

	spin_lock_irqsave(&qp->lock, flags);
	sq_pi = qp->kern_qp.sq_pi;

	while (wr) {
		if ((u16)(sq_pi - qp->kern_qp.sq_ci) >= qp->attrs.sq_size) {
			ret = -ENOMEM;
			*bad_send_wr = send_wr;
			break;
		}

		ret = erdma_push_one_sqe(qp, &sq_pi, wr);
		if (ret) {
			*bad_send_wr = wr;
			break;
		}
		qp->kern_qp.sq_pi = sq_pi;
		kick_sq_db(qp, sq_pi);

		wr = wr->next;
	}
	spin_unlock_irqrestore(&qp->lock, flags);

	return ret;
}

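/*
 * Build one RQE (at most one SGE per receive WR is supported), write it to
 * the RQ doorbell record and doorbell register, and remember the wr_id for
 * completion handling.
 */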
static int erdma_post_recv_one(struct erdma_qp *qp,
			       const struct ib_recv_wr *recv_wr)
{
	struct erdma_rqe *rqe =
		get_queue_entry(qp->kern_qp.rq_buf, qp->kern_qp.rq_pi,
				qp->attrs.rq_size, RQE_SHIFT);

	rqe->qe_idx = cpu_to_le16(qp->kern_qp.rq_pi + 1);
	rqe->qpn = cpu_to_le32(QP_ID(qp));

	if (recv_wr->num_sge == 0) {
		rqe->length = 0;
	} else if (recv_wr->num_sge == 1) {
		rqe->stag = cpu_to_le32(recv_wr->sg_list[0].lkey);
		rqe->to = cpu_to_le64(recv_wr->sg_list[0].addr);
		rqe->length = cpu_to_le32(recv_wr->sg_list[0].length);
	} else {
		return -EINVAL;
	}

	*(u64 *)qp->kern_qp.rq_db_info = *(u64 *)rqe;
	writeq(*(u64 *)rqe, qp->kern_qp.hw_rq_db);

	qp->kern_qp.rwr_tbl[qp->kern_qp.rq_pi & (qp->attrs.rq_size - 1)] =
		recv_wr->wr_id;
	qp->kern_qp.rq_pi++;

	return 0;
}

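/*
 * Post a chain of receive WRs under the QP lock; the first WR that fails is
 * returned through @bad_recv_wr.
 */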
int erdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *recv_wr,
		    const struct ib_recv_wr **bad_recv_wr)
{
	const struct ib_recv_wr *wr = recv_wr;
	struct erdma_qp *qp = to_eqp(ibqp);
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&qp->lock, flags);

	while (wr) {
		ret = erdma_post_recv_one(qp, wr);
		if (ret) {
			*bad_recv_wr = wr;
			break;
		}
		wr = wr->next;
	}

	spin_unlock_irqrestore(&qp->lock, flags);
	return ret;
}