xref: /openbmc/linux/drivers/infiniband/hw/irdma/hw.c (revision 2fa5ebe3bc4e31e07a99196455498472417842f2)
1 // SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
2 /* Copyright (c) 2015 - 2021 Intel Corporation */
3 #include "main.h"
4 
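/* QP count profiles, indexed by rf->limits_sel; irdma_hmc_setup() feeds
 * the selected qplimit to irdma_cfg_fpm_val() when sizing HMC objects.
 */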
5 static struct irdma_rsrc_limits rsrc_limits_table[] = {
6 	[0] = {
7 		.qplimit = SZ_128,
8 	},
9 	[1] = {
10 		.qplimit = SZ_1K,
11 	},
12 	[2] = {
13 		.qplimit = SZ_2K,
14 	},
15 	[3] = {
16 		.qplimit = SZ_4K,
17 	},
18 	[4] = {
19 		.qplimit = SZ_16K,
20 	},
21 	[5] = {
22 		.qplimit = SZ_64K,
23 	},
24 	[6] = {
25 		.qplimit = SZ_128K,
26 	},
27 	[7] = {
28 		.qplimit = SZ_256K,
29 	},
30 };
31 
32 /* types of hmc objects */
33 static enum irdma_hmc_rsrc_type iw_hmc_obj_types[] = {
34 	IRDMA_HMC_IW_QP,
35 	IRDMA_HMC_IW_CQ,
36 	IRDMA_HMC_IW_HTE,
37 	IRDMA_HMC_IW_ARP,
38 	IRDMA_HMC_IW_APBVT_ENTRY,
39 	IRDMA_HMC_IW_MR,
40 	IRDMA_HMC_IW_XF,
41 	IRDMA_HMC_IW_XFFL,
42 	IRDMA_HMC_IW_Q1,
43 	IRDMA_HMC_IW_Q1FL,
44 	IRDMA_HMC_IW_PBLE,
45 	IRDMA_HMC_IW_TIMER,
46 	IRDMA_HMC_IW_FSIMC,
47 	IRDMA_HMC_IW_FSIAV,
48 	IRDMA_HMC_IW_RRF,
49 	IRDMA_HMC_IW_RRFFL,
50 	IRDMA_HMC_IW_HDR,
51 	IRDMA_HMC_IW_MD,
52 	IRDMA_HMC_IW_OOISC,
53 	IRDMA_HMC_IW_OOISCFFL,
54 };
55 
56 /**
57  * irdma_iwarp_ce_handler - handle iwarp completions
58  * @iwcq: iwarp cq receiving event
59  */
60 static void irdma_iwarp_ce_handler(struct irdma_sc_cq *iwcq)
61 {
62 	struct irdma_cq *cq = iwcq->back_cq;
63 
64 	if (!cq->user_mode)
65 		atomic_set(&cq->armed, 0);
66 	if (cq->ibcq.comp_handler)
67 		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
68 }
69 
70 /**
71  * irdma_puda_ce_handler - handle puda completion events
72  * @rf: RDMA PCI function
73  * @cq: puda completion q for event
74  */
75 static void irdma_puda_ce_handler(struct irdma_pci_f *rf,
76 				  struct irdma_sc_cq *cq)
77 {
78 	struct irdma_sc_dev *dev = &rf->sc_dev;
79 	u32 compl_error;
80 	int status;
81 
82 	do {
83 		status = irdma_puda_poll_cmpl(dev, cq, &compl_error);
84 		if (status == -ENOENT)
85 			break;
86 		if (status) {
87 			ibdev_dbg(to_ibdev(dev), "ERR: puda status = %d\n", status);
88 			break;
89 		}
90 		if (compl_error) {
91 			ibdev_dbg(to_ibdev(dev), "ERR: puda compl_err = 0x%x\n",
92 				  compl_error);
93 			break;
94 		}
95 	} while (1);
96 
97 	irdma_sc_ccq_arm(cq);
98 }
99 
100 /**
101  * irdma_process_ceq - handle ceq for completions
102  * @rf: RDMA PCI function
103  * @ceq: ceq having cq for completion
104  */
105 static void irdma_process_ceq(struct irdma_pci_f *rf, struct irdma_ceq *ceq)
106 {
107 	struct irdma_sc_dev *dev = &rf->sc_dev;
108 	struct irdma_sc_ceq *sc_ceq;
109 	struct irdma_sc_cq *cq;
110 	unsigned long flags;
111 
112 	sc_ceq = &ceq->sc_ceq;
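	/* Drain the CEQ: iWARP CQ completions are signaled under ce_lock,
	 * while CQP and PUDA (ILQ/IEQ) CQs are handled after it is dropped.
	 */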
113 	do {
114 		spin_lock_irqsave(&ceq->ce_lock, flags);
115 		cq = irdma_sc_process_ceq(dev, sc_ceq);
116 		if (!cq) {
117 			spin_unlock_irqrestore(&ceq->ce_lock, flags);
118 			break;
119 		}
120 
121 		if (cq->cq_type == IRDMA_CQ_TYPE_IWARP)
122 			irdma_iwarp_ce_handler(cq);
123 
124 		spin_unlock_irqrestore(&ceq->ce_lock, flags);
125 
126 		if (cq->cq_type == IRDMA_CQ_TYPE_CQP)
127 			queue_work(rf->cqp_cmpl_wq, &rf->cqp_cmpl_work);
128 		else if (cq->cq_type == IRDMA_CQ_TYPE_ILQ ||
129 			 cq->cq_type == IRDMA_CQ_TYPE_IEQ)
130 			irdma_puda_ce_handler(rf, cq);
131 	} while (1);
132 }
133 
134 static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
135 				   struct irdma_aeqe_info *info)
136 {
137 	qp->sq_flush_code = info->sq;
138 	qp->rq_flush_code = info->rq;
139 	qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
140 
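	/* Map the async event to the flush code reported via poll_cq and
	 * to the IB event type; the catastrophic default set above is
	 * refined per AE below.
	 */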
141 	switch (info->ae_id) {
142 	case IRDMA_AE_AMP_BOUNDS_VIOLATION:
143 	case IRDMA_AE_AMP_INVALID_STAG:
144 	case IRDMA_AE_AMP_RIGHTS_VIOLATION:
145 	case IRDMA_AE_AMP_UNALLOCATED_STAG:
146 	case IRDMA_AE_AMP_BAD_PD:
147 	case IRDMA_AE_AMP_BAD_QP:
148 	case IRDMA_AE_AMP_BAD_STAG_KEY:
149 	case IRDMA_AE_AMP_BAD_STAG_INDEX:
150 	case IRDMA_AE_AMP_TO_WRAP:
151 	case IRDMA_AE_PRIV_OPERATION_DENIED:
152 		qp->flush_code = FLUSH_PROT_ERR;
153 		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
154 		break;
155 	case IRDMA_AE_UDA_XMIT_BAD_PD:
156 	case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
157 		qp->flush_code = FLUSH_LOC_QP_OP_ERR;
158 		qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
159 		break;
160 	case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
161 	case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
162 	case IRDMA_AE_UDA_L4LEN_INVALID:
163 	case IRDMA_AE_DDP_UBE_INVALID_MO:
164 	case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
165 		qp->flush_code = FLUSH_LOC_LEN_ERR;
166 		qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
167 		break;
168 	case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
169 	case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
170 		qp->flush_code = FLUSH_REM_ACCESS_ERR;
171 		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
172 		break;
173 	case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
174 	case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
175 	case IRDMA_AE_ROCE_RSP_LENGTH_ERROR:
176 	case IRDMA_AE_IB_REMOTE_OP_ERROR:
177 		qp->flush_code = FLUSH_REM_OP_ERR;
178 		qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
179 		break;
180 	case IRDMA_AE_LCE_QP_CATASTROPHIC:
181 		qp->flush_code = FLUSH_FATAL_ERR;
182 		qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
183 		break;
184 	case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
185 		qp->flush_code = FLUSH_GENERAL_ERR;
186 		break;
187 	case IRDMA_AE_LLP_TOO_MANY_RETRIES:
188 		qp->flush_code = FLUSH_RETRY_EXC_ERR;
189 		qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
190 		break;
191 	case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS:
192 	case IRDMA_AE_AMP_MWBIND_BIND_DISABLED:
193 	case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
194 		qp->flush_code = FLUSH_MW_BIND_ERR;
195 		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
196 		break;
197 	case IRDMA_AE_IB_INVALID_REQUEST:
198 		qp->flush_code = FLUSH_REM_INV_REQ_ERR;
199 		qp->event_type = IRDMA_QP_EVENT_REQ_ERR;
200 		break;
201 	default:
202 		qp->flush_code = FLUSH_GENERAL_ERR;
203 		qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
204 		break;
205 	}
206 }
207 
208 /**
209  * irdma_process_aeq - handle aeq events
210  * @rf: RDMA PCI function
211  */
212 static void irdma_process_aeq(struct irdma_pci_f *rf)
213 {
214 	struct irdma_sc_dev *dev = &rf->sc_dev;
215 	struct irdma_aeq *aeq = &rf->aeq;
216 	struct irdma_sc_aeq *sc_aeq = &aeq->sc_aeq;
217 	struct irdma_aeqe_info aeinfo;
218 	struct irdma_aeqe_info *info = &aeinfo;
219 	int ret;
220 	struct irdma_qp *iwqp = NULL;
221 	struct irdma_sc_cq *cq = NULL;
222 	struct irdma_cq *iwcq = NULL;
223 	struct irdma_sc_qp *qp = NULL;
224 	struct irdma_qp_host_ctx_info *ctx_info = NULL;
225 	struct irdma_device *iwdev = rf->iwdev;
226 	unsigned long flags;
227 
228 	u32 aeqcnt = 0;
229 
230 	if (!sc_aeq->size)
231 		return;
232 
233 	do {
234 		memset(info, 0, sizeof(*info));
235 		ret = irdma_sc_get_next_aeqe(sc_aeq, info);
236 		if (ret)
237 			break;
238 
239 		aeqcnt++;
240 		ibdev_dbg(&iwdev->ibdev,
241 			  "AEQ: ae_id = 0x%x bool qp=%d qp_id = %d tcp_state=%d iwarp_state=%d ae_src=%d\n",
242 			  info->ae_id, info->qp, info->qp_cq_id, info->tcp_state,
243 			  info->iwarp_state, info->ae_src);
244 
245 		if (info->qp) {
246 			spin_lock_irqsave(&rf->qptable_lock, flags);
247 			iwqp = rf->qp_table[info->qp_cq_id];
248 			if (!iwqp) {
249 				spin_unlock_irqrestore(&rf->qptable_lock,
250 						       flags);
251 				if (info->ae_id == IRDMA_AE_QP_SUSPEND_COMPLETE) {
252 					atomic_dec(&iwdev->vsi.qp_suspend_reqs);
253 					wake_up(&iwdev->suspend_wq);
254 					continue;
255 				}
256 				ibdev_dbg(&iwdev->ibdev, "AEQ: qp_id %d is already freed\n",
257 					  info->qp_cq_id);
258 				continue;
259 			}
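			/* Hold a reference so the QP cannot be freed while
			 * this AE is processed; released at the bottom of
			 * the loop.
			 */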
260 			irdma_qp_add_ref(&iwqp->ibqp);
261 			spin_unlock_irqrestore(&rf->qptable_lock, flags);
262 			qp = &iwqp->sc_qp;
263 			spin_lock_irqsave(&iwqp->lock, flags);
264 			iwqp->hw_tcp_state = info->tcp_state;
265 			iwqp->hw_iwarp_state = info->iwarp_state;
266 			if (info->ae_id != IRDMA_AE_QP_SUSPEND_COMPLETE)
267 				iwqp->last_aeq = info->ae_id;
268 			spin_unlock_irqrestore(&iwqp->lock, flags);
269 			ctx_info = &iwqp->ctx_info;
270 		} else {
271 			if (info->ae_id != IRDMA_AE_CQ_OPERATION_ERROR)
272 				continue;
273 		}
274 
275 		switch (info->ae_id) {
276 			struct irdma_cm_node *cm_node;
277 		case IRDMA_AE_LLP_CONNECTION_ESTABLISHED:
278 			cm_node = iwqp->cm_node;
279 			if (cm_node->accept_pend) {
280 				atomic_dec(&cm_node->listener->pend_accepts_cnt);
281 				cm_node->accept_pend = 0;
282 			}
283 			iwqp->rts_ae_rcvd = 1;
284 			wake_up_interruptible(&iwqp->waitq);
285 			break;
286 		case IRDMA_AE_LLP_FIN_RECEIVED:
287 		case IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE:
288 			if (qp->term_flags)
289 				break;
290 			if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
291 				iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSE_WAIT;
292 				/* hw_tcp_state was set to CLOSE_WAIT just above */
293 				if (iwqp->ibqp_state == IB_QPS_RTS) {
294 					irdma_next_iw_state(iwqp,
295 							    IRDMA_QP_STATE_CLOSING,
296 							    0, 0, 0);
297 					irdma_cm_disconn(iwqp);
298 				}
299 				irdma_schedule_cm_timer(iwqp->cm_node,
300 							(struct irdma_puda_buf *)iwqp,
301 							IRDMA_TIMER_TYPE_CLOSE,
302 							1, 0);
303 			}
304 			break;
305 		case IRDMA_AE_LLP_CLOSE_COMPLETE:
306 			if (qp->term_flags)
307 				irdma_terminate_done(qp, 0);
308 			else
309 				irdma_cm_disconn(iwqp);
310 			break;
311 		case IRDMA_AE_BAD_CLOSE:
312 		case IRDMA_AE_RESET_SENT:
313 			irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 1, 0,
314 					    0);
315 			irdma_cm_disconn(iwqp);
316 			break;
317 		case IRDMA_AE_LLP_CONNECTION_RESET:
318 			if (atomic_read(&iwqp->close_timer_started))
319 				break;
320 			irdma_cm_disconn(iwqp);
321 			break;
322 		case IRDMA_AE_QP_SUSPEND_COMPLETE:
323 			if (iwqp->iwdev->vsi.tc_change_pending) {
324 				atomic_dec(&iwqp->sc_qp.vsi->qp_suspend_reqs);
325 				wake_up(&iwqp->iwdev->suspend_wq);
326 			}
327 			break;
328 		case IRDMA_AE_TERMINATE_SENT:
329 			irdma_terminate_send_fin(qp);
330 			break;
331 		case IRDMA_AE_LLP_TERMINATE_RECEIVED:
332 			irdma_terminate_received(qp, info);
333 			break;
334 		case IRDMA_AE_CQ_OPERATION_ERROR:
335 			ibdev_err(&iwdev->ibdev,
336 				  "Processing an iWARP related AE for CQ misc = 0x%04X\n",
337 				  info->ae_id);
338 			cq = (struct irdma_sc_cq *)(unsigned long)
339 			     info->compl_ctx;
340 
341 			iwcq = cq->back_cq;
342 
343 			if (iwcq->ibcq.event_handler) {
344 				struct ib_event ibevent;
345 
346 				ibevent.device = iwcq->ibcq.device;
347 				ibevent.event = IB_EVENT_CQ_ERR;
348 				ibevent.element.cq = &iwcq->ibcq;
349 				iwcq->ibcq.event_handler(&ibevent,
350 							 iwcq->ibcq.cq_context);
351 			}
352 			break;
353 		case IRDMA_AE_RESET_NOT_SENT:
354 		case IRDMA_AE_LLP_DOUBT_REACHABILITY:
355 		case IRDMA_AE_RESOURCE_EXHAUSTION:
356 			break;
357 		case IRDMA_AE_PRIV_OPERATION_DENIED:
358 		case IRDMA_AE_STAG_ZERO_INVALID:
359 		case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
360 		case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION:
361 		case IRDMA_AE_DDP_UBE_INVALID_MO:
362 		case IRDMA_AE_DDP_UBE_INVALID_QN:
363 		case IRDMA_AE_DDP_NO_L_BIT:
364 		case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
365 		case IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
366 		case IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST:
367 		case IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
368 		case IRDMA_AE_INVALID_ARP_ENTRY:
369 		case IRDMA_AE_INVALID_TCP_OPTION_RCVD:
370 		case IRDMA_AE_STALE_ARP_ENTRY:
371 		case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
372 		case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
373 		case IRDMA_AE_LLP_SYN_RECEIVED:
374 		case IRDMA_AE_LLP_TOO_MANY_RETRIES:
375 		case IRDMA_AE_LCE_QP_CATASTROPHIC:
376 		case IRDMA_AE_LCE_FUNCTION_CATASTROPHIC:
377 		case IRDMA_AE_LCE_CQ_CATASTROPHIC:
378 		case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
379 		default:
380 			ibdev_err(&iwdev->ibdev, "abnormal ae_id = 0x%x bool qp=%d qp_id = %d, ae_src=%d\n",
381 				  info->ae_id, info->qp, info->qp_cq_id, info->ae_src);
382 			if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
383 				ctx_info->roce_info->err_rq_idx_valid = info->rq;
384 				if (info->rq) {
385 					ctx_info->roce_info->err_rq_idx = info->wqe_idx;
386 					irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va,
387 								ctx_info);
388 				}
389 				irdma_set_flush_fields(qp, info);
390 				irdma_cm_disconn(iwqp);
391 				break;
392 			}
393 			ctx_info->iwarp_info->err_rq_idx_valid = info->rq;
394 			if (info->rq) {
395 				ctx_info->iwarp_info->err_rq_idx = info->wqe_idx;
396 				ctx_info->tcp_info_valid = false;
397 				ctx_info->iwarp_info_valid = true;
398 				irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va,
399 						   ctx_info);
400 			}
401 			if (iwqp->hw_iwarp_state != IRDMA_QP_STATE_RTS &&
402 			    iwqp->hw_iwarp_state != IRDMA_QP_STATE_TERMINATE) {
403 				irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 1, 0, 0);
404 				irdma_cm_disconn(iwqp);
405 			} else {
406 				irdma_terminate_connection(qp, info);
407 			}
408 			break;
409 		}
410 		if (info->qp)
411 			irdma_qp_rem_ref(&iwqp->ibqp);
412 	} while (1);
413 
414 	if (aeqcnt)
415 		irdma_sc_repost_aeq_entries(dev, aeqcnt);
416 }
417 
418 /**
419  * irdma_ena_intr - enable a device interrupt
420  * @dev: hardware control device structure
421  * @msix_id: id of the interrupt to be enabled
422  */
423 static void irdma_ena_intr(struct irdma_sc_dev *dev, u32 msix_id)
424 {
425 	dev->irq_ops->irdma_en_irq(dev, msix_id);
426 }
427 
428 /**
429  * irdma_dpc - tasklet for aeq and ceq 0
430  * @t: tasklet_struct ptr
431  */
432 static void irdma_dpc(struct tasklet_struct *t)
433 {
434 	struct irdma_pci_f *rf = from_tasklet(rf, t, dpc_tasklet);
435 
436 	if (rf->msix_shared)
437 		irdma_process_ceq(rf, rf->ceqlist);
438 	irdma_process_aeq(rf);
439 	irdma_ena_intr(&rf->sc_dev, rf->iw_msixtbl[0].idx);
440 }
441 
442 /**
443  * irdma_ceq_dpc - dpc handler for CEQ
444  * @t: tasklet_struct ptr
445  */
446 static void irdma_ceq_dpc(struct tasklet_struct *t)
447 {
448 	struct irdma_ceq *iwceq = from_tasklet(iwceq, t, dpc_tasklet);
449 	struct irdma_pci_f *rf = iwceq->rf;
450 
451 	irdma_process_ceq(rf, iwceq);
452 	irdma_ena_intr(&rf->sc_dev, iwceq->msix_idx);
453 }
454 
455 /**
456  * irdma_save_msix_info - copy msix vector information to iwarp device
457  * @rf: RDMA PCI function
458  *
459  * Allocate iwdev msix table and copy the msix info to the table.
460  * Return 0 if successful, otherwise return error
461  */
462 static int irdma_save_msix_info(struct irdma_pci_f *rf)
463 {
464 	struct irdma_qvlist_info *iw_qvlist;
465 	struct irdma_qv_info *iw_qvinfo;
466 	struct msix_entry *pmsix;
467 	u32 ceq_idx;
468 	u32 i;
469 	size_t size;
470 
471 	if (!rf->msix_count)
472 		return -EINVAL;
473 
474 	size = sizeof(struct irdma_msix_vector) * rf->msix_count;
475 	size += struct_size(iw_qvlist, qv_info, rf->msix_count);
476 	rf->iw_msixtbl = kzalloc(size, GFP_KERNEL);
477 	if (!rf->iw_msixtbl)
478 		return -ENOMEM;
479 
480 	rf->iw_qvlist = (struct irdma_qvlist_info *)
481 			(&rf->iw_msixtbl[rf->msix_count]);
482 	iw_qvlist = rf->iw_qvlist;
483 	iw_qvinfo = iw_qvlist->qv_info;
484 	iw_qvlist->num_vectors = rf->msix_count;
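	/* Vector 0 is shared between the AEQ and CEQ 0 when there are at
	 * most as many vectors as online CPUs; otherwise the vector count
	 * is capped at CPUs + 1 so the AEQ keeps a dedicated vector.
	 */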
485 	if (rf->msix_count <= num_online_cpus())
486 		rf->msix_shared = true;
487 	else if (rf->msix_count > num_online_cpus() + 1)
488 		rf->msix_count = num_online_cpus() + 1;
489 
490 	pmsix = rf->msix_entries;
491 	for (i = 0, ceq_idx = 0; i < rf->msix_count; i++, iw_qvinfo++) {
492 		rf->iw_msixtbl[i].idx = pmsix->entry;
493 		rf->iw_msixtbl[i].irq = pmsix->vector;
494 		rf->iw_msixtbl[i].cpu_affinity = ceq_idx;
495 		if (!i) {
496 			iw_qvinfo->aeq_idx = 0;
497 			if (rf->msix_shared)
498 				iw_qvinfo->ceq_idx = ceq_idx++;
499 			else
500 				iw_qvinfo->ceq_idx = IRDMA_Q_INVALID_IDX;
501 		} else {
502 			iw_qvinfo->aeq_idx = IRDMA_Q_INVALID_IDX;
503 			iw_qvinfo->ceq_idx = ceq_idx++;
504 		}
505 		iw_qvinfo->itr_idx = 3;
506 		iw_qvinfo->v_idx = rf->iw_msixtbl[i].idx;
507 		pmsix++;
508 	}
509 
510 	return 0;
511 }
512 
513 /**
514  * irdma_irq_handler - interrupt handler for aeq and ceq0
515  * @irq: Interrupt request number
516  * @data: RDMA PCI function
517  */
518 static irqreturn_t irdma_irq_handler(int irq, void *data)
519 {
520 	struct irdma_pci_f *rf = data;
521 
522 	tasklet_schedule(&rf->dpc_tasklet);
523 
524 	return IRQ_HANDLED;
525 }
526 
527 /**
528  * irdma_ceq_handler - interrupt handler for ceq
529  * @irq: interrupt request number
530  * @data: ceq pointer
531  */
532 static irqreturn_t irdma_ceq_handler(int irq, void *data)
533 {
534 	struct irdma_ceq *iwceq = data;
535 
536 	if (iwceq->irq != irq)
537 		ibdev_err(to_ibdev(&iwceq->rf->sc_dev), "expected irq = %d received irq = %d\n",
538 			  iwceq->irq, irq);
539 	tasklet_schedule(&iwceq->dpc_tasklet);
540 
541 	return IRQ_HANDLED;
542 }
543 
544 /**
545  * irdma_destroy_irq - destroy device interrupts
546  * @rf: RDMA PCI function
547  * @msix_vec: msix vector to disable irq
548  * @dev_id: parameter to pass to free_irq (used during irq setup)
549  *
550  * The function is called when destroying aeq/ceq
551  */
552 static void irdma_destroy_irq(struct irdma_pci_f *rf,
553 			      struct irdma_msix_vector *msix_vec, void *dev_id)
554 {
555 	struct irdma_sc_dev *dev = &rf->sc_dev;
556 
557 	dev->irq_ops->irdma_dis_irq(dev, msix_vec->idx);
558 	irq_update_affinity_hint(msix_vec->irq, NULL);
559 	free_irq(msix_vec->irq, dev_id);
560 }
561 
562 /**
563  * irdma_destroy_cqp  - destroy control qp
564  * @rf: RDMA PCI function
565  * @free_hwcqp: true if hw cqp should be freed
566  *
567  * Issue destroy cqp request and
568  * free the resources associated with the cqp
569  */
570 static void irdma_destroy_cqp(struct irdma_pci_f *rf, bool free_hwcqp)
571 {
572 	struct irdma_sc_dev *dev = &rf->sc_dev;
573 	struct irdma_cqp *cqp = &rf->cqp;
574 	int status = 0;
575 
576 	if (rf->cqp_cmpl_wq)
577 		destroy_workqueue(rf->cqp_cmpl_wq);
578 	if (free_hwcqp)
579 		status = irdma_sc_cqp_destroy(dev->cqp);
580 	if (status)
581 		ibdev_dbg(to_ibdev(dev), "ERR: Destroy CQP failed %d\n", status);
582 
583 	irdma_cleanup_pending_cqp_op(rf);
584 	dma_free_coherent(dev->hw->device, cqp->sq.size, cqp->sq.va,
585 			  cqp->sq.pa);
586 	cqp->sq.va = NULL;
587 	kfree(cqp->scratch_array);
588 	cqp->scratch_array = NULL;
589 	kfree(cqp->cqp_requests);
590 	cqp->cqp_requests = NULL;
591 }
592 
593 static void irdma_destroy_virt_aeq(struct irdma_pci_f *rf)
594 {
595 	struct irdma_aeq *aeq = &rf->aeq;
596 	u32 pg_cnt = DIV_ROUND_UP(aeq->mem.size, PAGE_SIZE);
597 	dma_addr_t *pg_arr = (dma_addr_t *)aeq->palloc.level1.addr;
598 
599 	irdma_unmap_vm_page_list(&rf->hw, pg_arr, pg_cnt);
600 	irdma_free_pble(rf->pble_rsrc, &aeq->palloc);
601 	vfree(aeq->mem.va);
602 }
603 
604 /**
605  * irdma_destroy_aeq - destroy aeq
606  * @rf: RDMA PCI function
607  *
608  * Issue a destroy aeq request and
609  * free the resources associated with the aeq.
610  * The function is called during driver unload
611  */
612 static void irdma_destroy_aeq(struct irdma_pci_f *rf)
613 {
614 	struct irdma_sc_dev *dev = &rf->sc_dev;
615 	struct irdma_aeq *aeq = &rf->aeq;
616 	int status = -EBUSY;
617 
618 	if (!rf->msix_shared) {
619 		rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, rf->iw_msixtbl->idx, false);
620 		irdma_destroy_irq(rf, rf->iw_msixtbl, rf);
621 	}
622 	if (rf->reset)
623 		goto exit;
624 
625 	aeq->sc_aeq.size = 0;
626 	status = irdma_cqp_aeq_cmd(dev, &aeq->sc_aeq, IRDMA_OP_AEQ_DESTROY);
627 	if (status)
628 		ibdev_dbg(to_ibdev(dev), "ERR: Destroy AEQ failed %d\n", status);
629 
630 exit:
631 	if (aeq->virtual_map) {
632 		irdma_destroy_virt_aeq(rf);
633 	} else {
634 		dma_free_coherent(dev->hw->device, aeq->mem.size, aeq->mem.va,
635 				  aeq->mem.pa);
636 		aeq->mem.va = NULL;
637 	}
638 }
639 
640 /**
641  * irdma_destroy_ceq - destroy ceq
642  * @rf: RDMA PCI function
643  * @iwceq: ceq to be destroyed
644  *
645  * Issue a destroy ceq request and
646  * free the resources associated with the ceq
647  */
648 static void irdma_destroy_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq)
649 {
650 	struct irdma_sc_dev *dev = &rf->sc_dev;
651 	int status;
652 
653 	if (rf->reset)
654 		goto exit;
655 
656 	status = irdma_sc_ceq_destroy(&iwceq->sc_ceq, 0, 1);
657 	if (status) {
658 		ibdev_dbg(to_ibdev(dev), "ERR: CEQ destroy command failed %d\n", status);
659 		goto exit;
660 	}
661 
662 	status = irdma_sc_cceq_destroy_done(&iwceq->sc_ceq);
663 	if (status)
664 		ibdev_dbg(to_ibdev(dev), "ERR: CEQ destroy completion failed %d\n",
665 			  status);
666 exit:
667 	dma_free_coherent(dev->hw->device, iwceq->mem.size, iwceq->mem.va,
668 			  iwceq->mem.pa);
669 	iwceq->mem.va = NULL;
670 }
671 
672 /**
673  * irdma_del_ceq_0 - destroy ceq 0
674  * @rf: RDMA PCI function
675  *
676  * Disable the ceq 0 interrupt and destroy the ceq 0
677  */
678 static void irdma_del_ceq_0(struct irdma_pci_f *rf)
679 {
680 	struct irdma_ceq *iwceq = rf->ceqlist;
681 	struct irdma_msix_vector *msix_vec;
682 
683 	if (rf->msix_shared) {
684 		msix_vec = &rf->iw_msixtbl[0];
685 		rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev,
686 						  msix_vec->ceq_id,
687 						  msix_vec->idx, false);
688 		irdma_destroy_irq(rf, msix_vec, rf);
689 	} else {
690 		msix_vec = &rf->iw_msixtbl[1];
691 		irdma_destroy_irq(rf, msix_vec, iwceq);
692 	}
693 
694 	irdma_destroy_ceq(rf, iwceq);
695 	rf->sc_dev.ceq_valid = false;
696 	rf->ceqs_count = 0;
697 }
698 
699 /**
700  * irdma_del_ceqs - destroy all ceqs except CEQ 0
701  * @rf: RDMA PCI function
702  *
703  * Go through all of the device ceqs, except 0, and for each
704  * ceq disable the ceq interrupt and destroy the ceq
705  */
706 static void irdma_del_ceqs(struct irdma_pci_f *rf)
707 {
708 	struct irdma_ceq *iwceq = &rf->ceqlist[1];
709 	struct irdma_msix_vector *msix_vec;
710 	u32 i = 0;
711 
712 	if (rf->msix_shared)
713 		msix_vec = &rf->iw_msixtbl[1];
714 	else
715 		msix_vec = &rf->iw_msixtbl[2];
716 
717 	for (i = 1; i < rf->ceqs_count; i++, msix_vec++, iwceq++) {
718 		rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, msix_vec->ceq_id,
719 						  msix_vec->idx, false);
720 		irdma_destroy_irq(rf, msix_vec, iwceq);
721 		irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq,
722 				  IRDMA_OP_CEQ_DESTROY);
723 		dma_free_coherent(rf->sc_dev.hw->device, iwceq->mem.size,
724 				  iwceq->mem.va, iwceq->mem.pa);
725 		iwceq->mem.va = NULL;
726 	}
727 	rf->ceqs_count = 1;
728 }
729 
730 /**
731  * irdma_destroy_ccq - destroy control cq
732  * @rf: RDMA PCI function
733  *
734  * Issue destroy ccq request and
735  * free the resources associated with the ccq
736  */
737 static void irdma_destroy_ccq(struct irdma_pci_f *rf)
738 {
739 	struct irdma_sc_dev *dev = &rf->sc_dev;
740 	struct irdma_ccq *ccq = &rf->ccq;
741 	int status = 0;
742 
743 	if (!rf->reset)
744 		status = irdma_sc_ccq_destroy(dev->ccq, 0, true);
745 	if (status)
746 		ibdev_dbg(to_ibdev(dev), "ERR: CCQ destroy failed %d\n", status);
747 	dma_free_coherent(dev->hw->device, ccq->mem_cq.size, ccq->mem_cq.va,
748 			  ccq->mem_cq.pa);
749 	ccq->mem_cq.va = NULL;
750 }
751 
752 /**
753  * irdma_close_hmc_objects_type - delete hmc objects of a given type
754  * @dev: iwarp device
755  * @obj_type: the hmc object type to be deleted
756  * @hmc_info: host memory info struct
757  * @privileged: permission to close HMC objects
758  * @reset: true if called before reset
759  */
760 static void irdma_close_hmc_objects_type(struct irdma_sc_dev *dev,
761 					 enum irdma_hmc_rsrc_type obj_type,
762 					 struct irdma_hmc_info *hmc_info,
763 					 bool privileged, bool reset)
764 {
765 	struct irdma_hmc_del_obj_info info = {};
766 
767 	info.hmc_info = hmc_info;
768 	info.rsrc_type = obj_type;
769 	info.count = hmc_info->hmc_obj[obj_type].cnt;
770 	info.privileged = privileged;
771 	if (irdma_sc_del_hmc_obj(dev, &info, reset))
772 		ibdev_dbg(to_ibdev(dev), "ERR: del HMC obj of type %d failed\n",
773 			  obj_type);
774 }
775 
776 /**
777  * irdma_del_hmc_objects - remove all device hmc objects
778  * @dev: iwarp device
779  * @hmc_info: hmc_info to free
780  * @privileged: permission to delete HMC objects
781  * @reset: true if called before reset
782  * @vers: hardware version
783  */
784 static void irdma_del_hmc_objects(struct irdma_sc_dev *dev,
785 				  struct irdma_hmc_info *hmc_info, bool privileged,
786 				  bool reset, enum irdma_vers vers)
787 {
788 	unsigned int i;
789 
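	/* GEN_1 only uses the HMC object types up through the timer
	 * object, hence the early loop exit below.
	 */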
790 	for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
791 		if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt)
792 			irdma_close_hmc_objects_type(dev, iw_hmc_obj_types[i],
793 						     hmc_info, privileged, reset);
794 		if (vers == IRDMA_GEN_1 && i == IRDMA_HMC_IW_TIMER)
795 			break;
796 	}
797 }
798 
799 /**
800  * irdma_create_hmc_obj_type - create hmc object of a given type
801  * @dev: hardware control device structure
802  * @info: information for the hmc object to create
803  */
804 static int irdma_create_hmc_obj_type(struct irdma_sc_dev *dev,
805 				     struct irdma_hmc_create_obj_info *info)
806 {
807 	return irdma_sc_create_hmc_obj(dev, info);
808 }
809 
810 /**
811  * irdma_create_hmc_objs - create all hmc objects for the device
812  * @rf: RDMA PCI function
813  * @privileged: permission to create HMC objects
814  * @vers: HW version
815  *
816  * Create the device hmc objects and allocate hmc pages.
817  * Return 0 if successful, otherwise clean up and return error
818  */
819 static int irdma_create_hmc_objs(struct irdma_pci_f *rf, bool privileged,
820 				 enum irdma_vers vers)
821 {
822 	struct irdma_sc_dev *dev = &rf->sc_dev;
823 	struct irdma_hmc_create_obj_info info = {};
824 	int i, status = 0;
825 
826 	info.hmc_info = dev->hmc_info;
827 	info.privileged = privileged;
828 	info.entry_type = rf->sd_type;
829 
830 	for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
831 		if (iw_hmc_obj_types[i] == IRDMA_HMC_IW_PBLE)
832 			continue;
833 		if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt) {
834 			info.rsrc_type = iw_hmc_obj_types[i];
835 			info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt;
836 			info.add_sd_cnt = 0;
837 			status = irdma_create_hmc_obj_type(dev, &info);
838 			if (status) {
839 				ibdev_dbg(to_ibdev(dev),
840 					  "ERR: create obj type %d status = %d\n",
841 					  iw_hmc_obj_types[i], status);
842 				break;
843 			}
844 		}
845 		if (vers == IRDMA_GEN_1 && i == IRDMA_HMC_IW_TIMER)
846 			break;
847 	}
848 
849 	if (!status)
850 		return irdma_sc_static_hmc_pages_allocated(dev->cqp, 0, dev->hmc_fn_id,
851 							   true, true);
852 
853 	while (i) {
854 		i--;
855 		/* destroy the hmc objects of a given type */
856 		if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt)
857 			irdma_close_hmc_objects_type(dev, iw_hmc_obj_types[i],
858 						     dev->hmc_info, privileged,
859 						     false);
860 	}
861 
862 	return status;
863 }
864 
865 /**
866  * irdma_obj_aligned_mem - get aligned memory from device allocated memory
867  * @rf: RDMA PCI function
868  * @memptr: points to the memory addresses
869  * @size: size of memory needed
870  * @mask: mask for the aligned memory
871  *
872  * Get aligned memory of the requested size and
873  * update the memptr to point to the new aligned memory.
874  * Return 0 if successful, otherwise return -ENOMEM
875  */
876 static int irdma_obj_aligned_mem(struct irdma_pci_f *rf,
877 				 struct irdma_dma_mem *memptr, u32 size,
878 				 u32 mask)
879 {
880 	unsigned long va, newva;
881 	unsigned long extra;
882 
883 	va = (unsigned long)rf->obj_next.va;
884 	newva = va;
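	/* mask is an alignment mask: the address is rounded up to
	 * (mask + 1), e.g. a mask of 0xFFF gives 4K alignment; extra is
	 * the padding consumed to reach the boundary.
	 */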
885 	if (mask)
886 		newva = ALIGN(va, (unsigned long)mask + 1ULL);
887 	extra = newva - va;
888 	memptr->va = (u8 *)va + extra;
889 	memptr->pa = rf->obj_next.pa + extra;
890 	memptr->size = size;
891 	if (((u8 *)memptr->va + size) > ((u8 *)rf->obj_mem.va + rf->obj_mem.size))
892 		return -ENOMEM;
893 
894 	rf->obj_next.va = (u8 *)memptr->va + size;
895 	rf->obj_next.pa = memptr->pa + size;
896 
897 	return 0;
898 }
899 
900 /**
901  * irdma_create_cqp - create control qp
902  * @rf: RDMA PCI function
903  *
904  * Return 0, if the cqp and all the resources associated with it
905  * are successfully created, otherwise return error
906  */
907 static int irdma_create_cqp(struct irdma_pci_f *rf)
908 {
909 	u32 sqsize = IRDMA_CQP_SW_SQSIZE_2048;
910 	struct irdma_dma_mem mem;
911 	struct irdma_sc_dev *dev = &rf->sc_dev;
912 	struct irdma_cqp_init_info cqp_init_info = {};
913 	struct irdma_cqp *cqp = &rf->cqp;
914 	u16 maj_err, min_err;
915 	int i, status;
916 
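	/* One request descriptor and one scratch slot exist per CQP SQ
	 * WQE; the scratch entry lets the completion path map a CQE back
	 * to its originating cqp_request.
	 */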
917 	cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL);
918 	if (!cqp->cqp_requests)
919 		return -ENOMEM;
920 
921 	cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL);
922 	if (!cqp->scratch_array) {
923 		kfree(cqp->cqp_requests);
924 		return -ENOMEM;
925 	}
926 
927 	dev->cqp = &cqp->sc_cqp;
928 	dev->cqp->dev = dev;
929 	cqp->sq.size = ALIGN(sizeof(struct irdma_cqp_sq_wqe) * sqsize,
930 			     IRDMA_CQP_ALIGNMENT);
931 	cqp->sq.va = dma_alloc_coherent(dev->hw->device, cqp->sq.size,
932 					&cqp->sq.pa, GFP_KERNEL);
933 	if (!cqp->sq.va) {
934 		kfree(cqp->scratch_array);
935 		kfree(cqp->cqp_requests);
936 		return -ENOMEM;
937 	}
938 
939 	status = irdma_obj_aligned_mem(rf, &mem, sizeof(struct irdma_cqp_ctx),
940 				       IRDMA_HOST_CTX_ALIGNMENT_M);
941 	if (status)
942 		goto exit;
943 
944 	dev->cqp->host_ctx_pa = mem.pa;
945 	dev->cqp->host_ctx = mem.va;
946 	/* populate the cqp init info */
947 	cqp_init_info.dev = dev;
948 	cqp_init_info.sq_size = sqsize;
949 	cqp_init_info.sq = cqp->sq.va;
950 	cqp_init_info.sq_pa = cqp->sq.pa;
951 	cqp_init_info.host_ctx_pa = mem.pa;
952 	cqp_init_info.host_ctx = mem.va;
953 	cqp_init_info.hmc_profile = rf->rsrc_profile;
954 	cqp_init_info.scratch_array = cqp->scratch_array;
955 	cqp_init_info.protocol_used = rf->protocol_used;
956 
957 	switch (rf->rdma_ver) {
958 	case IRDMA_GEN_1:
959 		cqp_init_info.hw_maj_ver = IRDMA_CQPHC_HW_MAJVER_GEN_1;
960 		break;
961 	case IRDMA_GEN_2:
962 		cqp_init_info.hw_maj_ver = IRDMA_CQPHC_HW_MAJVER_GEN_2;
963 		break;
964 	}
965 	status = irdma_sc_cqp_init(dev->cqp, &cqp_init_info);
966 	if (status) {
967 		ibdev_dbg(to_ibdev(dev), "ERR: cqp init status %d\n", status);
968 		goto exit;
969 	}
970 
971 	spin_lock_init(&cqp->req_lock);
972 	spin_lock_init(&cqp->compl_lock);
973 
974 	status = irdma_sc_cqp_create(dev->cqp, &maj_err, &min_err);
975 	if (status) {
976 		ibdev_dbg(to_ibdev(dev),
977 			  "ERR: cqp create failed - status %d maj_err %d min_err %d\n",
978 			  status, maj_err, min_err);
979 		goto exit;
980 	}
981 
982 	INIT_LIST_HEAD(&cqp->cqp_avail_reqs);
983 	INIT_LIST_HEAD(&cqp->cqp_pending_reqs);
984 
985 	/* init the waitqueue of the cqp_requests and add them to the list */
986 	for (i = 0; i < sqsize; i++) {
987 		init_waitqueue_head(&cqp->cqp_requests[i].waitq);
988 		list_add_tail(&cqp->cqp_requests[i].list, &cqp->cqp_avail_reqs);
989 	}
990 	init_waitqueue_head(&cqp->remove_wq);
991 	return 0;
992 
993 exit:
994 	irdma_destroy_cqp(rf, false);
995 
996 	return status;
997 }
998 
999 /**
1000  * irdma_create_ccq - create control cq
1001  * @rf: RDMA PCI function
1002  *
1003  * Return 0, if the ccq and the resources associated with it
1004  * are successfully created, otherwise return error
1005  */
1006 static int irdma_create_ccq(struct irdma_pci_f *rf)
1007 {
1008 	struct irdma_sc_dev *dev = &rf->sc_dev;
1009 	struct irdma_ccq_init_info info = {};
1010 	struct irdma_ccq *ccq = &rf->ccq;
1011 	int status;
1012 
1013 	dev->ccq = &ccq->sc_cq;
1014 	dev->ccq->dev = dev;
1015 	info.dev = dev;
1016 	ccq->shadow_area.size = sizeof(struct irdma_cq_shadow_area);
1017 	ccq->mem_cq.size = ALIGN(sizeof(struct irdma_cqe) * IW_CCQ_SIZE,
1018 				 IRDMA_CQ0_ALIGNMENT);
1019 	ccq->mem_cq.va = dma_alloc_coherent(dev->hw->device, ccq->mem_cq.size,
1020 					    &ccq->mem_cq.pa, GFP_KERNEL);
1021 	if (!ccq->mem_cq.va)
1022 		return -ENOMEM;
1023 
1024 	status = irdma_obj_aligned_mem(rf, &ccq->shadow_area,
1025 				       ccq->shadow_area.size,
1026 				       IRDMA_SHADOWAREA_M);
1027 	if (status)
1028 		goto exit;
1029 
1030 	ccq->sc_cq.back_cq = ccq;
1031 	/* populate the ccq init info */
1032 	info.cq_base = ccq->mem_cq.va;
1033 	info.cq_pa = ccq->mem_cq.pa;
1034 	info.num_elem = IW_CCQ_SIZE;
1035 	info.shadow_area = ccq->shadow_area.va;
1036 	info.shadow_area_pa = ccq->shadow_area.pa;
1037 	info.ceqe_mask = false;
1038 	info.ceq_id_valid = true;
1039 	info.shadow_read_threshold = 16;
1040 	info.vsi = &rf->default_vsi;
1041 	status = irdma_sc_ccq_init(dev->ccq, &info);
1042 	if (!status)
1043 		status = irdma_sc_ccq_create(dev->ccq, 0, true, true);
1044 exit:
1045 	if (status) {
1046 		dma_free_coherent(dev->hw->device, ccq->mem_cq.size,
1047 				  ccq->mem_cq.va, ccq->mem_cq.pa);
1048 		ccq->mem_cq.va = NULL;
1049 	}
1050 
1051 	return status;
1052 }
1053 
1054 /**
1055  * irdma_alloc_set_mac - set up a mac address table entry
1056  * @iwdev: irdma device
1057  *
1058  * Allocate a mac ip entry and add it to the hw table Return 0
1059  * Allocate a mac ip entry and add it to the hw table. Return 0
1060  */
1061 static int irdma_alloc_set_mac(struct irdma_device *iwdev)
1062 {
1063 	int status;
1064 
1065 	status = irdma_alloc_local_mac_entry(iwdev->rf,
1066 					     &iwdev->mac_ip_table_idx);
1067 	if (!status) {
1068 		status = irdma_add_local_mac_entry(iwdev->rf,
1069 						   (const u8 *)iwdev->netdev->dev_addr,
1070 						   (u8)iwdev->mac_ip_table_idx);
1071 		if (status)
1072 			irdma_del_local_mac_entry(iwdev->rf,
1073 						  (u8)iwdev->mac_ip_table_idx);
1074 	}
1075 	return status;
1076 }
1077 
1078 /**
1079  * irdma_cfg_ceq_vector - set up the msix interrupt vector for
1080  * ceq
1081  * @rf: RDMA PCI function
1082  * @iwceq: ceq associated with the vector
1083  * @ceq_id: the id number of the iwceq
1084  * @msix_vec: interrupt vector information
1085  *
1086  * Allocate interrupt resources and enable irq handling
1087  * Return 0 if successful, otherwise return error
1088  */
1089 static int irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
1090 				u32 ceq_id, struct irdma_msix_vector *msix_vec)
1091 {
1092 	int status;
1093 
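	/* With msix_shared, vector 0 services both the AEQ and CEQ 0
	 * through the combined irdma_dpc tasklet; all other CEQs get a
	 * dedicated handler and tasklet.
	 */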
1094 	if (rf->msix_shared && !ceq_id) {
1095 		tasklet_setup(&rf->dpc_tasklet, irdma_dpc);
1096 		status = request_irq(msix_vec->irq, irdma_irq_handler, 0,
1097 				     "AEQCEQ", rf);
1098 	} else {
1099 		tasklet_setup(&iwceq->dpc_tasklet, irdma_ceq_dpc);
1100 
1101 		status = request_irq(msix_vec->irq, irdma_ceq_handler, 0,
1102 				     "CEQ", iwceq);
1103 	}
1104 	if (status) {
1105 		ibdev_dbg(&rf->iwdev->ibdev, "ERR: ceq irq config fail\n");
1106 		return status;
1107 	}
1108 	cpumask_clear(&msix_vec->mask);
1109 	cpumask_set_cpu(msix_vec->cpu_affinity, &msix_vec->mask);
1110 	irq_update_affinity_hint(msix_vec->irq, &msix_vec->mask);
1111 
1112 	msix_vec->ceq_id = ceq_id;
1113 	rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, ceq_id, msix_vec->idx, true);
1114 
1115 	return 0;
1116 }
1117 
1118 /**
1119  * irdma_cfg_aeq_vector - set up the msix vector for aeq
1120  * @rf: RDMA PCI function
1121  *
1122  * Allocate interrupt resources and enable irq handling
1123  * Return 0 if successful, otherwise return error
1124  */
1125 static int irdma_cfg_aeq_vector(struct irdma_pci_f *rf)
1126 {
1127 	struct irdma_msix_vector *msix_vec = rf->iw_msixtbl;
1128 	u32 ret = 0;
1129 	int ret = 0;
1130 	if (!rf->msix_shared) {
1131 		tasklet_setup(&rf->dpc_tasklet, irdma_dpc);
1132 		ret = request_irq(msix_vec->irq, irdma_irq_handler, 0,
1133 				  "irdma", rf);
1134 	}
1135 	if (ret) {
1136 		ibdev_dbg(&rf->iwdev->ibdev, "ERR: aeq irq config fail\n");
1137 		return -EINVAL;
1138 	}
1139 
1140 	rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, msix_vec->idx, true);
1141 
1142 	return 0;
1143 }
1144 
1145 /**
1146  * irdma_create_ceq - create completion event queue
1147  * @rf: RDMA PCI function
1148  * @iwceq: pointer to the ceq resources to be created
1149  * @ceq_id: the id number of the iwceq
1150  * @vsi: SC vsi struct
1151  *
1152  * Return 0, if the ceq and the resources associated with it
1153  * are successfully created, otherwise return error
1154  */
1155 static int irdma_create_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
1156 			    u32 ceq_id, struct irdma_sc_vsi *vsi)
1157 {
1158 	int status;
1159 	struct irdma_ceq_init_info info = {};
1160 	struct irdma_sc_dev *dev = &rf->sc_dev;
1161 	u64 scratch;
1162 	u32 ceq_size;
1163 
1164 	info.ceq_id = ceq_id;
1165 	iwceq->rf = rf;
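	/* Size the CEQ to cover every CQ the function can own, capped at
	 * the hardware maximum.
	 */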
1166 	ceq_size = min(rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt,
1167 		       dev->hw_attrs.max_hw_ceq_size);
1168 	iwceq->mem.size = ALIGN(sizeof(struct irdma_ceqe) * ceq_size,
1169 				IRDMA_CEQ_ALIGNMENT);
1170 	iwceq->mem.va = dma_alloc_coherent(dev->hw->device, iwceq->mem.size,
1171 					   &iwceq->mem.pa, GFP_KERNEL);
1172 	if (!iwceq->mem.va)
1173 		return -ENOMEM;
1174 
1175 	info.ceq_id = ceq_id;
1176 	info.ceqe_base = iwceq->mem.va;
1177 	info.ceqe_pa = iwceq->mem.pa;
1178 	info.elem_cnt = ceq_size;
1179 	iwceq->sc_ceq.ceq_id = ceq_id;
1180 	info.dev = dev;
1181 	info.vsi = vsi;
1182 	scratch = (uintptr_t)&rf->cqp.sc_cqp;
1183 	status = irdma_sc_ceq_init(&iwceq->sc_ceq, &info);
1184 	if (!status) {
1185 		if (dev->ceq_valid)
1186 			status = irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq,
1187 						   IRDMA_OP_CEQ_CREATE);
1188 		else
1189 			status = irdma_sc_cceq_create(&iwceq->sc_ceq, scratch);
1190 	}
1191 
1192 	if (status) {
1193 		dma_free_coherent(dev->hw->device, iwceq->mem.size,
1194 				  iwceq->mem.va, iwceq->mem.pa);
1195 		iwceq->mem.va = NULL;
1196 	}
1197 
1198 	return status;
1199 }
1200 
1201 /**
1202  * irdma_setup_ceq_0 - create CEQ 0 and its interrupt resource
1203  * @rf: RDMA PCI function
1204  *
1205  * Allocate a list for all device completion event queues
1206  * Create the ceq 0 and configure its msix interrupt vector
1207  * Return 0, if successfully set up, otherwise return error
1208  */
1209 static int irdma_setup_ceq_0(struct irdma_pci_f *rf)
1210 {
1211 	struct irdma_ceq *iwceq;
1212 	struct irdma_msix_vector *msix_vec;
1213 	u32 i;
1214 	int status = 0;
1215 	u32 num_ceqs;
1216 
1217 	num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs);
1218 	rf->ceqlist = kcalloc(num_ceqs, sizeof(*rf->ceqlist), GFP_KERNEL);
1219 	if (!rf->ceqlist) {
1220 		status = -ENOMEM;
1221 		goto exit;
1222 	}
1223 
1224 	iwceq = &rf->ceqlist[0];
1225 	status = irdma_create_ceq(rf, iwceq, 0, &rf->default_vsi);
1226 	if (status) {
1227 		ibdev_dbg(&rf->iwdev->ibdev, "ERR: create ceq status = %d\n",
1228 			  status);
1229 		goto exit;
1230 	}
1231 
1232 	spin_lock_init(&iwceq->ce_lock);
1233 	i = rf->msix_shared ? 0 : 1;
1234 	msix_vec = &rf->iw_msixtbl[i];
1235 	iwceq->irq = msix_vec->irq;
1236 	iwceq->msix_idx = msix_vec->idx;
1237 	status = irdma_cfg_ceq_vector(rf, iwceq, 0, msix_vec);
1238 	if (status) {
1239 		irdma_destroy_ceq(rf, iwceq);
1240 		goto exit;
1241 	}
1242 
1243 	irdma_ena_intr(&rf->sc_dev, msix_vec->idx);
1244 	rf->ceqs_count++;
1245 
1246 exit:
1247 	if (status && !rf->ceqs_count) {
1248 		kfree(rf->ceqlist);
1249 		rf->ceqlist = NULL;
1250 		return status;
1251 	}
1252 	rf->sc_dev.ceq_valid = true;
1253 
1254 	return 0;
1255 }
1256 
1257 /**
1258  * irdma_setup_ceqs - manage the device ceqs and their interrupt resources
1259  * @rf: RDMA PCI function
1260  * @vsi: VSI structure for this CEQ
1261  *
1262  * Allocate a list for all device completion event queues
1263  * Create the ceqs and configure their msix interrupt vectors
1264  * Return 0, if ceqs are successfully set up, otherwise return error
1265  */
1266 static int irdma_setup_ceqs(struct irdma_pci_f *rf, struct irdma_sc_vsi *vsi)
1267 {
1268 	u32 i;
1269 	u32 ceq_id;
1270 	struct irdma_ceq *iwceq;
1271 	struct irdma_msix_vector *msix_vec;
1272 	int status;
1273 	u32 num_ceqs;
1274 
1275 	num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs);
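	/* CEQ 0 was created in irdma_setup_ceq_0 on msix entry 0 (shared
	 * with the AEQ) or entry 1, so the remaining CEQs start at entry
	 * 1 or 2 respectively.
	 */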
1276 	i = (rf->msix_shared) ? 1 : 2;
1277 	for (ceq_id = 1; i < num_ceqs; i++, ceq_id++) {
1278 		iwceq = &rf->ceqlist[ceq_id];
1279 		status = irdma_create_ceq(rf, iwceq, ceq_id, vsi);
1280 		if (status) {
1281 			ibdev_dbg(&rf->iwdev->ibdev,
1282 				  "ERR: create ceq status = %d\n", status);
1283 			goto del_ceqs;
1284 		}
1285 		spin_lock_init(&iwceq->ce_lock);
1286 		msix_vec = &rf->iw_msixtbl[i];
1287 		iwceq->irq = msix_vec->irq;
1288 		iwceq->msix_idx = msix_vec->idx;
1289 		status = irdma_cfg_ceq_vector(rf, iwceq, ceq_id, msix_vec);
1290 		if (status) {
1291 			irdma_destroy_ceq(rf, iwceq);
1292 			goto del_ceqs;
1293 		}
1294 		irdma_ena_intr(&rf->sc_dev, msix_vec->idx);
1295 		rf->ceqs_count++;
1296 	}
1297 
1298 	return 0;
1299 
1300 del_ceqs:
1301 	irdma_del_ceqs(rf);
1302 
1303 	return status;
1304 }
1305 
1306 static int irdma_create_virt_aeq(struct irdma_pci_f *rf, u32 size)
1307 {
1308 	struct irdma_aeq *aeq = &rf->aeq;
1309 	dma_addr_t *pg_arr;
1310 	u32 pg_cnt;
1311 	int status;
1312 
1313 	if (rf->rdma_ver < IRDMA_GEN_2)
1314 		return -EOPNOTSUPP;
1315 
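	/* Virtually-mapped AEQ (GEN_2+ only): back the queue with
	 * vmalloc'ed pages and expose them to HW through a level-1
	 * PBLE list.
	 */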
1316 	aeq->mem.size = sizeof(struct irdma_sc_aeqe) * size;
1317 	aeq->mem.va = vzalloc(aeq->mem.size);
1318 
1319 	if (!aeq->mem.va)
1320 		return -ENOMEM;
1321 
1322 	pg_cnt = DIV_ROUND_UP(aeq->mem.size, PAGE_SIZE);
1323 	status = irdma_get_pble(rf->pble_rsrc, &aeq->palloc, pg_cnt, true);
1324 	if (status) {
1325 		vfree(aeq->mem.va);
1326 		return status;
1327 	}
1328 
1329 	pg_arr = (dma_addr_t *)aeq->palloc.level1.addr;
1330 	status = irdma_map_vm_page_list(&rf->hw, aeq->mem.va, pg_arr, pg_cnt);
1331 	if (status) {
1332 		irdma_free_pble(rf->pble_rsrc, &aeq->palloc);
1333 		vfree(aeq->mem.va);
1334 		return status;
1335 	}
1336 
1337 	return 0;
1338 }
1339 
1340 /**
1341  * irdma_create_aeq - create async event queue
1342  * @rf: RDMA PCI function
1343  *
1344  * Return 0, if the aeq and the resources associated with it
1345  * are successfully created, otherwise return error
1346  */
1347 static int irdma_create_aeq(struct irdma_pci_f *rf)
1348 {
1349 	struct irdma_aeq_init_info info = {};
1350 	struct irdma_sc_dev *dev = &rf->sc_dev;
1351 	struct irdma_aeq *aeq = &rf->aeq;
1352 	struct irdma_hmc_info *hmc_info = rf->sc_dev.hmc_info;
1353 	u32 aeq_size;
1354 	u8 multiplier = (rf->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) ? 2 : 1;
1355 	int status;
1356 
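	/* Worst-case AEQ depth: one entry per CQ plus one per QP (two per
	 * QP when running iWARP only), capped at the hardware maximum.
	 */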
1357 	aeq_size = multiplier * hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt +
1358 		   hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt;
1359 	aeq_size = min(aeq_size, dev->hw_attrs.max_hw_aeq_size);
1360 
1361 	aeq->mem.size = ALIGN(sizeof(struct irdma_sc_aeqe) * aeq_size,
1362 			      IRDMA_AEQ_ALIGNMENT);
1363 	aeq->mem.va = dma_alloc_coherent(dev->hw->device, aeq->mem.size,
1364 					 &aeq->mem.pa,
1365 					 GFP_KERNEL | __GFP_NOWARN);
1366 	if (aeq->mem.va)
1367 		goto skip_virt_aeq;
1368 
1369 	/* physically mapped aeq failed. setup virtual aeq */
1370 	status = irdma_create_virt_aeq(rf, aeq_size);
1371 	if (status)
1372 		return status;
1373 
1374 	info.virtual_map = true;
1375 	aeq->virtual_map = info.virtual_map;
1376 	info.pbl_chunk_size = 1;
1377 	info.first_pm_pbl_idx = aeq->palloc.level1.idx;
1378 
1379 skip_virt_aeq:
1380 	info.aeqe_base = aeq->mem.va;
1381 	info.aeq_elem_pa = aeq->mem.pa;
1382 	info.elem_cnt = aeq_size;
1383 	info.dev = dev;
1384 	info.msix_idx = rf->iw_msixtbl->idx;
1385 	status = irdma_sc_aeq_init(&aeq->sc_aeq, &info);
1386 	if (status)
1387 		goto err;
1388 
1389 	status = irdma_cqp_aeq_cmd(dev, &aeq->sc_aeq, IRDMA_OP_AEQ_CREATE);
1390 	if (status)
1391 		goto err;
1392 
1393 	return 0;
1394 
1395 err:
1396 	if (aeq->virtual_map) {
1397 		irdma_destroy_virt_aeq(rf);
1398 	} else {
1399 		dma_free_coherent(dev->hw->device, aeq->mem.size, aeq->mem.va,
1400 				  aeq->mem.pa);
1401 		aeq->mem.va = NULL;
1402 	}
1403 
1404 	return status;
1405 }
1406 
1407 /**
1408  * irdma_setup_aeq - set up the device aeq
1409  * @rf: RDMA PCI function
1410  *
1411  * Create the aeq and configure its msix interrupt vector
1412  * Return 0 if successful, otherwise return error
1413  */
1414 static int irdma_setup_aeq(struct irdma_pci_f *rf)
1415 {
1416 	struct irdma_sc_dev *dev = &rf->sc_dev;
1417 	int status;
1418 
1419 	status = irdma_create_aeq(rf);
1420 	if (status)
1421 		return status;
1422 
1423 	status = irdma_cfg_aeq_vector(rf);
1424 	if (status) {
1425 		irdma_destroy_aeq(rf);
1426 		return status;
1427 	}
1428 
1429 	if (!rf->msix_shared)
1430 		irdma_ena_intr(dev, rf->iw_msixtbl[0].idx);
1431 
1432 	return 0;
1433 }
1434 
1435 /**
1436  * irdma_initialize_ilq - create iwarp local queue for cm
1437  * @iwdev: irdma device
1438  *
1439  * Return 0 if successful, otherwise return error
1440  */
1441 static int irdma_initialize_ilq(struct irdma_device *iwdev)
1442 {
1443 	struct irdma_puda_rsrc_info info = {};
1444 	int status;
1445 
1446 	info.type = IRDMA_PUDA_RSRC_TYPE_ILQ;
1447 	info.cq_id = 1;
1448 	info.qp_id = 1;
1449 	info.count = 1;
1450 	info.pd_id = 1;
1451 	info.abi_ver = IRDMA_ABI_VER;
1452 	info.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768);
1453 	info.rq_size = info.sq_size;
1454 	info.buf_size = 1024;
1455 	info.tx_buf_cnt = 2 * info.sq_size;
1456 	info.receive = irdma_receive_ilq;
1457 	info.xmit_complete = irdma_free_sqbuf;
1458 	status = irdma_puda_create_rsrc(&iwdev->vsi, &info);
1459 	if (status)
1460 		ibdev_dbg(&iwdev->ibdev, "ERR: ilq create fail\n");
1461 
1462 	return status;
1463 }
1464 
1465 /**
1466  * irdma_initialize_ieq - create iwarp exception queue
1467  * @iwdev: irdma device
1468  *
1469  * Return 0 if successful, otherwise return error
1470  */
1471 static int irdma_initialize_ieq(struct irdma_device *iwdev)
1472 {
1473 	struct irdma_puda_rsrc_info info = {};
1474 	int status;
1475 
1476 	info.type = IRDMA_PUDA_RSRC_TYPE_IEQ;
1477 	info.cq_id = 2;
1478 	info.qp_id = iwdev->vsi.exception_lan_q;
1479 	info.count = 1;
1480 	info.pd_id = 2;
1481 	info.abi_ver = IRDMA_ABI_VER;
1482 	info.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768);
1483 	info.rq_size = info.sq_size;
1484 	info.buf_size = iwdev->vsi.mtu + IRDMA_IPV4_PAD;
1485 	info.tx_buf_cnt = 4096;
1486 	status = irdma_puda_create_rsrc(&iwdev->vsi, &info);
1487 	if (status)
1488 		ibdev_dbg(&iwdev->ibdev, "ERR: ieq create fail\n");
1489 
1490 	return status;
1491 }
1492 
1493 /**
1494  * irdma_reinitialize_ieq - destroy and re-create ieq
1495  * @vsi: VSI structure
1496  */
1497 void irdma_reinitialize_ieq(struct irdma_sc_vsi *vsi)
1498 {
1499 	struct irdma_device *iwdev = vsi->back_vsi;
1500 	struct irdma_pci_f *rf = iwdev->rf;
1501 
1502 	irdma_puda_dele_rsrc(vsi, IRDMA_PUDA_RSRC_TYPE_IEQ, false);
1503 	if (irdma_initialize_ieq(iwdev)) {
1504 		iwdev->rf->reset = true;
1505 		rf->gen_ops.request_reset(rf);
1506 	}
1507 }
1508 
1509 /**
1510  * irdma_hmc_setup - create hmc objects for the device
1511  * @rf: RDMA PCI function
1512  *
1513  * Set up the device private memory space for the number and size of
1514  * the hmc objects and create the objects
1515  * Return 0 if successful, otherwise return error
1516  */
1517 static int irdma_hmc_setup(struct irdma_pci_f *rf)
1518 {
1519 	int status;
1520 	u32 qpcnt;
1521 
1522 	qpcnt = rsrc_limits_table[rf->limits_sel].qplimit;
1523 
1524 	rf->sd_type = IRDMA_SD_TYPE_DIRECT;
1525 	status = irdma_cfg_fpm_val(&rf->sc_dev, qpcnt);
1526 	if (status)
1527 		return status;
1528 
1529 	status = irdma_create_hmc_objs(rf, true, rf->rdma_ver);
1530 
1531 	return status;
1532 }
1533 
1534 /**
1535  * irdma_del_init_mem - deallocate memory resources
1536  * @rf: RDMA PCI function
1537  */
1538 static void irdma_del_init_mem(struct irdma_pci_f *rf)
1539 {
1540 	struct irdma_sc_dev *dev = &rf->sc_dev;
1541 
1542 	kfree(dev->hmc_info->sd_table.sd_entry);
1543 	dev->hmc_info->sd_table.sd_entry = NULL;
1544 	kfree(rf->mem_rsrc);
1545 	rf->mem_rsrc = NULL;
1546 	dma_free_coherent(rf->hw.device, rf->obj_mem.size, rf->obj_mem.va,
1547 			  rf->obj_mem.pa);
1548 	rf->obj_mem.va = NULL;
1549 	if (rf->rdma_ver != IRDMA_GEN_1) {
1550 		bitmap_free(rf->allocated_ws_nodes);
1551 		rf->allocated_ws_nodes = NULL;
1552 	}
1553 	kfree(rf->ceqlist);
1554 	rf->ceqlist = NULL;
1555 	kfree(rf->iw_msixtbl);
1556 	rf->iw_msixtbl = NULL;
1557 	kfree(rf->hmc_info_mem);
1558 	rf->hmc_info_mem = NULL;
1559 }
1560 
1561 /**
1562  * irdma_initialize_dev - initialize device
1563  * @rf: RDMA PCI function
1564  *
1565  * Allocate memory for the hmc objects and initialize iwdev
1566  * Return 0 if successful, otherwise clean up the resources
1567  * and return error
1568  */
1569 static int irdma_initialize_dev(struct irdma_pci_f *rf)
1570 {
1571 	int status;
1572 	struct irdma_sc_dev *dev = &rf->sc_dev;
1573 	struct irdma_device_init_info info = {};
1574 	struct irdma_dma_mem mem;
1575 	u32 size;
1576 
1577 	size = sizeof(struct irdma_hmc_pble_rsrc) +
1578 	       sizeof(struct irdma_hmc_info) +
1579 	       (sizeof(struct irdma_hmc_obj_info) * IRDMA_HMC_IW_MAX);
1580 
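	/* A single allocation backs the PBLE resource struct, the HMC
	 * info struct and the per-object-type info array; pointers into
	 * it are carved out below.
	 */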
1581 	rf->hmc_info_mem = kzalloc(size, GFP_KERNEL);
1582 	if (!rf->hmc_info_mem)
1583 		return -ENOMEM;
1584 
1585 	rf->pble_rsrc = (struct irdma_hmc_pble_rsrc *)rf->hmc_info_mem;
1586 	dev->hmc_info = &rf->hw.hmc;
1587 	dev->hmc_info->hmc_obj = (struct irdma_hmc_obj_info *)
1588 				 (rf->pble_rsrc + 1);
1589 
1590 	status = irdma_obj_aligned_mem(rf, &mem, IRDMA_QUERY_FPM_BUF_SIZE,
1591 				       IRDMA_FPM_QUERY_BUF_ALIGNMENT_M);
1592 	if (status)
1593 		goto error;
1594 
1595 	info.fpm_query_buf_pa = mem.pa;
1596 	info.fpm_query_buf = mem.va;
1597 
1598 	status = irdma_obj_aligned_mem(rf, &mem, IRDMA_COMMIT_FPM_BUF_SIZE,
1599 				       IRDMA_FPM_COMMIT_BUF_ALIGNMENT_M);
1600 	if (status)
1601 		goto error;
1602 
1603 	info.fpm_commit_buf_pa = mem.pa;
1604 	info.fpm_commit_buf = mem.va;
1605 
1606 	info.bar0 = rf->hw.hw_addr;
1607 	info.hmc_fn_id = rf->pf_id;
1608 	info.hw = &rf->hw;
1609 	status = irdma_sc_dev_init(rf->rdma_ver, &rf->sc_dev, &info);
1610 	if (status)
1611 		goto error;
1612 
1613 	return status;
1614 error:
1615 	kfree(rf->hmc_info_mem);
1616 	rf->hmc_info_mem = NULL;
1617 
1618 	return status;
1619 }
1620 
1621 /**
1622  * irdma_rt_deinit_hw - clean up the irdma device resources
1623  * @iwdev: irdma device
1624  *
1625  * remove the mac ip entry and ipv4/ipv6 addresses, destroy the
1626  * device queues and free the pble and the hmc objects
1627  */
1628 void irdma_rt_deinit_hw(struct irdma_device *iwdev)
1629 {
1630 	ibdev_dbg(&iwdev->ibdev, "INIT: state = %d\n", iwdev->init_state);
1631 
1632 	switch (iwdev->init_state) {
1633 	case IP_ADDR_REGISTERED:
1634 		if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
1635 			irdma_del_local_mac_entry(iwdev->rf,
1636 						  (u8)iwdev->mac_ip_table_idx);
1637 		fallthrough;
1638 	case AEQ_CREATED:
1639 	case PBLE_CHUNK_MEM:
1640 	case CEQS_CREATED:
1641 	case IEQ_CREATED:
1642 		if (!iwdev->roce_mode)
1643 			irdma_puda_dele_rsrc(&iwdev->vsi, IRDMA_PUDA_RSRC_TYPE_IEQ,
1644 					     iwdev->rf->reset);
1645 		fallthrough;
1646 	case ILQ_CREATED:
1647 		if (!iwdev->roce_mode)
1648 			irdma_puda_dele_rsrc(&iwdev->vsi,
1649 					     IRDMA_PUDA_RSRC_TYPE_ILQ,
1650 					     iwdev->rf->reset);
1651 		break;
1652 	default:
1653 		ibdev_warn(&iwdev->ibdev, "bad init_state = %d\n", iwdev->init_state);
1654 		break;
1655 	}
1656 
1657 	irdma_cleanup_cm_core(&iwdev->cm_core);
1658 	if (iwdev->vsi.pestat) {
1659 		irdma_vsi_stats_free(&iwdev->vsi);
1660 		kfree(iwdev->vsi.pestat);
1661 	}
1662 	if (iwdev->cleanup_wq)
1663 		destroy_workqueue(iwdev->cleanup_wq);
1664 }
1665 
1666 static int irdma_setup_init_state(struct irdma_pci_f *rf)
1667 {
1668 	int status;
1669 
1670 	status = irdma_save_msix_info(rf);
1671 	if (status)
1672 		return status;
1673 
1674 	rf->hw.device = &rf->pcidev->dev;
1675 	rf->obj_mem.size = ALIGN(8192, IRDMA_HW_PAGE_SIZE);
1676 	rf->obj_mem.va = dma_alloc_coherent(rf->hw.device, rf->obj_mem.size,
1677 					    &rf->obj_mem.pa, GFP_KERNEL);
1678 	if (!rf->obj_mem.va) {
1679 		status = -ENOMEM;
1680 		goto clean_msixtbl;
1681 	}
1682 
1683 	rf->obj_next = rf->obj_mem;
1684 	status = irdma_initialize_dev(rf);
1685 	if (status)
1686 		goto clean_obj_mem;
1687 
1688 	return 0;
1689 
1690 clean_obj_mem:
1691 	dma_free_coherent(rf->hw.device, rf->obj_mem.size, rf->obj_mem.va,
1692 			  rf->obj_mem.pa);
1693 	rf->obj_mem.va = NULL;
1694 clean_msixtbl:
1695 	kfree(rf->iw_msixtbl);
1696 	rf->iw_msixtbl = NULL;
1697 	return status;
1698 }
1699 
1700 /**
1701  * irdma_get_used_rsrc - determine resources used internally
1702  * @iwdev: irdma device
1703  *
1704  * Called at the end of open to get all internal allocations
1705  */
1706 static void irdma_get_used_rsrc(struct irdma_device *iwdev)
1707 {
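	/* Resource IDs have been handed out contiguously from bit 0 at
	 * this point, so the first clear bit in each bitmap equals the
	 * count consumed internally by the driver.
	 */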
1708 	iwdev->rf->used_pds = find_first_zero_bit(iwdev->rf->allocated_pds,
1709 						 iwdev->rf->max_pd);
1710 	iwdev->rf->used_qps = find_first_zero_bit(iwdev->rf->allocated_qps,
1711 						 iwdev->rf->max_qp);
1712 	iwdev->rf->used_cqs = find_first_zero_bit(iwdev->rf->allocated_cqs,
1713 						 iwdev->rf->max_cq);
1714 	iwdev->rf->used_mrs = find_first_zero_bit(iwdev->rf->allocated_mrs,
1715 						 iwdev->rf->max_mr);
1716 }
1717 
1718 void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf)
1719 {
1720 	enum init_completion_state state = rf->init_state;
1721 
1722 	rf->init_state = INVALID_STATE;
1723 	if (rf->rsrc_created) {
1724 		irdma_destroy_aeq(rf);
1725 		irdma_destroy_pble_prm(rf->pble_rsrc);
1726 		irdma_del_ceqs(rf);
1727 		rf->rsrc_created = false;
1728 	}
1729 	switch (state) {
1730 	case CEQ0_CREATED:
1731 		irdma_del_ceq_0(rf);
1732 		fallthrough;
1733 	case CCQ_CREATED:
1734 		irdma_destroy_ccq(rf);
1735 		fallthrough;
1736 	case HW_RSRC_INITIALIZED:
1737 	case HMC_OBJS_CREATED:
1738 		irdma_del_hmc_objects(&rf->sc_dev, rf->sc_dev.hmc_info, true,
1739 				      rf->reset, rf->rdma_ver);
1740 		fallthrough;
1741 	case CQP_CREATED:
1742 		irdma_destroy_cqp(rf, true);
1743 		fallthrough;
1744 	case INITIAL_STATE:
1745 		irdma_del_init_mem(rf);
1746 		break;
1747 	case INVALID_STATE:
1748 	default:
1749 		ibdev_warn(&rf->iwdev->ibdev, "bad init_state = %d\n", rf->init_state);
1750 		break;
1751 	}
1752 }
1753 
1754 /**
1755  * irdma_rt_init_hw - Initializes runtime portion of HW
1756  * @iwdev: irdma device
1757  * @l2params: qos, tc, mtu info from netdev driver
1758  *
1759  * Create device queues ILQ, IEQ, CEQs and PBLEs. Set up irdma
1760  * device resource objects.
1761  */
1762 int irdma_rt_init_hw(struct irdma_device *iwdev,
1763 		     struct irdma_l2params *l2params)
1764 {
1765 	struct irdma_pci_f *rf = iwdev->rf;
1766 	struct irdma_sc_dev *dev = &rf->sc_dev;
1767 	struct irdma_vsi_init_info vsi_info = {};
1768 	struct irdma_vsi_stats_info stats_info = {};
1769 	int status;
1770 
1771 	vsi_info.dev = dev;
1772 	vsi_info.back_vsi = iwdev;
1773 	vsi_info.params = l2params;
1774 	vsi_info.pf_data_vsi_num = iwdev->vsi_num;
1775 	vsi_info.register_qset = rf->gen_ops.register_qset;
1776 	vsi_info.unregister_qset = rf->gen_ops.unregister_qset;
1777 	vsi_info.exception_lan_q = 2;
1778 	irdma_sc_vsi_init(&iwdev->vsi, &vsi_info);
1779 
1780 	status = irdma_setup_cm_core(iwdev, rf->rdma_ver);
1781 	if (status)
1782 		return status;
1783 
1784 	stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL);
1785 	if (!stats_info.pestat) {
1786 		irdma_cleanup_cm_core(&iwdev->cm_core);
1787 		return -ENOMEM;
1788 	}
1789 	stats_info.fcn_id = dev->hmc_fn_id;
1790 	status = irdma_vsi_stats_init(&iwdev->vsi, &stats_info);
1791 	if (status) {
1792 		irdma_cleanup_cm_core(&iwdev->cm_core);
1793 		kfree(stats_info.pestat);
1794 		return status;
1795 	}
1796 
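	/* As in irdma_ctrl_init_hw, iwdev->init_state records how far
	 * bring-up got so irdma_rt_deinit_hw can unwind a partial init.
	 */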
1797 	do {
1798 		if (!iwdev->roce_mode) {
1799 			status = irdma_initialize_ilq(iwdev);
1800 			if (status)
1801 				break;
1802 			iwdev->init_state = ILQ_CREATED;
1803 			status = irdma_initialize_ieq(iwdev);
1804 			if (status)
1805 				break;
1806 			iwdev->init_state = IEQ_CREATED;
1807 		}
1808 		if (!rf->rsrc_created) {
1809 			status = irdma_setup_ceqs(rf, &iwdev->vsi);
1810 			if (status)
1811 				break;
1812 
1813 			iwdev->init_state = CEQS_CREATED;
1814 
1815 			status = irdma_hmc_init_pble(&rf->sc_dev,
1816 						     rf->pble_rsrc);
1817 			if (status) {
1818 				irdma_del_ceqs(rf);
1819 				break;
1820 			}
1821 
1822 			iwdev->init_state = PBLE_CHUNK_MEM;
1823 
1824 			status = irdma_setup_aeq(rf);
1825 			if (status) {
1826 				irdma_destroy_pble_prm(rf->pble_rsrc);
1827 				irdma_del_ceqs(rf);
1828 				break;
1829 			}
1830 			iwdev->init_state = AEQ_CREATED;
1831 			rf->rsrc_created = true;
1832 		}
1833 
1834 		if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
1835 			irdma_alloc_set_mac(iwdev);
1836 		irdma_add_ip(iwdev);
1837 		iwdev->init_state = IP_ADDR_REGISTERED;
1838 
1839 		/* handles async cleanup tasks - disconnect CM, free qp,
1840 		 * free cq bufs
1841 		 */
1842 		iwdev->cleanup_wq = alloc_workqueue("irdma-cleanup-wq",
1843 					WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
1844 		if (!iwdev->cleanup_wq)
1845 			return -ENOMEM;
1846 		irdma_get_used_rsrc(iwdev);
1847 		init_waitqueue_head(&iwdev->suspend_wq);
1848 
1849 		return 0;
1850 	} while (0);
1851 
1852 	dev_err(&rf->pcidev->dev, "HW runtime init FAIL status = %d last cmpl = %d\n",
1853 		status, iwdev->init_state);
1854 	irdma_rt_deinit_hw(iwdev);
1855 
1856 	return status;
1857 }
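
/*
 * irdma_rt_init_hw() relies on the do { ... } while (0) idiom: every
 * failing step break()s out to one shared error path, which logs the
 * last completed init_state and unwinds via irdma_rt_deinit_hw(). A
 * hedged, self-contained sketch of the idiom (helpers hypothetical):
 *
 *	do {
 *		status = step_a();
 *		if (status)
 *			break;
 *		state = A_CREATED;
 *
 *		status = step_b();
 *		if (status)
 *			break;
 *		state = B_CREATED;
 *
 *		return 0;
 *	} while (0);
 *
 *	undo_everything_up_to(state);
 *	return status;
 */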
1858 
1859 /**
1860  * irdma_ctrl_init_hw - Initializes control portion of HW
1861  * @rf: RDMA PCI function
1862  *
1863  * Create admin queues, HMC objects and RF resource objects
1864  */
1865 int irdma_ctrl_init_hw(struct irdma_pci_f *rf)
1866 {
1867 	struct irdma_sc_dev *dev = &rf->sc_dev;
1868 	int status;
1869 	do {
1870 		status = irdma_setup_init_state(rf);
1871 		if (status)
1872 			break;
1873 		rf->init_state = INITIAL_STATE;
1874 
1875 		status = irdma_create_cqp(rf);
1876 		if (status)
1877 			break;
1878 		rf->init_state = CQP_CREATED;
1879 
1880 		status = irdma_hmc_setup(rf);
1881 		if (status)
1882 			break;
1883 		rf->init_state = HMC_OBJS_CREATED;
1884 
1885 		status = irdma_initialize_hw_rsrc(rf);
1886 		if (status)
1887 			break;
1888 		rf->init_state = HW_RSRC_INITIALIZED;
1889 
1890 		status = irdma_create_ccq(rf);
1891 		if (status)
1892 			break;
1893 		rf->init_state = CCQ_CREATED;
1894 
1895 		dev->feature_info[IRDMA_FEATURE_FW_INFO] = IRDMA_FW_VER_DEFAULT;
1896 		if (rf->rdma_ver != IRDMA_GEN_1) {
1897 			status = irdma_get_rdma_features(dev);
1898 			if (status)
1899 				break;
1900 		}
1901 
1902 		status = irdma_setup_ceq_0(rf);
1903 		if (status)
1904 			break;
1905 		rf->init_state = CEQ0_CREATED;
1906 		/* Handles processing of CQP completions */
1907 		rf->cqp_cmpl_wq = alloc_ordered_workqueue("cqp_cmpl_wq",
1908 						WQ_HIGHPRI | WQ_UNBOUND);
1909 		if (!rf->cqp_cmpl_wq) {
1910 			status = -ENOMEM;
1911 			break;
1912 		}
1913 		INIT_WORK(&rf->cqp_cmpl_work, cqp_compl_worker);
1914 		irdma_sc_ccq_arm(dev->ccq);
1915 		return 0;
1916 	} while (0);
1917 
1918 	dev_err(&rf->pcidev->dev, "IRDMA hardware initialization FAILED init_state=%d status=%d\n",
1919 		rf->init_state, status);
1920 	irdma_ctrl_deinit_hw(rf);
1921 	return status;
1922 }
1923 
1924 /**
1925  * irdma_set_hw_rsrc - set hw memory resources.
1926  * @rf: RDMA PCI function
1927  */
1928 static void irdma_set_hw_rsrc(struct irdma_pci_f *rf)
1929 {
1930 	rf->allocated_qps = (void *)(rf->mem_rsrc +
1931 		   (sizeof(struct irdma_arp_entry) * rf->arp_table_size));
1932 	rf->allocated_cqs = &rf->allocated_qps[BITS_TO_LONGS(rf->max_qp)];
1933 	rf->allocated_mrs = &rf->allocated_cqs[BITS_TO_LONGS(rf->max_cq)];
1934 	rf->allocated_pds = &rf->allocated_mrs[BITS_TO_LONGS(rf->max_mr)];
1935 	rf->allocated_ahs = &rf->allocated_pds[BITS_TO_LONGS(rf->max_pd)];
1936 	rf->allocated_mcgs = &rf->allocated_ahs[BITS_TO_LONGS(rf->max_ah)];
1937 	rf->allocated_arps = &rf->allocated_mcgs[BITS_TO_LONGS(rf->max_mcg)];
1938 	rf->qp_table = (struct irdma_qp **)
1939 		(&rf->allocated_arps[BITS_TO_LONGS(rf->arp_table_size)]);
1940 
1941 	spin_lock_init(&rf->rsrc_lock);
1942 	spin_lock_init(&rf->arp_lock);
1943 	spin_lock_init(&rf->qptable_lock);
1944 	spin_lock_init(&rf->qh_list_lock);
1945 }
1946 
1947 /**
1948  * irdma_calc_mem_rsrc_size - calculate memory resources size.
1949  * @rf: RDMA PCI function
1950  */
1951 static u32 irdma_calc_mem_rsrc_size(struct irdma_pci_f *rf)
1952 {
1953 	u32 rsrc_size;
1954 
1955 	rsrc_size = sizeof(struct irdma_arp_entry) * rf->arp_table_size;
1956 	rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_qp);
1957 	rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mr);
1958 	rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_cq);
1959 	rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_pd);
1960 	rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->arp_table_size);
1961 	rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_ah);
1962 	rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mcg);
1963 	rsrc_size += sizeof(struct irdma_qp **) * rf->max_qp;
1964 
1965 	return rsrc_size;
1966 }
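
/*
 * Only the total returned here matters: irdma_set_hw_rsrc() above
 * defines the actual offsets when it carves the single kzalloc'ed blob
 * into the ARP table, the resource bitmaps and the QP pointer table, so
 * the two functions must account for the same set of objects. Sketch of
 * the one-allocation carve (names hypothetical):
 *
 *	size = sizeof(struct foo_entry) * nentries;
 *	size += sizeof(unsigned long) * BITS_TO_LONGS(nbits);
 *	mem = kzalloc(size, GFP_KERNEL);
 *	entries = (struct foo_entry *)mem;
 *	bitmap = (unsigned long *)(mem +
 *				   sizeof(struct foo_entry) * nentries);
 */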
1967 
1968 /**
1969  * irdma_initialize_hw_rsrc - initialize hw resource tracking array
1970  * @rf: RDMA PCI function
1971  */
1972 u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)
1973 {
1974 	u32 rsrc_size;
1975 	u32 mrdrvbits;
1976 	u32 ret;
1977 
1978 	if (rf->rdma_ver != IRDMA_GEN_1) {
1979 		rf->allocated_ws_nodes = bitmap_zalloc(IRDMA_MAX_WS_NODES,
1980 						       GFP_KERNEL);
1981 		if (!rf->allocated_ws_nodes)
1982 			return -ENOMEM;
1983 
1984 		set_bit(0, rf->allocated_ws_nodes);
1985 		rf->max_ws_node_id = IRDMA_MAX_WS_NODES;
1986 	}
1987 	rf->max_cqe = rf->sc_dev.hw_attrs.uk_attrs.max_hw_cq_size;
1988 	rf->max_qp = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt;
1989 	rf->max_mr = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt;
1990 	rf->max_cq = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt;
1991 	rf->max_pd = rf->sc_dev.hw_attrs.max_hw_pds;
1992 	rf->arp_table_size = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].cnt;
1993 	rf->max_ah = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt;
1994 	rf->max_mcg = rf->max_qp;
1995 
1996 	rsrc_size = irdma_calc_mem_rsrc_size(rf);
1997 	rf->mem_rsrc = kzalloc(rsrc_size, GFP_KERNEL);
1998 	if (!rf->mem_rsrc) {
1999 		ret = -ENOMEM;
2000 		goto mem_rsrc_kzalloc_fail;
2001 	}
2002 
2003 	rf->arp_table = (struct irdma_arp_entry *)rf->mem_rsrc;
2004 
2005 	irdma_set_hw_rsrc(rf);
2006 
2007 	set_bit(0, rf->allocated_mrs);
2008 	set_bit(0, rf->allocated_qps);
2009 	set_bit(0, rf->allocated_cqs);
2010 	set_bit(0, rf->allocated_pds);
2011 	set_bit(0, rf->allocated_arps);
2012 	set_bit(0, rf->allocated_ahs);
2013 	set_bit(0, rf->allocated_mcgs);
2014 	set_bit(2, rf->allocated_qps); /* qp 2 IEQ */
2015 	set_bit(1, rf->allocated_qps); /* qp 1 ILQ */
2016 	set_bit(1, rf->allocated_cqs);
2017 	set_bit(1, rf->allocated_pds);
2018 	set_bit(2, rf->allocated_cqs);
2019 	set_bit(2, rf->allocated_pds);
2020 
2021 	INIT_LIST_HEAD(&rf->mc_qht_list.list);
2022 	/* stag index mask has a minimum of 14 bits */
2023 	mrdrvbits = 24 - max(get_count_order(rf->max_mr), 14);
2024 	rf->mr_stagmask = ~(((1 << mrdrvbits) - 1) << (32 - mrdrvbits));
2025 
2026 	return 0;
2027 
2028 mem_rsrc_kzalloc_fail:
2029 	bitmap_free(rf->allocated_ws_nodes);
2030 	rf->allocated_ws_nodes = NULL;
2031 
2032 	return ret;
2033 }
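
/*
 * Worked example for the mr_stagmask computation above: with
 * max_mr = 65536, get_count_order() returns 16, so
 * mrdrvbits = 24 - 16 = 8 and
 *
 *	mr_stagmask = ~(((1 << 8) - 1) << (32 - 8)) = 0x00ffffff
 *
 * i.e. the top 8 STag bits are reserved for the driver. For a small
 * max_mr the max(..., 14) clamp caps the reserved bits at 10:
 * max_mr = 4096 gives mrdrvbits = 24 - 14 = 10 and
 * mr_stagmask = 0x003fffff.
 */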
2034 
2035 /**
2036  * irdma_cqp_ce_handler - handle cqp completions
2037  * @rf: RDMA PCI function
2038  * @cq: cq for cqp completions
2039  */
2040 void irdma_cqp_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq)
2041 {
2042 	struct irdma_cqp_request *cqp_request;
2043 	struct irdma_sc_dev *dev = &rf->sc_dev;
2044 	u32 cqe_count = 0;
2045 	struct irdma_ccq_cqe_info info;
2046 	unsigned long flags;
2047 	int ret;
2048 
2049 	do {
2050 		memset(&info, 0, sizeof(info));
2051 		spin_lock_irqsave(&rf->cqp.compl_lock, flags);
2052 		ret = irdma_sc_ccq_get_cqe_info(cq, &info);
2053 		spin_unlock_irqrestore(&rf->cqp.compl_lock, flags);
2054 		if (ret)
2055 			break;
2056 
2057 		cqp_request = (struct irdma_cqp_request *)
2058 			      (unsigned long)info.scratch;
2059 		if (info.error && cqp_request &&
2060 		    irdma_cqp_crit_err(dev, cqp_request->info.cqp_cmd,
2061 				       info.maj_err_code, info.min_err_code))
2062 			ibdev_err(&rf->iwdev->ibdev, "cqp opcode = 0x%x maj_err_code = 0x%x min_err_code = 0x%x\n",
2063 				  info.op_code, info.maj_err_code, info.min_err_code);
2064 		if (cqp_request) {
2065 			cqp_request->compl_info.maj_err_code = info.maj_err_code;
2066 			cqp_request->compl_info.min_err_code = info.min_err_code;
2067 			cqp_request->compl_info.op_ret_val = info.op_ret_val;
2068 			cqp_request->compl_info.error = info.error;
2069 
2070 			if (cqp_request->waiting) {
2071 				cqp_request->request_done = true;
2072 				wake_up(&cqp_request->waitq);
2073 				irdma_put_cqp_request(&rf->cqp, cqp_request);
2074 			} else {
2075 				if (cqp_request->callback_fcn)
2076 					cqp_request->callback_fcn(cqp_request);
2077 				irdma_put_cqp_request(&rf->cqp, cqp_request);
2078 			}
2079 		}
2080 
2081 		cqe_count++;
2082 	} while (1);
2083 
2084 	if (cqe_count) {
2085 		irdma_process_bh(dev);
2086 		irdma_sc_ccq_arm(cq);
2087 	}
2088 }
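
/*
 * The scratch field is the glue here: submitters stash the request
 * pointer (cqp_info->in.u.<op>.scratch = (uintptr_t)cqp_request) and
 * this handler recovers it from info.scratch to complete the request.
 * Hedged sketch of the two submit modes serviced above:
 *
 *	req = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
 *		(synchronous: irdma_handle_cqp_op() sleeps until this
 *		 handler sets request_done and wakes req->waitq)
 *
 *	req = irdma_alloc_and_get_cqp_request(&rf->cqp, false);
 *	req->callback_fcn = my_done_fn;
 *		(asynchronous: my_done_fn, a hypothetical completion
 *		 handler, runs right here in irdma_cqp_ce_handler())
 */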
2089 
2090 /**
2091  * cqp_compl_worker - Handle cqp completions
2092  * @work: Pointer to work structure
2093  */
2094 void cqp_compl_worker(struct work_struct *work)
2095 {
2096 	struct irdma_pci_f *rf = container_of(work, struct irdma_pci_f,
2097 					      cqp_cmpl_work);
2098 	struct irdma_sc_cq *cq = &rf->ccq.sc_cq;
2099 
2100 	irdma_cqp_ce_handler(rf, cq);
2101 }
2102 
2103 /**
2104  * irdma_lookup_apbvt_entry - lookup hash table for an existing apbvt entry corresponding to port
2105  * @cm_core: cm's core
2106  * @port: port to identify apbvt entry
2107  */
2108 static struct irdma_apbvt_entry *irdma_lookup_apbvt_entry(struct irdma_cm_core *cm_core,
2109 							  u16 port)
2110 {
2111 	struct irdma_apbvt_entry *entry;
2112 
2113 	hash_for_each_possible(cm_core->apbvt_hash_tbl, entry, hlist, port) {
2114 		if (entry->port == port) {
2115 			entry->use_cnt++;
2116 			return entry;
2117 		}
2118 	}
2119 
2120 	return NULL;
2121 }
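
/*
 * APBVT entries live in a kernel hashtable keyed by TCP port:
 * irdma_add_apbvt() below inserts with hash_add(), and this helper
 * walks only the matching bucket with hash_for_each_possible(), bumping
 * use_cnt instead of issuing a second CQP command for the same port.
 * Minimal sketch of that <linux/hashtable.h> pattern:
 *
 *	DEFINE_HASHTABLE(tbl, 8);
 *
 *	hash_add(tbl, &obj->hlist, key);
 *	hash_for_each_possible(tbl, obj, hlist, key)
 *		if (obj->key == key)
 *			return obj;
 */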
2122 
2123 /**
2124  * irdma_next_iw_state - modify qp state
2125  * @iwqp: iwarp qp to modify
2126  * @state: next state for qp
2127  * @del_hash: del hash
2128  * @term: term message
2129  * @termlen: length of term message
2130  */
2131 void irdma_next_iw_state(struct irdma_qp *iwqp, u8 state, u8 del_hash, u8 term,
2132 			 u8 termlen)
2133 {
2134 	struct irdma_modify_qp_info info = {};
2135 
2136 	info.next_iwarp_state = state;
2137 	info.remove_hash_idx = del_hash;
2138 	info.cq_num_valid = true;
2139 	info.arp_cache_idx_valid = true;
2140 	info.dont_send_term = true;
2141 	info.dont_send_fin = true;
2142 	info.termlen = termlen;
2143 
2144 	if (term & IRDMAQP_TERM_SEND_TERM_ONLY)
2145 		info.dont_send_term = false;
2146 	if (term & IRDMAQP_TERM_SEND_FIN_ONLY)
2147 		info.dont_send_fin = false;
2148 	if (iwqp->sc_qp.term_flags && state == IRDMA_QP_STATE_ERROR)
2149 		info.reset_tcp_conn = true;
2150 	iwqp->hw_iwarp_state = state;
2151 	irdma_hw_modify_qp(iwqp->iwdev, iwqp, &info, 0);
2152 	iwqp->iwarp_state = info.next_iwarp_state;
2153 }
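
/*
 * Example (hedged; values follow the flag checks above): push an iwarp
 * QP to ERROR and let the hardware emit only a TERM message:
 *
 *	irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 0,
 *			    IRDMAQP_TERM_SEND_TERM_ONLY, termlen);
 *
 * With term = 0 both dont_send_term and dont_send_fin stay true, so
 * neither TERM nor FIN goes out.
 */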
2154 
2155 /**
2156  * irdma_del_local_mac_entry - remove a mac entry from the hw
2157  * table
2158  * @rf: RDMA PCI function
2159  * @idx: the index of the mac ip address to delete
2160  */
2161 void irdma_del_local_mac_entry(struct irdma_pci_f *rf, u16 idx)
2162 {
2163 	struct irdma_cqp *iwcqp = &rf->cqp;
2164 	struct irdma_cqp_request *cqp_request;
2165 	struct cqp_cmds_info *cqp_info;
2166 
2167 	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
2168 	if (!cqp_request)
2169 		return;
2170 
2171 	cqp_info = &cqp_request->info;
2172 	cqp_info->cqp_cmd = IRDMA_OP_DELETE_LOCAL_MAC_ENTRY;
2173 	cqp_info->post_sq = 1;
2174 	cqp_info->in.u.del_local_mac_entry.cqp = &iwcqp->sc_cqp;
2175 	cqp_info->in.u.del_local_mac_entry.scratch = (uintptr_t)cqp_request;
2176 	cqp_info->in.u.del_local_mac_entry.entry_idx = idx;
2177 	cqp_info->in.u.del_local_mac_entry.ignore_ref_count = 0;
2178 
2179 	irdma_handle_cqp_op(rf, cqp_request);
2180 	irdma_put_cqp_request(iwcqp, cqp_request);
2181 }
2182 
2183 /**
2184  * irdma_add_local_mac_entry - add a mac ip address entry to the
2185  * hw table
2186  * @rf: RDMA PCI function
2187  * @mac_addr: pointer to mac address
2188  * @idx: the index of the mac ip address to add
2189  */
2190 int irdma_add_local_mac_entry(struct irdma_pci_f *rf, const u8 *mac_addr, u16 idx)
2191 {
2192 	struct irdma_local_mac_entry_info *info;
2193 	struct irdma_cqp *iwcqp = &rf->cqp;
2194 	struct irdma_cqp_request *cqp_request;
2195 	struct cqp_cmds_info *cqp_info;
2196 	int status;
2197 
2198 	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
2199 	if (!cqp_request)
2200 		return -ENOMEM;
2201 
2202 	cqp_info = &cqp_request->info;
2203 	cqp_info->post_sq = 1;
2204 	info = &cqp_info->in.u.add_local_mac_entry.info;
2205 	ether_addr_copy(info->mac_addr, mac_addr);
2206 	info->entry_idx = idx;
2207 	cqp_info->in.u.add_local_mac_entry.scratch = (uintptr_t)cqp_request;
2208 	cqp_info->cqp_cmd = IRDMA_OP_ADD_LOCAL_MAC_ENTRY;
2209 	cqp_info->in.u.add_local_mac_entry.cqp = &iwcqp->sc_cqp;
2211 
2212 	status = irdma_handle_cqp_op(rf, cqp_request);
2213 	irdma_put_cqp_request(iwcqp, cqp_request);
2214 
2215 	return status;
2216 }
2217 
2218 /**
2219  * irdma_alloc_local_mac_entry - allocate a mac entry
2220  * @rf: RDMA PCI function
2221  * @mac_tbl_idx: the index of the new mac address
2222  *
2223  * Allocate a mac address entry and update the mac_tbl_idx
2224  * to hold the index of the newly created mac address
2225  * Return 0 if successful, otherwise return error
2226  */
2227 int irdma_alloc_local_mac_entry(struct irdma_pci_f *rf, u16 *mac_tbl_idx)
2228 {
2229 	struct irdma_cqp *iwcqp = &rf->cqp;
2230 	struct irdma_cqp_request *cqp_request;
2231 	struct cqp_cmds_info *cqp_info;
2232 	int status = 0;
2233 
2234 	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
2235 	if (!cqp_request)
2236 		return -ENOMEM;
2237 
2238 	cqp_info = &cqp_request->info;
2239 	cqp_info->cqp_cmd = IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY;
2240 	cqp_info->post_sq = 1;
2241 	cqp_info->in.u.alloc_local_mac_entry.cqp = &iwcqp->sc_cqp;
2242 	cqp_info->in.u.alloc_local_mac_entry.scratch = (uintptr_t)cqp_request;
2243 	status = irdma_handle_cqp_op(rf, cqp_request);
2244 	if (!status)
2245 		*mac_tbl_idx = (u16)cqp_request->compl_info.op_ret_val;
2246 
2247 	irdma_put_cqp_request(iwcqp, cqp_request);
2248 
2249 	return status;
2250 }
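
/*
 * Typical lifecycle (sketch; error handling elided): reserve a table
 * index, program the address into it, and later release it by index:
 *
 *	u16 idx;
 *
 *	if (irdma_alloc_local_mac_entry(rf, &idx))
 *		return;
 *	if (irdma_add_local_mac_entry(rf, netdev->dev_addr, idx))
 *		irdma_del_local_mac_entry(rf, idx);
 */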
2251 
2252 /**
2253  * irdma_cqp_manage_apbvt_cmd - send cqp command manage apbvt
2254  * @iwdev: irdma device
2255  * @accel_local_port: port for apbvt
2256  * @add_port: add or delete port
2257  */
2258 static int irdma_cqp_manage_apbvt_cmd(struct irdma_device *iwdev,
2259 				      u16 accel_local_port, bool add_port)
2260 {
2261 	struct irdma_apbvt_info *info;
2262 	struct irdma_cqp_request *cqp_request;
2263 	struct cqp_cmds_info *cqp_info;
2264 	int status;
2265 
2266 	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, add_port);
2267 	if (!cqp_request)
2268 		return -ENOMEM;
2269 
2270 	cqp_info = &cqp_request->info;
2271 	info = &cqp_info->in.u.manage_apbvt_entry.info;
2272 	memset(info, 0, sizeof(*info));
2273 	info->add = add_port;
2274 	info->port = accel_local_port;
2275 	cqp_info->cqp_cmd = IRDMA_OP_MANAGE_APBVT_ENTRY;
2276 	cqp_info->post_sq = 1;
2277 	cqp_info->in.u.manage_apbvt_entry.cqp = &iwdev->rf->cqp.sc_cqp;
2278 	cqp_info->in.u.manage_apbvt_entry.scratch = (uintptr_t)cqp_request;
2279 	ibdev_dbg(&iwdev->ibdev, "DEV: %s: port=0x%04x\n",
2280 		  (!add_port) ? "DELETE" : "ADD", accel_local_port);
2281 
2282 	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2283 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2284 
2285 	return status;
2286 }
2287 
2288 /**
2289  * irdma_add_apbvt - add tcp port to HW apbvt table
2290  * @iwdev: irdma device
2291  * @port: port for apbvt
2292  */
2293 struct irdma_apbvt_entry *irdma_add_apbvt(struct irdma_device *iwdev, u16 port)
2294 {
2295 	struct irdma_cm_core *cm_core = &iwdev->cm_core;
2296 	struct irdma_apbvt_entry *entry;
2297 	unsigned long flags;
2298 
2299 	spin_lock_irqsave(&cm_core->apbvt_lock, flags);
2300 	entry = irdma_lookup_apbvt_entry(cm_core, port);
2301 	if (entry) {
2302 		spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
2303 		return entry;
2304 	}
2305 
2306 	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
2307 	if (!entry) {
2308 		spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
2309 		return NULL;
2310 	}
2311 
2312 	entry->port = port;
2313 	entry->use_cnt = 1;
2314 	hash_add(cm_core->apbvt_hash_tbl, &entry->hlist, entry->port);
2315 	spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
2316 
2317 	if (irdma_cqp_manage_apbvt_cmd(iwdev, port, true)) {
2318 		kfree(entry);
2319 		return NULL;
2320 	}
2321 
2322 	return entry;
2323 }
2324 
2325 /**
2326  * irdma_del_apbvt - delete tcp port from HW apbvt table
2327  * @iwdev: irdma device
2328  * @entry: apbvt entry object
2329  */
2330 void irdma_del_apbvt(struct irdma_device *iwdev,
2331 		     struct irdma_apbvt_entry *entry)
2332 {
2333 	struct irdma_cm_core *cm_core = &iwdev->cm_core;
2334 	unsigned long flags;
2335 
2336 	spin_lock_irqsave(&cm_core->apbvt_lock, flags);
2337 	if (--entry->use_cnt) {
2338 		spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
2339 		return;
2340 	}
2341 
2342 	hash_del(&entry->hlist);
2343 	/* apbvt_lock is held across CQP delete APBVT OP (non-waiting) to
2344 	 * protect against race where add APBVT CQP can race ahead of the delete
2345 	 * APBVT for same port.
2346 	 */
2347 	irdma_cqp_manage_apbvt_cmd(iwdev, entry->port, false);
2348 	kfree(entry);
2349 	spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
2350 }
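
/*
 * Usage sketch: entries are refcounted per port, so two listeners on
 * the same port share one HW APBVT slot and only the first add / last
 * delete reach the CQP:
 *
 *	e1 = irdma_add_apbvt(iwdev, port);	use_cnt 1, CQP ADD
 *	e2 = irdma_add_apbvt(iwdev, port);	e2 == e1, use_cnt 2
 *	irdma_del_apbvt(iwdev, e2);		use_cnt 1, no CQP op
 *	irdma_del_apbvt(iwdev, e1);		use_cnt 0, CQP DELETE
 */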
2351 
2352 /**
2353  * irdma_manage_arp_cache - manage hw arp cache
2354  * @rf: RDMA PCI function
2355  * @mac_addr: mac address ptr
2356  * @ip_addr: ip addr for arp cache
2357  * @ipv4: flag indicating IPv4
2358  * @action: add, delete or modify
2359  */
2360 void irdma_manage_arp_cache(struct irdma_pci_f *rf,
2361 			    const unsigned char *mac_addr,
2362 			    u32 *ip_addr, bool ipv4, u32 action)
2363 {
2364 	struct irdma_add_arp_cache_entry_info *info;
2365 	struct irdma_cqp_request *cqp_request;
2366 	struct cqp_cmds_info *cqp_info;
2367 	int arp_index;
2368 
2369 	arp_index = irdma_arp_table(rf, ip_addr, ipv4, mac_addr, action);
2370 	if (arp_index == -1)
2371 		return;
2372 
2373 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false);
2374 	if (!cqp_request)
2375 		return;
2376 
2377 	cqp_info = &cqp_request->info;
2378 	if (action == IRDMA_ARP_ADD) {
2379 		cqp_info->cqp_cmd = IRDMA_OP_ADD_ARP_CACHE_ENTRY;
2380 		info = &cqp_info->in.u.add_arp_cache_entry.info;
2381 		memset(info, 0, sizeof(*info));
2382 		info->arp_index = (u16)arp_index;
2383 		info->permanent = true;
2384 		ether_addr_copy(info->mac_addr, mac_addr);
2385 		cqp_info->in.u.add_arp_cache_entry.scratch =
2386 			(uintptr_t)cqp_request;
2387 		cqp_info->in.u.add_arp_cache_entry.cqp = &rf->cqp.sc_cqp;
2388 	} else {
2389 		cqp_info->cqp_cmd = IRDMA_OP_DELETE_ARP_CACHE_ENTRY;
2390 		cqp_info->in.u.del_arp_cache_entry.scratch =
2391 			(uintptr_t)cqp_request;
2392 		cqp_info->in.u.del_arp_cache_entry.cqp = &rf->cqp.sc_cqp;
2393 		cqp_info->in.u.del_arp_cache_entry.arp_index = arp_index;
2394 	}
2395 
2396 	cqp_info->post_sq = 1;
2397 	irdma_handle_cqp_op(rf, cqp_request);
2398 	irdma_put_cqp_request(&rf->cqp, cqp_request);
2399 }
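
/*
 * Example (hedged; locals hypothetical): pin an IPv4 neighbour in the
 * HW ARP cache and drop it again. Callers such as irdma_add_ip() pass
 * the address in host byte order:
 *
 *	u32 ip4 = ntohl(ifa->ifa_address);
 *
 *	irdma_manage_arp_cache(rf, netdev->dev_addr, &ip4, true,
 *			       IRDMA_ARP_ADD);
 *	...
 *	irdma_manage_arp_cache(rf, netdev->dev_addr, &ip4, true,
 *			       IRDMA_ARP_DELETE);
 */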
2400 
2401 /**
2402  * irdma_send_syn_cqp_callback - do syn/ack after qhash
2403  * @cqp_request: qhash cqp completion
2404  */
2405 static void irdma_send_syn_cqp_callback(struct irdma_cqp_request *cqp_request)
2406 {
2407 	struct irdma_cm_node *cm_node = cqp_request->param;
2408 
2409 	irdma_send_syn(cm_node, 1);
2410 	irdma_rem_ref_cm_node(cm_node);
2411 }
2412 
2413 /**
2414  * irdma_manage_qhash - add or modify qhash
2415  * @iwdev: irdma device
2416  * @cminfo: cm info for qhash
2417  * @etype: type (syn or quad)
2418  * @mtype: type of qhash
2419  * @cmnode: cmnode associated with connection
2420  * @wait: wait for completion
2421  */
2422 int irdma_manage_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cminfo,
2423 		       enum irdma_quad_entry_type etype,
2424 		       enum irdma_quad_hash_manage_type mtype, void *cmnode,
2425 		       bool wait)
2426 {
2427 	struct irdma_qhash_table_info *info;
2428 	struct irdma_cqp *iwcqp = &iwdev->rf->cqp;
2429 	struct irdma_cqp_request *cqp_request;
2430 	struct cqp_cmds_info *cqp_info;
2431 	struct irdma_cm_node *cm_node = cmnode;
2432 	int status;
2433 
2434 	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait);
2435 	if (!cqp_request)
2436 		return -ENOMEM;
2437 
2438 	cqp_info = &cqp_request->info;
2439 	info = &cqp_info->in.u.manage_qhash_table_entry.info;
2440 	memset(info, 0, sizeof(*info));
2441 	info->vsi = &iwdev->vsi;
2442 	info->manage = mtype;
2443 	info->entry_type = etype;
2444 	if (cminfo->vlan_id < VLAN_N_VID) {
2445 		info->vlan_valid = true;
2446 		info->vlan_id = cminfo->vlan_id;
2447 	} else {
2448 		info->vlan_valid = false;
2449 	}
2450 	info->ipv4_valid = cminfo->ipv4;
2451 	info->user_pri = cminfo->user_pri;
2452 	ether_addr_copy(info->mac_addr, iwdev->netdev->dev_addr);
2453 	info->qp_num = cminfo->qh_qpid;
2454 	info->dest_port = cminfo->loc_port;
2455 	info->dest_ip[0] = cminfo->loc_addr[0];
2456 	info->dest_ip[1] = cminfo->loc_addr[1];
2457 	info->dest_ip[2] = cminfo->loc_addr[2];
2458 	info->dest_ip[3] = cminfo->loc_addr[3];
2459 	if (etype == IRDMA_QHASH_TYPE_TCP_ESTABLISHED ||
2460 	    etype == IRDMA_QHASH_TYPE_UDP_UNICAST ||
2461 	    etype == IRDMA_QHASH_TYPE_UDP_MCAST ||
2462 	    etype == IRDMA_QHASH_TYPE_ROCE_MCAST ||
2463 	    etype == IRDMA_QHASH_TYPE_ROCEV2_HW) {
2464 		info->src_port = cminfo->rem_port;
2465 		info->src_ip[0] = cminfo->rem_addr[0];
2466 		info->src_ip[1] = cminfo->rem_addr[1];
2467 		info->src_ip[2] = cminfo->rem_addr[2];
2468 		info->src_ip[3] = cminfo->rem_addr[3];
2469 	}
2470 	if (cmnode) {
2471 		cqp_request->callback_fcn = irdma_send_syn_cqp_callback;
2472 		cqp_request->param = cmnode;
2473 		if (!wait)
2474 			refcount_inc(&cm_node->refcnt);
2475 	}
2476 	if (info->ipv4_valid)
2477 		ibdev_dbg(&iwdev->ibdev,
2478 			  "CM: %s caller: %pS loc_port=0x%04x rem_port=0x%04x loc_addr=%pI4 rem_addr=%pI4 mac=%pM, vlan_id=%d cm_node=%p\n",
2479 			  (!mtype) ? "DELETE" : "ADD",
2480 			  __builtin_return_address(0), info->dest_port,
2481 			  info->src_port, info->dest_ip, info->src_ip,
2482 			  info->mac_addr, cminfo->vlan_id,
2483 			  cmnode);
2484 	else
2485 		ibdev_dbg(&iwdev->ibdev,
2486 			  "CM: %s caller: %pS loc_port=0x%04x rem_port=0x%04x loc_addr=%pI6 rem_addr=%pI6 mac=%pM, vlan_id=%d cm_node=%p\n",
2487 			  (!mtype) ? "DELETE" : "ADD",
2488 			  __builtin_return_address(0), info->dest_port,
2489 			  info->src_port, info->dest_ip, info->src_ip,
2490 			  info->mac_addr, cminfo->vlan_id,
2491 			  cmnode);
2492 
2493 	cqp_info->in.u.manage_qhash_table_entry.cqp = &iwdev->rf->cqp.sc_cqp;
2494 	cqp_info->in.u.manage_qhash_table_entry.scratch = (uintptr_t)cqp_request;
2495 	cqp_info->cqp_cmd = IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY;
2496 	cqp_info->post_sq = 1;
2497 	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2498 	if (status && cm_node && !wait)
2499 		irdma_rem_ref_cm_node(cm_node);
2500 
2501 	irdma_put_cqp_request(iwcqp, cqp_request);
2502 
2503 	return status;
2504 }
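
/*
 * Example (hedged sketch): a CM listener typically installs a SYN-type
 * entry for its local tuple and tears it down later:
 *
 *	irdma_manage_qhash(iwdev, cminfo, IRDMA_QHASH_TYPE_TCP_SYN,
 *			   IRDMA_QHASH_MANAGE_TYPE_ADD, NULL, true);
 *	...
 *	irdma_manage_qhash(iwdev, cminfo, IRDMA_QHASH_TYPE_TCP_SYN,
 *			   IRDMA_QHASH_MANAGE_TYPE_DELETE, NULL, false);
 *
 * Passing a cmnode instead of NULL arms irdma_send_syn_cqp_callback()
 * above, so the SYN goes out only after the qhash entry is in HW.
 */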
2505 
2506 /**
2507  * irdma_hw_flush_wqes_callback - Check return code after flush
2508  * @cqp_request: flush cqp completion
2509  */
2510 static void irdma_hw_flush_wqes_callback(struct irdma_cqp_request *cqp_request)
2511 {
2512 	struct irdma_qp_flush_info *hw_info;
2513 	struct irdma_sc_qp *qp;
2514 	struct irdma_qp *iwqp;
2515 	struct cqp_cmds_info *cqp_info;
2516 
2517 	cqp_info = &cqp_request->info;
2518 	hw_info = &cqp_info->in.u.qp_flush_wqes.info;
2519 	qp = cqp_info->in.u.qp_flush_wqes.qp;
2520 	iwqp = qp->qp_uk.back_qp;
2521 
2522 	if (cqp_request->compl_info.maj_err_code)
2523 		return;
2524 
2525 	if (hw_info->rq &&
2526 	    (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_SQ_WQE_FLUSHED ||
2527 	     cqp_request->compl_info.min_err_code == 0)) {
2528 		/* RQ WQE flush was requested but did not happen */
2529 		qp->qp_uk.rq_flush_complete = true;
2530 	}
2531 	if (hw_info->sq &&
2532 	    (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_RQ_WQE_FLUSHED ||
2533 	     cqp_request->compl_info.min_err_code == 0)) {
2534 		if (IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring)) {
2535 			ibdev_err(&iwqp->iwdev->ibdev, "Flush QP[%d] failed, SQ has more work",
2536 				  qp->qp_uk.qp_id);
2537 			irdma_ib_qp_event(iwqp, IRDMA_QP_EVENT_CATASTROPHIC);
2538 		}
2539 		qp->qp_uk.sq_flush_complete = true;
2540 	}
2541 }
2542 
2543 /**
2544  * irdma_hw_flush_wqes - flush qp's wqe
2545  * @rf: RDMA PCI function
2546  * @qp: hardware control qp
2547  * @info: info for flush
2548  * @wait: flag wait for completion
2549  */
2550 int irdma_hw_flush_wqes(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
2551 			struct irdma_qp_flush_info *info, bool wait)
2552 {
2553 	int status;
2554 	struct irdma_qp_flush_info *hw_info;
2555 	struct irdma_cqp_request *cqp_request;
2556 	struct cqp_cmds_info *cqp_info;
2557 	struct irdma_qp *iwqp = qp->qp_uk.back_qp;
2558 
2559 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
2560 	if (!cqp_request)
2561 		return -ENOMEM;
2562 
2563 	cqp_info = &cqp_request->info;
2564 	if (!wait)
2565 		cqp_request->callback_fcn = irdma_hw_flush_wqes_callback;
2566 	hw_info = &cqp_request->info.in.u.qp_flush_wqes.info;
2567 	memcpy(hw_info, info, sizeof(*hw_info));
2568 	cqp_info->cqp_cmd = IRDMA_OP_QP_FLUSH_WQES;
2569 	cqp_info->post_sq = 1;
2570 	cqp_info->in.u.qp_flush_wqes.qp = qp;
2571 	cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)cqp_request;
2572 	status = irdma_handle_cqp_op(rf, cqp_request);
2573 	if (status) {
2574 		qp->qp_uk.sq_flush_complete = true;
2575 		qp->qp_uk.rq_flush_complete = true;
2576 		irdma_put_cqp_request(&rf->cqp, cqp_request);
2577 		return status;
2578 	}
2579 
2580 	if (!wait || cqp_request->compl_info.maj_err_code)
2581 		goto put_cqp;
2582 
2583 	if (info->rq) {
2584 		if (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_SQ_WQE_FLUSHED ||
2585 		    cqp_request->compl_info.min_err_code == 0) {
2586 			/* RQ WQE flush was requested but did not happen */
2587 			qp->qp_uk.rq_flush_complete = true;
2588 		}
2589 	}
2590 	if (info->sq) {
2591 		if (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_RQ_WQE_FLUSHED ||
2592 		    cqp_request->compl_info.min_err_code == 0) {
2593 			/*
2594 			 * Handling case where WQE is posted to empty SQ when
2595 			 * flush has not completed
2596 			 */
2597 			if (IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring)) {
2598 				struct irdma_cqp_request *new_req;
2599 
2600 				if (!qp->qp_uk.sq_flush_complete)
2601 					goto put_cqp;
2602 				qp->qp_uk.sq_flush_complete = false;
2603 				qp->flush_sq = false;
2604 
2605 				info->rq = false;
2606 				info->sq = true;
2607 				new_req = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
2608 				if (!new_req) {
2609 					status = -ENOMEM;
2610 					goto put_cqp;
2611 				}
2612 				cqp_info = &new_req->info;
2613 				hw_info = &new_req->info.in.u.qp_flush_wqes.info;
2614 				memcpy(hw_info, info, sizeof(*hw_info));
2615 				cqp_info->cqp_cmd = IRDMA_OP_QP_FLUSH_WQES;
2616 				cqp_info->post_sq = 1;
2617 				cqp_info->in.u.qp_flush_wqes.qp = qp;
2618 				cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)new_req;
2619 
2620 				status = irdma_handle_cqp_op(rf, new_req);
2621 				if (new_req->compl_info.maj_err_code ||
2622 				    new_req->compl_info.min_err_code != IRDMA_CQP_COMPL_SQ_WQE_FLUSHED ||
2623 				    status) {
2624 					ibdev_err(&iwqp->iwdev->ibdev, "fatal QP event: SQ in error but not flushed, qp: %d",
2625 						  iwqp->ibqp.qp_num);
2626 					qp->qp_uk.sq_flush_complete = false;
2627 					irdma_ib_qp_event(iwqp, IRDMA_QP_EVENT_CATASTROPHIC);
2628 				}
2629 				irdma_put_cqp_request(&rf->cqp, new_req);
2630 			} else {
2631 				/* SQ WQE flush was requested but did not happen */
2632 				qp->qp_uk.sq_flush_complete = true;
2633 			}
2634 		} else {
2635 			if (!IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring))
2636 				qp->qp_uk.sq_flush_complete = true;
2637 		}
2638 	}
2639 
2640 	ibdev_dbg(&rf->iwdev->ibdev,
2641 		  "VERBS: qp_id=%d qp_type=%d qpstate=%d ibqpstate=%d last_aeq=%d hw_iw_state=%d maj_err_code=%d min_err_code=%d\n",
2642 		  iwqp->ibqp.qp_num, rf->protocol_used, iwqp->iwarp_state,
2643 		  iwqp->ibqp_state, iwqp->last_aeq, iwqp->hw_iwarp_state,
2644 		  cqp_request->compl_info.maj_err_code,
2645 		  cqp_request->compl_info.min_err_code);
2646 put_cqp:
2647 	irdma_put_cqp_request(&rf->cqp, cqp_request);
2648 
2649 	return status;
2650 }
2651 
2652 /**
2653  * irdma_gen_ae - generate AE
2654  * @rf: RDMA PCI function
2655  * @qp: qp associated with AE
2656  * @info: info for ae
2657  * @wait: wait for completion
2658  */
2659 void irdma_gen_ae(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
2660 		  struct irdma_gen_ae_info *info, bool wait)
2661 {
2662 	struct irdma_gen_ae_info *ae_info;
2663 	struct irdma_cqp_request *cqp_request;
2664 	struct cqp_cmds_info *cqp_info;
2665 
2666 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
2667 	if (!cqp_request)
2668 		return;
2669 
2670 	cqp_info = &cqp_request->info;
2671 	ae_info = &cqp_request->info.in.u.gen_ae.info;
2672 	memcpy(ae_info, info, sizeof(*ae_info));
2673 	cqp_info->cqp_cmd = IRDMA_OP_GEN_AE;
2674 	cqp_info->post_sq = 1;
2675 	cqp_info->in.u.gen_ae.qp = qp;
2676 	cqp_info->in.u.gen_ae.scratch = (uintptr_t)cqp_request;
2677 
2678 	irdma_handle_cqp_op(rf, cqp_request);
2679 	irdma_put_cqp_request(&rf->cqp, cqp_request);
2680 }
2681 
2682 void irdma_flush_wqes(struct irdma_qp *iwqp, u32 flush_mask)
2683 {
2684 	struct irdma_qp_flush_info info = {};
2685 	struct irdma_pci_f *rf = iwqp->iwdev->rf;
2686 	u8 flush_code = iwqp->sc_qp.flush_code;
2687 
2688 	if (!(flush_mask & IRDMA_FLUSH_SQ) && !(flush_mask & IRDMA_FLUSH_RQ))
2689 		return;
2690 
2691 	/* Set flush info fields */
2692 	info.sq = flush_mask & IRDMA_FLUSH_SQ;
2693 	info.rq = flush_mask & IRDMA_FLUSH_RQ;
2694 
2695 	/* Generate userflush errors in CQE */
2696 	info.sq_major_code = IRDMA_FLUSH_MAJOR_ERR;
2697 	info.sq_minor_code = FLUSH_GENERAL_ERR;
2698 	info.rq_major_code = IRDMA_FLUSH_MAJOR_ERR;
2699 	info.rq_minor_code = FLUSH_GENERAL_ERR;
2700 	info.userflushcode = true;
2701 
2702 	if (flush_mask & IRDMA_REFLUSH) {
2703 		if (info.sq)
2704 			iwqp->sc_qp.flush_sq = false;
2705 		if (info.rq)
2706 			iwqp->sc_qp.flush_rq = false;
2707 	} else {
2708 		if (flush_code) {
2709 			if (info.sq && iwqp->sc_qp.sq_flush_code)
2710 				info.sq_minor_code = flush_code;
2711 			if (info.rq && iwqp->sc_qp.rq_flush_code)
2712 				info.rq_minor_code = flush_code;
2713 		}
2714 		if (!iwqp->user_mode)
2715 			queue_delayed_work(iwqp->iwdev->cleanup_wq,
2716 					   &iwqp->dwork_flush,
2717 					   msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
2718 	}
2719 
2720 	/* Issue flush */
2721 	(void)irdma_hw_flush_wqes(rf, &iwqp->sc_qp, &info,
2722 				  flush_mask & IRDMA_FLUSH_WAIT);
2723 	iwqp->flush_issued = true;
2724 }
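
/*
 * Usage sketch: the flush mask bits compose. A synchronous flush of
 * both queues, as issued from QP modify paths:
 *
 *	irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ | IRDMA_FLUSH_RQ |
 *			       IRDMA_FLUSH_WAIT);
 *
 * IRDMA_REFLUSH re-arms flush_sq/flush_rq for a QP that was already
 * flushed once, taking the first branch above.
 */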
2725