// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */

#include "gdma.h"
#include "hw_channel.h"

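/* Inflight-message bookkeeping: the semaphore caps the number of concurrent
 * HWC requests at the queue depth, and the bitmap in inflight_msg_res hands
 * out a free message ID. That ID doubles as the index of the TX work request
 * slot and of the caller context used when the response comes back.
 */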
static int mana_hwc_get_msg_index(struct hw_channel_context *hwc, u16 *msg_id)
{
	struct gdma_resource *r = &hwc->inflight_msg_res;
	unsigned long flags;
	u32 index;

	down(&hwc->sema);

	spin_lock_irqsave(&r->lock, flags);

	index = find_first_zero_bit(hwc->inflight_msg_res.map,
				    hwc->inflight_msg_res.size);

	bitmap_set(hwc->inflight_msg_res.map, index, 1);

	spin_unlock_irqrestore(&r->lock, flags);

	*msg_id = index;

	return 0;
}

static void mana_hwc_put_msg_index(struct hw_channel_context *hwc, u16 msg_id)
{
	struct gdma_resource *r = &hwc->inflight_msg_res;
	unsigned long flags;

	spin_lock_irqsave(&r->lock, flags);
	bitmap_clear(hwc->inflight_msg_res.map, msg_id, 1);
	spin_unlock_irqrestore(&r->lock, flags);

	up(&hwc->sema);
}

static int mana_hwc_verify_resp_msg(const struct hwc_caller_ctx *caller_ctx,
				    const struct gdma_resp_hdr *resp_msg,
				    u32 resp_len)
{
	if (resp_len < sizeof(*resp_msg))
		return -EPROTO;

	if (resp_len > caller_ctx->output_buflen)
		return -EPROTO;

	return 0;
}

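/* Dispatch a received response to the caller that issued the matching
 * request: the hwc_msg_id carried in the response selects the caller context,
 * the payload is copied into the caller's output buffer, and the waiter
 * blocked in mana_hwc_send_request() is woken via the completion.
 */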
static void mana_hwc_handle_resp(struct hw_channel_context *hwc, u32 resp_len,
				 const struct gdma_resp_hdr *resp_msg)
{
	struct hwc_caller_ctx *ctx;
	int err;

	if (!test_bit(resp_msg->response.hwc_msg_id,
		      hwc->inflight_msg_res.map)) {
		dev_err(hwc->dev, "hwc_rx: invalid msg_id = %u\n",
			resp_msg->response.hwc_msg_id);
		return;
	}

	ctx = hwc->caller_ctx + resp_msg->response.hwc_msg_id;
	err = mana_hwc_verify_resp_msg(ctx, resp_msg, resp_len);
	if (err)
		goto out;

	ctx->status_code = resp_msg->status;

	memcpy(ctx->output_buf, resp_msg, resp_len);
out:
	ctx->error = err;
	complete(&ctx->comp_event);
}

static int mana_hwc_post_rx_wqe(const struct hwc_wq *hwc_rxq,
				struct hwc_work_request *req)
{
	struct device *dev = hwc_rxq->hwc->dev;
	struct gdma_sge *sge;
	int err;

	sge = &req->sge;
	sge->address = (u64)req->buf_sge_addr;
	sge->mem_key = hwc_rxq->msg_buf->gpa_mkey;
	sge->size = req->buf_len;

	memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
	req->wqe_req.sgl = sge;
	req->wqe_req.num_sge = 1;
	req->wqe_req.client_data_unit = 0;

	err = mana_gd_post_and_ring(hwc_rxq->gdma_wq, &req->wqe_req, NULL);
	if (err)
		dev_err(dev, "Failed to post WQE on HWC RQ: %d\n", err);
	return err;
}

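/* EQ callback used while the channel bootstraps itself: the hardware reports
 * the EQ/CQ/RQ/SQ IDs, the doorbell, the PD, the memory key and the supported
 * limits through HWC init EQEs, and finally signals GDMA_EQE_HWC_INIT_DONE,
 * which releases the waiter in mana_hwc_establish_channel().
 */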
static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
					struct gdma_event *event)
{
	struct hw_channel_context *hwc = ctx;
	struct gdma_dev *gd = hwc->gdma_dev;
	union hwc_init_type_data type_data;
	union hwc_init_eq_id_db eq_db;
	u32 type, val;

	switch (event->type) {
	case GDMA_EQE_HWC_INIT_EQ_ID_DB:
		eq_db.as_uint32 = event->details[0];
		hwc->cq->gdma_eq->id = eq_db.eq_id;
		gd->doorbell = eq_db.doorbell;
		break;

	case GDMA_EQE_HWC_INIT_DATA:
		type_data.as_uint32 = event->details[0];
		type = type_data.type;
		val = type_data.value;

		switch (type) {
		case HWC_INIT_DATA_CQID:
			hwc->cq->gdma_cq->id = val;
			break;

		case HWC_INIT_DATA_RQID:
			hwc->rxq->gdma_wq->id = val;
			break;

		case HWC_INIT_DATA_SQID:
			hwc->txq->gdma_wq->id = val;
			break;

		case HWC_INIT_DATA_QUEUE_DEPTH:
			hwc->hwc_init_q_depth_max = (u16)val;
			break;

		case HWC_INIT_DATA_MAX_REQUEST:
			hwc->hwc_init_max_req_msg_size = val;
			break;

		case HWC_INIT_DATA_MAX_RESPONSE:
			hwc->hwc_init_max_resp_msg_size = val;
			break;

		case HWC_INIT_DATA_MAX_NUM_CQS:
			gd->gdma_context->max_num_cqs = val;
			break;

		case HWC_INIT_DATA_PDID:
			hwc->gdma_dev->pdid = val;
			break;

		case HWC_INIT_DATA_GPA_MKEY:
			hwc->rxq->msg_buf->gpa_mkey = val;
			hwc->txq->msg_buf->gpa_mkey = val;
			break;
		}

		break;

	case GDMA_EQE_HWC_INIT_DONE:
		complete(&hwc->hwc_init_eqe_comp);
		break;

	default:
		/* Ignore unknown events, which should never happen. */
		break;
	}
}

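/* The RX CQE only tells us which WQE completed; the work request (and thus
 * the virtual address of the received message) is recovered by dividing the
 * SGE's offset from the DMA buffer base by the fixed per-slot message size.
 */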
static void mana_hwc_rx_event_handler(void *ctx, u32 gdma_rxq_id,
				      const struct hwc_rx_oob *rx_oob)
{
	struct hw_channel_context *hwc = ctx;
	struct hwc_wq *hwc_rxq = hwc->rxq;
	struct hwc_work_request *rx_req;
	struct gdma_resp_hdr *resp;
	struct gdma_wqe *dma_oob;
	struct gdma_queue *rq;
	struct gdma_sge *sge;
	u64 rq_base_addr;
	u64 rx_req_idx;
	u8 *wqe;

	if (WARN_ON_ONCE(hwc_rxq->gdma_wq->id != gdma_rxq_id))
		return;

	rq = hwc_rxq->gdma_wq;
	wqe = mana_gd_get_wqe_ptr(rq, rx_oob->wqe_offset / GDMA_WQE_BU_SIZE);
	dma_oob = (struct gdma_wqe *)wqe;

	sge = (struct gdma_sge *)(wqe + 8 + dma_oob->inline_oob_size_div4 * 4);

	/* Select the RX work request for virtual address and for reposting. */
	rq_base_addr = hwc_rxq->msg_buf->mem_info.dma_handle;
	rx_req_idx = (sge->address - rq_base_addr) / hwc->max_req_msg_size;

	rx_req = &hwc_rxq->msg_buf->reqs[rx_req_idx];
	resp = (struct gdma_resp_hdr *)rx_req->buf_va;

	if (resp->response.hwc_msg_id >= hwc->num_inflight_msg) {
		dev_err(hwc->dev, "HWC RX: wrong msg_id=%u\n",
			resp->response.hwc_msg_id);
		return;
	}

	mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, resp);

	/* Stop using 'resp': the buffer is handed back to the HW by
	 * mana_hwc_post_rx_wqe() below.
	 */
	resp = NULL;

	mana_hwc_post_rx_wqe(hwc_rxq, rx_req);
}

static void mana_hwc_tx_event_handler(void *ctx, u32 gdma_txq_id,
				      const struct hwc_rx_oob *rx_oob)
{
	struct hw_channel_context *hwc = ctx;
	struct hwc_wq *hwc_txq = hwc->txq;

	WARN_ON_ONCE(!hwc_txq || hwc_txq->gdma_wq->id != gdma_txq_id);
}

static int mana_hwc_create_gdma_wq(struct hw_channel_context *hwc,
				   enum gdma_queue_type type, u64 queue_size,
				   struct gdma_queue **queue)
{
	struct gdma_queue_spec spec = {};

	if (type != GDMA_SQ && type != GDMA_RQ)
		return -EINVAL;

	spec.type = type;
	spec.monitor_avl_buf = false;
	spec.queue_size = queue_size;

	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}

static int mana_hwc_create_gdma_cq(struct hw_channel_context *hwc,
				   u64 queue_size,
				   void *ctx, gdma_cq_callback *cb,
				   struct gdma_queue *parent_eq,
				   struct gdma_queue **queue)
{
	struct gdma_queue_spec spec = {};

	spec.type = GDMA_CQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = queue_size;
	spec.cq.context = ctx;
	spec.cq.callback = cb;
	spec.cq.parent_eq = parent_eq;

	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}

static int mana_hwc_create_gdma_eq(struct hw_channel_context *hwc,
				   u64 queue_size,
				   void *ctx, gdma_eq_callback *cb,
				   struct gdma_queue **queue)
{
	struct gdma_queue_spec spec = {};

	spec.type = GDMA_EQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = queue_size;
	spec.eq.context = ctx;
	spec.eq.callback = cb;
	spec.eq.log2_throttle_limit = DEFAULT_LOG2_THROTTLING_FOR_ERROR_EQ;

	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}

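/* Completion handler shared by the HWC SQ and RQ: drain up to queue_depth
 * CQEs, route each one to the TX or RX handler based on is_sq, then re-arm
 * the CQ so the hardware keeps generating completion events.
 */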
static void mana_hwc_comp_event(void *ctx, struct gdma_queue *q_self)
{
	struct hwc_rx_oob comp_data = {};
	struct gdma_comp *completions;
	struct hwc_cq *hwc_cq = ctx;
	int comp_read, i;

	WARN_ON_ONCE(hwc_cq->gdma_cq != q_self);

	completions = hwc_cq->comp_buf;
	comp_read = mana_gd_poll_cq(q_self, completions, hwc_cq->queue_depth);
	WARN_ON_ONCE(comp_read <= 0 || comp_read > hwc_cq->queue_depth);

	for (i = 0; i < comp_read; ++i) {
		comp_data = *(struct hwc_rx_oob *)completions[i].cqe_data;

		if (completions[i].is_sq)
			hwc_cq->tx_event_handler(hwc_cq->tx_event_ctx,
						completions[i].wq_num,
						&comp_data);
		else
			hwc_cq->rx_event_handler(hwc_cq->rx_event_ctx,
						completions[i].wq_num,
						&comp_data);
	}

	mana_gd_ring_cq(q_self, SET_ARM_BIT);
}

static void mana_hwc_destroy_cq(struct gdma_context *gc, struct hwc_cq *hwc_cq)
{
	if (!hwc_cq)
		return;

	kfree(hwc_cq->comp_buf);

	if (hwc_cq->gdma_cq)
		mana_gd_destroy_queue(gc, hwc_cq->gdma_cq);

	if (hwc_cq->gdma_eq)
		mana_gd_destroy_queue(gc, hwc_cq->gdma_eq);

	kfree(hwc_cq);
}

static int mana_hwc_create_cq(struct hw_channel_context *hwc, u16 q_depth,
			      gdma_eq_callback *callback, void *ctx,
			      hwc_rx_event_handler_t *rx_ev_hdlr,
			      void *rx_ev_ctx,
			      hwc_tx_event_handler_t *tx_ev_hdlr,
			      void *tx_ev_ctx, struct hwc_cq **hwc_cq_ptr)
{
	struct gdma_queue *eq, *cq;
	struct gdma_comp *comp_buf;
	struct hwc_cq *hwc_cq;
	u32 eq_size, cq_size;
	int err;

	eq_size = roundup_pow_of_two(GDMA_EQE_SIZE * q_depth);
	if (eq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
		eq_size = MINIMUM_SUPPORTED_PAGE_SIZE;

	cq_size = roundup_pow_of_two(GDMA_CQE_SIZE * q_depth);
	if (cq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
		cq_size = MINIMUM_SUPPORTED_PAGE_SIZE;

	hwc_cq = kzalloc(sizeof(*hwc_cq), GFP_KERNEL);
	if (!hwc_cq)
		return -ENOMEM;

	err = mana_hwc_create_gdma_eq(hwc, eq_size, ctx, callback, &eq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC EQ for RQ: %d\n", err);
		goto out;
	}
	hwc_cq->gdma_eq = eq;

	err = mana_hwc_create_gdma_cq(hwc, cq_size, hwc_cq, mana_hwc_comp_event,
				      eq, &cq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC CQ for RQ: %d\n", err);
		goto out;
	}
	hwc_cq->gdma_cq = cq;

	comp_buf = kcalloc(q_depth, sizeof(struct gdma_comp), GFP_KERNEL);
	if (!comp_buf) {
		err = -ENOMEM;
		goto out;
	}

	hwc_cq->hwc = hwc;
	hwc_cq->comp_buf = comp_buf;
	hwc_cq->queue_depth = q_depth;
	hwc_cq->rx_event_handler = rx_ev_hdlr;
	hwc_cq->rx_event_ctx = rx_ev_ctx;
	hwc_cq->tx_event_handler = tx_ev_hdlr;
	hwc_cq->tx_event_ctx = tx_ev_ctx;

	*hwc_cq_ptr = hwc_cq;
	return 0;
out:
	mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc_cq);
	return err;
}

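/* Allocate a single DMA region and slice it into q_depth fixed-size slots of
 * max_msg_size bytes; each slot backs exactly one hwc_work_request, so a
 * message ID maps 1:1 to a buffer slot.
 */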
static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth,
				  u32 max_msg_size,
				  struct hwc_dma_buf **dma_buf_ptr)
{
	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
	struct hwc_work_request *hwc_wr;
	struct hwc_dma_buf *dma_buf;
	struct gdma_mem_info *gmi;
	void *virt_addr;
	u32 buf_size;
	u8 *base_pa;
	int err;
	u16 i;

	dma_buf = kzalloc(struct_size(dma_buf, reqs, q_depth), GFP_KERNEL);
	if (!dma_buf)
		return -ENOMEM;

	dma_buf->num_reqs = q_depth;

	buf_size = PAGE_ALIGN(q_depth * max_msg_size);

	gmi = &dma_buf->mem_info;
	err = mana_gd_alloc_memory(gc, buf_size, gmi);
	if (err) {
		dev_err(hwc->dev, "Failed to allocate DMA buffer: %d\n", err);
		goto out;
	}

	virt_addr = dma_buf->mem_info.virt_addr;
	base_pa = (u8 *)dma_buf->mem_info.dma_handle;

	for (i = 0; i < q_depth; i++) {
		hwc_wr = &dma_buf->reqs[i];

		hwc_wr->buf_va = virt_addr + i * max_msg_size;
		hwc_wr->buf_sge_addr = base_pa + i * max_msg_size;

		hwc_wr->buf_len = max_msg_size;
	}

	*dma_buf_ptr = dma_buf;
	return 0;
out:
	kfree(dma_buf);
	return err;
}

static void mana_hwc_dealloc_dma_buf(struct hw_channel_context *hwc,
				     struct hwc_dma_buf *dma_buf)
{
	if (!dma_buf)
		return;

	mana_gd_free_memory(&dma_buf->mem_info);

	kfree(dma_buf);
}

static void mana_hwc_destroy_wq(struct hw_channel_context *hwc,
				struct hwc_wq *hwc_wq)
{
	if (!hwc_wq)
		return;

	mana_hwc_dealloc_dma_buf(hwc, hwc_wq->msg_buf);

	if (hwc_wq->gdma_wq)
		mana_gd_destroy_queue(hwc->gdma_dev->gdma_context,
				      hwc_wq->gdma_wq);

	kfree(hwc_wq);
}

static int mana_hwc_create_wq(struct hw_channel_context *hwc,
			      enum gdma_queue_type q_type, u16 q_depth,
			      u32 max_msg_size, struct hwc_cq *hwc_cq,
			      struct hwc_wq **hwc_wq_ptr)
{
	struct gdma_queue *queue;
	struct hwc_wq *hwc_wq;
	u32 queue_size;
	int err;

	WARN_ON(q_type != GDMA_SQ && q_type != GDMA_RQ);

	if (q_type == GDMA_RQ)
		queue_size = roundup_pow_of_two(GDMA_MAX_RQE_SIZE * q_depth);
	else
		queue_size = roundup_pow_of_two(GDMA_MAX_SQE_SIZE * q_depth);

	if (queue_size < MINIMUM_SUPPORTED_PAGE_SIZE)
		queue_size = MINIMUM_SUPPORTED_PAGE_SIZE;

	hwc_wq = kzalloc(sizeof(*hwc_wq), GFP_KERNEL);
	if (!hwc_wq)
		return -ENOMEM;

	err = mana_hwc_create_gdma_wq(hwc, q_type, queue_size, &queue);
	if (err)
		goto out;

	/* Record the GDMA queue before allocating the message buffers, so
	 * the error path below can destroy it and avoid leaking the queue.
	 */
	hwc_wq->hwc = hwc;
	hwc_wq->gdma_wq = queue;
	hwc_wq->queue_depth = q_depth;
	hwc_wq->hwc_cq = hwc_cq;

	err = mana_hwc_alloc_dma_buf(hwc, q_depth, max_msg_size,
				     &hwc_wq->msg_buf);
	if (err)
		goto out;

	*hwc_wq_ptr = hwc_wq;
	return 0;
out:
	mana_hwc_destroy_wq(hwc, hwc_wq);
	return err;
}

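/* Build and post a send WQE for one HWC request: the hwc_tx_oob inline OOB
 * names the destination virtual RQ/RCQ (mana_hwc_send_request() passes 0/0
 * with dest_pf == false) and the local SQ/SCQ, and a single SGE points at
 * the request's buffer slot.
 */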
static int mana_hwc_post_tx_wqe(const struct hwc_wq *hwc_txq,
				struct hwc_work_request *req,
				u32 dest_virt_rq_id, u32 dest_virt_rcq_id,
				bool dest_pf)
{
	struct device *dev = hwc_txq->hwc->dev;
	struct hwc_tx_oob *tx_oob;
	struct gdma_sge *sge;
	int err;

	if (req->msg_size == 0 || req->msg_size > req->buf_len) {
		dev_err(dev, "wrong msg_size: %u, buf_len: %u\n",
			req->msg_size, req->buf_len);
		return -EINVAL;
	}

	tx_oob = &req->tx_oob;

	tx_oob->vrq_id = dest_virt_rq_id;
	tx_oob->dest_vfid = 0;
	tx_oob->vrcq_id = dest_virt_rcq_id;
	tx_oob->vscq_id = hwc_txq->hwc_cq->gdma_cq->id;
	tx_oob->loopback = false;
	tx_oob->lso_override = false;
	tx_oob->dest_pf = dest_pf;
	tx_oob->vsq_id = hwc_txq->gdma_wq->id;

	sge = &req->sge;
	sge->address = (u64)req->buf_sge_addr;
	sge->mem_key = hwc_txq->msg_buf->gpa_mkey;
	sge->size = req->msg_size;

	memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
	req->wqe_req.sgl = sge;
	req->wqe_req.num_sge = 1;
	req->wqe_req.inline_oob_size = sizeof(struct hwc_tx_oob);
	req->wqe_req.inline_oob_data = tx_oob;
	req->wqe_req.client_data_unit = 0;

	err = mana_gd_post_and_ring(hwc_txq->gdma_wq, &req->wqe_req, NULL);
	if (err)
		dev_err(dev, "Failed to post WQE on HWC SQ: %d\n", err);
	return err;
}

static int mana_hwc_init_inflight_msg(struct hw_channel_context *hwc,
				      u16 num_msg)
{
	int err;

	sema_init(&hwc->sema, num_msg);

	err = mana_gd_alloc_res_map(num_msg, &hwc->inflight_msg_res);
	if (err)
		dev_err(hwc->dev, "Failed to init inflight_msg_res: %d\n", err);
	return err;
}

static int mana_hwc_test_channel(struct hw_channel_context *hwc, u16 q_depth,
				 u32 max_req_msg_size, u32 max_resp_msg_size)
{
	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
	struct hwc_wq *hwc_rxq = hwc->rxq;
	struct hwc_work_request *req;
	struct hwc_caller_ctx *ctx;
	int err;
	int i;

	/* Post all WQEs on the RQ */
	for (i = 0; i < q_depth; i++) {
		req = &hwc_rxq->msg_buf->reqs[i];
		err = mana_hwc_post_rx_wqe(hwc_rxq, req);
		if (err)
			return err;
	}

	ctx = kcalloc(q_depth, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	for (i = 0; i < q_depth; ++i)
		init_completion(&ctx[i].comp_event);

	hwc->caller_ctx = ctx;

	return mana_gd_test_eq(gc, hwc->cq->gdma_eq);
}

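/* Hand the queue addresses to the management hardware over the shared-memory
 * channel and wait (up to 60 seconds) for the GDMA_EQE_HWC_INIT_DONE event;
 * by then the init EQEs have filled in the negotiated queue depth and
 * message sizes.
 */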
static int mana_hwc_establish_channel(struct gdma_context *gc, u16 *q_depth,
				      u32 *max_req_msg_size,
				      u32 *max_resp_msg_size)
{
	struct hw_channel_context *hwc = gc->hwc.driver_data;
	struct gdma_queue *rq = hwc->rxq->gdma_wq;
	struct gdma_queue *sq = hwc->txq->gdma_wq;
	struct gdma_queue *eq = hwc->cq->gdma_eq;
	struct gdma_queue *cq = hwc->cq->gdma_cq;
	int err;

	init_completion(&hwc->hwc_init_eqe_comp);

	err = mana_smc_setup_hwc(&gc->shm_channel, false,
				 eq->mem_info.dma_handle,
				 cq->mem_info.dma_handle,
				 rq->mem_info.dma_handle,
				 sq->mem_info.dma_handle,
				 eq->eq.msix_index);
	if (err)
		return err;

	if (!wait_for_completion_timeout(&hwc->hwc_init_eqe_comp, 60 * HZ))
		return -ETIMEDOUT;

	*q_depth = hwc->hwc_init_q_depth_max;
	*max_req_msg_size = hwc->hwc_init_max_req_msg_size;
	*max_resp_msg_size = hwc->hwc_init_max_resp_msg_size;

	if (WARN_ON(cq->id >= gc->max_num_cqs))
		return -EPROTO;

	gc->cq_table = vzalloc(gc->max_num_cqs * sizeof(struct gdma_queue *));
	if (!gc->cq_table)
		return -ENOMEM;

	gc->cq_table[cq->id] = cq;

	return 0;
}

static int mana_hwc_init_queues(struct hw_channel_context *hwc, u16 q_depth,
				u32 max_req_msg_size, u32 max_resp_msg_size)
{
	int err;

	err = mana_hwc_init_inflight_msg(hwc, q_depth);
	if (err)
		return err;

	/* CQ is shared by SQ and RQ, so CQ's queue depth is the sum of SQ
	 * queue depth and RQ queue depth.
	 */
	err = mana_hwc_create_cq(hwc, q_depth * 2,
				 mana_hwc_init_event_handler, hwc,
				 mana_hwc_rx_event_handler, hwc,
				 mana_hwc_tx_event_handler, hwc, &hwc->cq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC CQ: %d\n", err);
		goto out;
	}

	err = mana_hwc_create_wq(hwc, GDMA_RQ, q_depth, max_req_msg_size,
				 hwc->cq, &hwc->rxq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC RQ: %d\n", err);
		goto out;
	}

	err = mana_hwc_create_wq(hwc, GDMA_SQ, q_depth, max_resp_msg_size,
				 hwc->cq, &hwc->txq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC SQ: %d\n", err);
		goto out;
	}

	hwc->num_inflight_msg = q_depth;
	hwc->max_req_msg_size = max_req_msg_size;

	return 0;
out:
	/* mana_hwc_create_channel() tears down whatever was created. */
	return err;
}

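/* Bring up the HW channel in three steps: create the bootstrap queues with
 * the fixed HW_CHANNEL_VF_BOOTSTRAP_* sizes, establish the channel with the
 * management hardware over shared memory, then verify the EQ/interrupt path
 * with a test EQE.
 */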
int mana_hwc_create_channel(struct gdma_context *gc)
{
	u32 max_req_msg_size, max_resp_msg_size;
	struct gdma_dev *gd = &gc->hwc;
	struct hw_channel_context *hwc;
	u16 q_depth_max;
	int err;

	hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
	if (!hwc)
		return -ENOMEM;

	gd->gdma_context = gc;
	gd->driver_data = hwc;
	hwc->gdma_dev = gd;
	hwc->dev = gc->dev;

	/* HWC's instance number is always 0. */
	gd->dev_id.as_uint32 = 0;
	gd->dev_id.type = GDMA_DEVICE_HWC;

	gd->pdid = INVALID_PDID;
	gd->doorbell = INVALID_DOORBELL;

	err = mana_hwc_init_queues(hwc, HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH,
				   HW_CHANNEL_MAX_REQUEST_SIZE,
				   HW_CHANNEL_MAX_RESPONSE_SIZE);
	if (err) {
		dev_err(hwc->dev, "Failed to initialize HWC: %d\n", err);
		goto out;
	}

	err = mana_hwc_establish_channel(gc, &q_depth_max, &max_req_msg_size,
					 &max_resp_msg_size);
	if (err) {
		dev_err(hwc->dev, "Failed to establish HWC: %d\n", err);
		goto out;
	}

	err = mana_hwc_test_channel(gc->hwc.driver_data,
				    HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH,
				    max_req_msg_size, max_resp_msg_size);
	if (err) {
		dev_err(hwc->dev, "Failed to test HWC: %d\n", err);
		goto out;
	}

	return 0;
out:
	/* mana_hwc_destroy_channel() copes with partially initialized state,
	 * so the queues created above are not leaked on failure.
	 */
	mana_hwc_destroy_channel(gc);
	return err;
}

void mana_hwc_destroy_channel(struct gdma_context *gc)
{
	struct hw_channel_context *hwc = gc->hwc.driver_data;

	if (!hwc)
		return;

	/* gc->max_num_cqs is set only by the HWC init EQEs, so a non-zero
	 * value means the channel came up and must be torn down on the
	 * hardware side as well.
	 */
	if (gc->max_num_cqs > 0) {
		mana_smc_teardown_hwc(&gc->shm_channel, false);
		gc->max_num_cqs = 0;
	}

	kfree(hwc->caller_ctx);
	hwc->caller_ctx = NULL;

	mana_hwc_destroy_wq(hwc, hwc->txq);
	hwc->txq = NULL;

	mana_hwc_destroy_wq(hwc, hwc->rxq);
	hwc->rxq = NULL;

	mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc->cq);
	hwc->cq = NULL;

	mana_gd_free_res_map(&hwc->inflight_msg_res);

	hwc->num_inflight_msg = 0;

	if (hwc->gdma_dev->pdid != INVALID_PDID) {
		hwc->gdma_dev->doorbell = INVALID_DOORBELL;
		hwc->gdma_dev->pdid = INVALID_PDID;
	}

	kfree(hwc);
	gc->hwc.driver_data = NULL;
	gc->hwc.gdma_context = NULL;
}

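/* Send one request over the HW channel and block until the response arrives
 * or the request times out.
 *
 * A minimal usage sketch (illustrative only: MY_REQ_CODE and the my_req /
 * my_resp structures are placeholders, not part of this driver):
 *
 *	struct my_req req = {};
 *	struct my_resp resp = {};
 *	int err;
 *
 *	mana_gd_init_req_hdr(&req.hdr, MY_REQ_CODE, sizeof(req), sizeof(resp));
 *	err = mana_hwc_send_request(hwc, sizeof(req), &req,
 *				    sizeof(resp), &resp);
 *	if (err || resp.hdr.status)
 *		... handle the failure ...
 *
 * Real callers embed struct gdma_req_hdr / gdma_resp_hdr at the start of
 * their messages so that hwc_msg_id and the status code can be tracked by
 * this layer.
 */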
int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
			  const void *req, u32 resp_len, void *resp)
{
	struct hwc_work_request *tx_wr;
	struct hwc_wq *txq = hwc->txq;
	struct gdma_req_hdr *req_msg;
	struct hwc_caller_ctx *ctx;
	u16 msg_id;
	int err;

	err = mana_hwc_get_msg_index(hwc, &msg_id);
	if (err)
		return err;

	tx_wr = &txq->msg_buf->reqs[msg_id];

	if (req_len > tx_wr->buf_len) {
		dev_err(hwc->dev, "HWC: req msg size: %u > %u\n", req_len,
			tx_wr->buf_len);
		err = -EINVAL;
		goto out;
	}

	ctx = hwc->caller_ctx + msg_id;
	ctx->output_buf = resp;
	ctx->output_buflen = resp_len;

	req_msg = (struct gdma_req_hdr *)tx_wr->buf_va;
	if (req)
		memcpy(req_msg, req, req_len);

	req_msg->req.hwc_msg_id = msg_id;

	tx_wr->msg_size = req_len;

	err = mana_hwc_post_tx_wqe(txq, tx_wr, 0, 0, false);
	if (err) {
		dev_err(hwc->dev, "HWC: Failed to post send WQE: %d\n", err);
		goto out;
	}

	if (!wait_for_completion_timeout(&ctx->comp_event, 30 * HZ)) {
		dev_err(hwc->dev, "HWC: Request timed out!\n");
		err = -ETIMEDOUT;
		goto out;
	}

	if (ctx->error) {
		err = ctx->error;
		goto out;
	}

	if (ctx->status_code) {
		dev_err(hwc->dev, "HWC: Failed hw_channel req: 0x%x\n",
			ctx->status_code);
		err = -EPROTO;
		goto out;
	}
out:
	mana_hwc_put_msg_index(hwc, msg_id);
	return err;
}