// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */

#include "gdma.h"
#include "hw_channel.h"

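/* Reserve a free in-flight message ID. The semaphore is initialized to the
 * number of in-flight messages, so once down() returns there is always a
 * zero bit available in the bitmap.
 */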
static int mana_hwc_get_msg_index(struct hw_channel_context *hwc, u16 *msg_id)
{
	struct gdma_resource *r = &hwc->inflight_msg_res;
	unsigned long flags;
	u32 index;

	down(&hwc->sema);

	spin_lock_irqsave(&r->lock, flags);

	index = find_first_zero_bit(hwc->inflight_msg_res.map,
				    hwc->inflight_msg_res.size);

	bitmap_set(hwc->inflight_msg_res.map, index, 1);

	spin_unlock_irqrestore(&r->lock, flags);

	*msg_id = index;

	return 0;
}

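/* Release a message ID reserved by mana_hwc_get_msg_index(). */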
static void mana_hwc_put_msg_index(struct hw_channel_context *hwc, u16 msg_id)
{
	struct gdma_resource *r = &hwc->inflight_msg_res;
	unsigned long flags;

	spin_lock_irqsave(&r->lock, flags);
	bitmap_clear(hwc->inflight_msg_res.map, msg_id, 1);
	spin_unlock_irqrestore(&r->lock, flags);

	up(&hwc->sema);
}

static int mana_hwc_verify_resp_msg(const struct hwc_caller_ctx *caller_ctx,
				    const struct gdma_resp_hdr *resp_msg,
				    u32 resp_len)
{
	if (resp_len < sizeof(*resp_msg))
		return -EPROTO;

	if (resp_len > caller_ctx->output_buflen)
		return -EPROTO;

	return 0;
}

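/* Match a received response to the caller context indexed by its message ID,
 * copy the response into the caller's output buffer and wake up the waiter
 * in mana_hwc_send_request().
 */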
static void mana_hwc_handle_resp(struct hw_channel_context *hwc, u32 resp_len,
				 const struct gdma_resp_hdr *resp_msg)
{
	struct hwc_caller_ctx *ctx;
	int err;

	if (!test_bit(resp_msg->response.hwc_msg_id,
		      hwc->inflight_msg_res.map)) {
		dev_err(hwc->dev, "hwc_rx: invalid msg_id = %u\n",
			resp_msg->response.hwc_msg_id);
		return;
	}

	ctx = hwc->caller_ctx + resp_msg->response.hwc_msg_id;
	err = mana_hwc_verify_resp_msg(ctx, resp_msg, resp_len);
	if (err)
		goto out;

	ctx->status_code = resp_msg->status;

	memcpy(ctx->output_buf, resp_msg, resp_len);
out:
	ctx->error = err;
	complete(&ctx->comp_event);
}

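/* Post (or repost) one receive buffer on the HWC RQ so the device can
 * deliver the next response message into it.
 */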
static int mana_hwc_post_rx_wqe(const struct hwc_wq *hwc_rxq,
				struct hwc_work_request *req)
{
	struct device *dev = hwc_rxq->hwc->dev;
	struct gdma_sge *sge;
	int err;

	sge = &req->sge;
	sge->address = (u64)req->buf_sge_addr;
	sge->mem_key = hwc_rxq->msg_buf->gpa_mkey;
	sge->size = req->buf_len;

	memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
	req->wqe_req.sgl = sge;
	req->wqe_req.num_sge = 1;
	req->wqe_req.client_data_unit = 0;

	err = mana_gd_post_and_ring(hwc_rxq->gdma_wq, &req->wqe_req, NULL);
	if (err)
		dev_err(dev, "Failed to post WQE on HWC RQ: %d\n", err);
	return err;
}

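/* EQ callback used while the channel is being established: the HWC init
 * EQEs carry the queue IDs, doorbell, limits and memory keys reported by
 * the device, and GDMA_EQE_HWC_INIT_DONE completes hwc_init_eqe_comp.
 */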
static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
					struct gdma_event *event)
{
	struct hw_channel_context *hwc = ctx;
	struct gdma_dev *gd = hwc->gdma_dev;
	union hwc_init_type_data type_data;
	union hwc_init_eq_id_db eq_db;
	u32 type, val;

	switch (event->type) {
	case GDMA_EQE_HWC_INIT_EQ_ID_DB:
		eq_db.as_uint32 = event->details[0];
		hwc->cq->gdma_eq->id = eq_db.eq_id;
		gd->doorbell = eq_db.doorbell;
		break;

	case GDMA_EQE_HWC_INIT_DATA:
		type_data.as_uint32 = event->details[0];
		type = type_data.type;
		val = type_data.value;

		switch (type) {
		case HWC_INIT_DATA_CQID:
			hwc->cq->gdma_cq->id = val;
			break;

		case HWC_INIT_DATA_RQID:
			hwc->rxq->gdma_wq->id = val;
			break;

		case HWC_INIT_DATA_SQID:
			hwc->txq->gdma_wq->id = val;
			break;

		case HWC_INIT_DATA_QUEUE_DEPTH:
			hwc->hwc_init_q_depth_max = (u16)val;
			break;

		case HWC_INIT_DATA_MAX_REQUEST:
			hwc->hwc_init_max_req_msg_size = val;
			break;

		case HWC_INIT_DATA_MAX_RESPONSE:
			hwc->hwc_init_max_resp_msg_size = val;
			break;

		case HWC_INIT_DATA_MAX_NUM_CQS:
			gd->gdma_context->max_num_cqs = val;
			break;

		case HWC_INIT_DATA_PDID:
			hwc->gdma_dev->pdid = val;
			break;

		case HWC_INIT_DATA_GPA_MKEY:
			hwc->rxq->msg_buf->gpa_mkey = val;
			hwc->txq->msg_buf->gpa_mkey = val;
			break;
		}

		break;

	case GDMA_EQE_HWC_INIT_DONE:
		complete(&hwc->hwc_init_eqe_comp);
		break;

	default:
		/* Ignore unknown events, which should never happen. */
		break;
	}
}

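/* CQ callback for RQ completions: locate the work request that owns the
 * completed buffer, validate the message ID, hand the response to
 * mana_hwc_handle_resp() and repost the buffer.
 */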
static void mana_hwc_rx_event_handler(void *ctx, u32 gdma_rxq_id,
				      const struct hwc_rx_oob *rx_oob)
{
	struct hw_channel_context *hwc = ctx;
	struct hwc_wq *hwc_rxq = hwc->rxq;
	struct hwc_work_request *rx_req;
	struct gdma_resp_hdr *resp;
	struct gdma_wqe *dma_oob;
	struct gdma_queue *rq;
	struct gdma_sge *sge;
	u64 rq_base_addr;
	u64 rx_req_idx;
	u8 *wqe;

	if (WARN_ON_ONCE(hwc_rxq->gdma_wq->id != gdma_rxq_id))
		return;

	rq = hwc_rxq->gdma_wq;
	wqe = mana_gd_get_wqe_ptr(rq, rx_oob->wqe_offset / GDMA_WQE_BU_SIZE);
	dma_oob = (struct gdma_wqe *)wqe;

	sge = (struct gdma_sge *)(wqe + 8 + dma_oob->inline_oob_size_div4 * 4);

	/* Locate the RX work request that owns this buffer, both for its
	 * virtual address and for reposting.
	 */
	rq_base_addr = hwc_rxq->msg_buf->mem_info.dma_handle;
	rx_req_idx = (sge->address - rq_base_addr) / hwc->max_req_msg_size;

	rx_req = &hwc_rxq->msg_buf->reqs[rx_req_idx];
	resp = (struct gdma_resp_hdr *)rx_req->buf_va;

	if (resp->response.hwc_msg_id >= hwc->num_inflight_msg) {
		dev_err(hwc->dev, "HWC RX: wrong msg_id=%u\n",
			resp->response.hwc_msg_id);
		return;
	}

	mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, resp);

	/* Don't use 'resp' any longer: the buffer is handed back to the HW
	 * by mana_hwc_post_rx_wqe() below.
	 */
	resp = NULL;

	mana_hwc_post_rx_wqe(hwc_rxq, rx_req);
}

static void mana_hwc_tx_event_handler(void *ctx, u32 gdma_txq_id,
				      const struct hwc_rx_oob *rx_oob)
{
	struct hw_channel_context *hwc = ctx;
	struct hwc_wq *hwc_txq = hwc->txq;

	WARN_ON_ONCE(!hwc_txq || hwc_txq->gdma_wq->id != gdma_txq_id);
}

static int mana_hwc_create_gdma_wq(struct hw_channel_context *hwc,
				   enum gdma_queue_type type, u64 queue_size,
				   struct gdma_queue **queue)
{
	struct gdma_queue_spec spec = {};

	if (type != GDMA_SQ && type != GDMA_RQ)
		return -EINVAL;

	spec.type = type;
	spec.monitor_avl_buf = false;
	spec.queue_size = queue_size;

	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}

static int mana_hwc_create_gdma_cq(struct hw_channel_context *hwc,
				   u64 queue_size,
				   void *ctx, gdma_cq_callback *cb,
				   struct gdma_queue *parent_eq,
				   struct gdma_queue **queue)
{
	struct gdma_queue_spec spec = {};

	spec.type = GDMA_CQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = queue_size;
	spec.cq.context = ctx;
	spec.cq.callback = cb;
	spec.cq.parent_eq = parent_eq;

	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}

static int mana_hwc_create_gdma_eq(struct hw_channel_context *hwc,
				   u64 queue_size,
				   void *ctx, gdma_eq_callback *cb,
				   struct gdma_queue **queue)
{
	struct gdma_queue_spec spec = {};

	spec.type = GDMA_EQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = queue_size;
	spec.eq.context = ctx;
	spec.eq.callback = cb;
	spec.eq.log2_throttle_limit = DEFAULT_LOG2_THROTTLING_FOR_ERROR_EQ;

	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}

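/* Completion handler shared by the HWC SQ and RQ: poll the CQ, dispatch
 * each completion to the TX or RX handler based on is_sq, then re-arm the
 * CQ.
 */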
static void mana_hwc_comp_event(void *ctx, struct gdma_queue *q_self)
{
	struct hwc_rx_oob comp_data = {};
	struct gdma_comp *completions;
	struct hwc_cq *hwc_cq = ctx;
	int comp_read, i;

	WARN_ON_ONCE(hwc_cq->gdma_cq != q_self);

	completions = hwc_cq->comp_buf;
	comp_read = mana_gd_poll_cq(q_self, completions, hwc_cq->queue_depth);
	WARN_ON_ONCE(comp_read <= 0 || comp_read > hwc_cq->queue_depth);

	for (i = 0; i < comp_read; ++i) {
		comp_data = *(struct hwc_rx_oob *)completions[i].cqe_data;

		if (completions[i].is_sq)
			hwc_cq->tx_event_handler(hwc_cq->tx_event_ctx,
						completions[i].wq_num,
						&comp_data);
		else
			hwc_cq->rx_event_handler(hwc_cq->rx_event_ctx,
						completions[i].wq_num,
						&comp_data);
	}

	mana_gd_ring_cq(q_self, SET_ARM_BIT);
}

static void mana_hwc_destroy_cq(struct gdma_context *gc, struct hwc_cq *hwc_cq)
{
	kfree(hwc_cq->comp_buf);

	if (hwc_cq->gdma_cq)
		mana_gd_destroy_queue(gc, hwc_cq->gdma_cq);

	if (hwc_cq->gdma_eq)
		mana_gd_destroy_queue(gc, hwc_cq->gdma_eq);

	kfree(hwc_cq);
}

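/* Create the EQ, the CQ and the completion buffer used by the HW channel.
 * The single CQ serves both the send and the receive queue, so the caller
 * passes the sum of the SQ and RQ depths as q_depth.
 */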
static int mana_hwc_create_cq(struct hw_channel_context *hwc, u16 q_depth,
			      gdma_eq_callback *callback, void *ctx,
			      hwc_rx_event_handler_t *rx_ev_hdlr,
			      void *rx_ev_ctx,
			      hwc_tx_event_handler_t *tx_ev_hdlr,
			      void *tx_ev_ctx, struct hwc_cq **hwc_cq_ptr)
{
	struct gdma_queue *eq, *cq;
	struct gdma_comp *comp_buf;
	struct hwc_cq *hwc_cq;
	u32 eq_size, cq_size;
	int err;

	eq_size = roundup_pow_of_two(GDMA_EQE_SIZE * q_depth);
	if (eq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
		eq_size = MINIMUM_SUPPORTED_PAGE_SIZE;

	cq_size = roundup_pow_of_two(GDMA_CQE_SIZE * q_depth);
	if (cq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
		cq_size = MINIMUM_SUPPORTED_PAGE_SIZE;

	hwc_cq = kzalloc(sizeof(*hwc_cq), GFP_KERNEL);
	if (!hwc_cq)
		return -ENOMEM;

	err = mana_hwc_create_gdma_eq(hwc, eq_size, ctx, callback, &eq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC EQ for RQ: %d\n", err);
		goto out;
	}
	hwc_cq->gdma_eq = eq;

	err = mana_hwc_create_gdma_cq(hwc, cq_size, hwc_cq, mana_hwc_comp_event,
				      eq, &cq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC CQ for RQ: %d\n", err);
		goto out;
	}
	hwc_cq->gdma_cq = cq;

	comp_buf = kcalloc(q_depth, sizeof(*comp_buf), GFP_KERNEL);
	if (!comp_buf) {
		err = -ENOMEM;
		goto out;
	}

	hwc_cq->hwc = hwc;
	hwc_cq->comp_buf = comp_buf;
	hwc_cq->queue_depth = q_depth;
	hwc_cq->rx_event_handler = rx_ev_hdlr;
	hwc_cq->rx_event_ctx = rx_ev_ctx;
	hwc_cq->tx_event_handler = tx_ev_hdlr;
	hwc_cq->tx_event_ctx = tx_ev_ctx;

	*hwc_cq_ptr = hwc_cq;
	return 0;
out:
	mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc_cq);
	return err;
}

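/* Allocate a single DMA buffer of q_depth * max_msg_size bytes and slice it
 * into per-entry message buffers, one hwc_work_request per queue entry.
 */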
static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth,
				  u32 max_msg_size,
				  struct hwc_dma_buf **dma_buf_ptr)
{
	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
	struct hwc_work_request *hwc_wr;
	struct hwc_dma_buf *dma_buf;
	struct gdma_mem_info *gmi;
	void *virt_addr;
	u32 buf_size;
	u8 *base_pa;
	int err;
	u16 i;

	dma_buf = kzalloc(struct_size(dma_buf, reqs, q_depth), GFP_KERNEL);
	if (!dma_buf)
		return -ENOMEM;

	dma_buf->num_reqs = q_depth;

	buf_size = PAGE_ALIGN(q_depth * max_msg_size);

	gmi = &dma_buf->mem_info;
	err = mana_gd_alloc_memory(gc, buf_size, gmi);
	if (err) {
		dev_err(hwc->dev, "Failed to allocate DMA buffer: %d\n", err);
		goto out;
	}

	virt_addr = dma_buf->mem_info.virt_addr;
	base_pa = (u8 *)dma_buf->mem_info.dma_handle;

	for (i = 0; i < q_depth; i++) {
		hwc_wr = &dma_buf->reqs[i];

		hwc_wr->buf_va = virt_addr + i * max_msg_size;
		hwc_wr->buf_sge_addr = base_pa + i * max_msg_size;

		hwc_wr->buf_len = max_msg_size;
	}

	*dma_buf_ptr = dma_buf;
	return 0;
out:
	kfree(dma_buf);
	return err;
}

static void mana_hwc_dealloc_dma_buf(struct hw_channel_context *hwc,
				     struct hwc_dma_buf *dma_buf)
{
	if (!dma_buf)
		return;

	mana_gd_free_memory(&dma_buf->mem_info);

	kfree(dma_buf);
}

static void mana_hwc_destroy_wq(struct hw_channel_context *hwc,
				struct hwc_wq *hwc_wq)
{
	mana_hwc_dealloc_dma_buf(hwc, hwc_wq->msg_buf);

	if (hwc_wq->gdma_wq)
		mana_gd_destroy_queue(hwc->gdma_dev->gdma_context,
				      hwc_wq->gdma_wq);

	kfree(hwc_wq);
}

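/* Create a HWC send or receive work queue plus its per-entry message
 * buffers; hwc_cq is the shared completion queue the new WQ reports to.
 */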
static int mana_hwc_create_wq(struct hw_channel_context *hwc,
			      enum gdma_queue_type q_type, u16 q_depth,
			      u32 max_msg_size, struct hwc_cq *hwc_cq,
			      struct hwc_wq **hwc_wq_ptr)
{
	struct gdma_queue *queue;
	struct hwc_wq *hwc_wq;
	u32 queue_size;
	int err;

	WARN_ON(q_type != GDMA_SQ && q_type != GDMA_RQ);

	if (q_type == GDMA_RQ)
		queue_size = roundup_pow_of_two(GDMA_MAX_RQE_SIZE * q_depth);
	else
		queue_size = roundup_pow_of_two(GDMA_MAX_SQE_SIZE * q_depth);

	if (queue_size < MINIMUM_SUPPORTED_PAGE_SIZE)
		queue_size = MINIMUM_SUPPORTED_PAGE_SIZE;

	hwc_wq = kzalloc(sizeof(*hwc_wq), GFP_KERNEL);
	if (!hwc_wq)
		return -ENOMEM;

	err = mana_hwc_create_gdma_wq(hwc, q_type, queue_size, &queue);
	if (err)
		goto out;

	hwc_wq->hwc = hwc;
	hwc_wq->gdma_wq = queue;
	hwc_wq->queue_depth = q_depth;
	hwc_wq->hwc_cq = hwc_cq;

	err = mana_hwc_alloc_dma_buf(hwc, q_depth, max_msg_size,
				     &hwc_wq->msg_buf);
	if (err)
		goto out;

	*hwc_wq_ptr = hwc_wq;
	return 0;
out:
	if (err)
		mana_hwc_destroy_wq(hwc, hwc_wq);
	return err;
}

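/* Build the TX OOB and the SGE for one request buffer and post it on the
 * HWC SQ. dest_virt_rq_id/dest_virt_rcq_id are 0 and dest_pf is false for
 * requests sent by mana_hwc_send_request().
 */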
static int mana_hwc_post_tx_wqe(const struct hwc_wq *hwc_txq,
				struct hwc_work_request *req,
				u32 dest_virt_rq_id, u32 dest_virt_rcq_id,
				bool dest_pf)
{
	struct device *dev = hwc_txq->hwc->dev;
	struct hwc_tx_oob *tx_oob;
	struct gdma_sge *sge;
	int err;

	if (req->msg_size == 0 || req->msg_size > req->buf_len) {
		dev_err(dev, "wrong msg_size: %u, buf_len: %u\n",
			req->msg_size, req->buf_len);
		return -EINVAL;
	}

	tx_oob = &req->tx_oob;

	tx_oob->vrq_id = dest_virt_rq_id;
	tx_oob->dest_vfid = 0;
	tx_oob->vrcq_id = dest_virt_rcq_id;
	tx_oob->vscq_id = hwc_txq->hwc_cq->gdma_cq->id;
	tx_oob->loopback = false;
	tx_oob->lso_override = false;
	tx_oob->dest_pf = dest_pf;
	tx_oob->vsq_id = hwc_txq->gdma_wq->id;

	sge = &req->sge;
	sge->address = (u64)req->buf_sge_addr;
	sge->mem_key = hwc_txq->msg_buf->gpa_mkey;
	sge->size = req->msg_size;

	memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
	req->wqe_req.sgl = sge;
	req->wqe_req.num_sge = 1;
	req->wqe_req.inline_oob_size = sizeof(struct hwc_tx_oob);
	req->wqe_req.inline_oob_data = tx_oob;
	req->wqe_req.client_data_unit = 0;

	err = mana_gd_post_and_ring(hwc_txq->gdma_wq, &req->wqe_req, NULL);
	if (err)
		dev_err(dev, "Failed to post WQE on HWC SQ: %d\n", err);
	return err;
}

static int mana_hwc_init_inflight_msg(struct hw_channel_context *hwc,
				      u16 num_msg)
{
	int err;

	sema_init(&hwc->sema, num_msg);

	err = mana_gd_alloc_res_map(num_msg, &hwc->inflight_msg_res);
	if (err)
		dev_err(hwc->dev, "Failed to init inflight_msg_res: %d\n", err);
	return err;
}

static int mana_hwc_test_channel(struct hw_channel_context *hwc, u16 q_depth,
				 u32 max_req_msg_size, u32 max_resp_msg_size)
{
	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
	struct hwc_wq *hwc_rxq = hwc->rxq;
	struct hwc_work_request *req;
	struct hwc_caller_ctx *ctx;
	int err;
	int i;

	/* Post all WQEs on the RQ */
	for (i = 0; i < q_depth; i++) {
		req = &hwc_rxq->msg_buf->reqs[i];
		err = mana_hwc_post_rx_wqe(hwc_rxq, req);
		if (err)
			return err;
	}

	ctx = kcalloc(q_depth, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	for (i = 0; i < q_depth; ++i)
		init_completion(&ctx[i].comp_event);

	hwc->caller_ctx = ctx;

	return mana_gd_test_eq(gc, hwc->cq->gdma_eq);
}

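/* Hand the EQ/CQ/RQ/SQ base addresses to the device over the shared memory
 * channel and wait for the INIT_DONE EQE; the queue IDs and limits filled in
 * by mana_hwc_init_event_handler() are returned to the caller.
 */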
static int mana_hwc_establish_channel(struct gdma_context *gc, u16 *q_depth,
				      u32 *max_req_msg_size,
				      u32 *max_resp_msg_size)
{
	struct hw_channel_context *hwc = gc->hwc.driver_data;
	struct gdma_queue *rq = hwc->rxq->gdma_wq;
	struct gdma_queue *sq = hwc->txq->gdma_wq;
	struct gdma_queue *eq = hwc->cq->gdma_eq;
	struct gdma_queue *cq = hwc->cq->gdma_cq;
	int err;

	init_completion(&hwc->hwc_init_eqe_comp);

	err = mana_smc_setup_hwc(&gc->shm_channel, false,
				 eq->mem_info.dma_handle,
				 cq->mem_info.dma_handle,
				 rq->mem_info.dma_handle,
				 sq->mem_info.dma_handle,
				 eq->eq.msix_index);
	if (err)
		return err;

	if (!wait_for_completion_timeout(&hwc->hwc_init_eqe_comp, 60 * HZ))
		return -ETIMEDOUT;

	*q_depth = hwc->hwc_init_q_depth_max;
	*max_req_msg_size = hwc->hwc_init_max_req_msg_size;
	*max_resp_msg_size = hwc->hwc_init_max_resp_msg_size;

	/* Both were set in mana_hwc_init_event_handler(). */
	if (WARN_ON(cq->id >= gc->max_num_cqs))
		return -EPROTO;

	gc->cq_table = vzalloc(gc->max_num_cqs * sizeof(struct gdma_queue *));
	if (!gc->cq_table)
		return -ENOMEM;

	gc->cq_table[cq->id] = cq;

	return 0;
}

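/* Allocate the in-flight message tracking, the shared CQ and the two work
 * queues. This only sets up the driver-side data structures; nothing is
 * sent to the device yet.
 */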
static int mana_hwc_init_queues(struct hw_channel_context *hwc, u16 q_depth,
				u32 max_req_msg_size, u32 max_resp_msg_size)
{
	int err;

	err = mana_hwc_init_inflight_msg(hwc, q_depth);
	if (err)
		return err;

	/* CQ is shared by SQ and RQ, so CQ's queue depth is the sum of SQ
	 * queue depth and RQ queue depth.
	 */
	err = mana_hwc_create_cq(hwc, q_depth * 2,
				 mana_hwc_init_event_handler, hwc,
				 mana_hwc_rx_event_handler, hwc,
				 mana_hwc_tx_event_handler, hwc, &hwc->cq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC CQ: %d\n", err);
		goto out;
	}

	err = mana_hwc_create_wq(hwc, GDMA_RQ, q_depth, max_req_msg_size,
				 hwc->cq, &hwc->rxq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC RQ: %d\n", err);
		goto out;
	}

	err = mana_hwc_create_wq(hwc, GDMA_SQ, q_depth, max_resp_msg_size,
				 hwc->cq, &hwc->txq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC SQ: %d\n", err);
		goto out;
	}

	hwc->num_inflight_msg = q_depth;
	hwc->max_req_msg_size = max_req_msg_size;

	return 0;
out:
	/* mana_hwc_create_channel() will do the cleanup. */
	return err;
}

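/* Bring up the HW channel: create the queues, establish the channel with
 * the device via the shared memory channel, then post the initial RX
 * buffers and verify the EQ with a test event. On failure everything is
 * torn down again.
 */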
int mana_hwc_create_channel(struct gdma_context *gc)
{
	u32 max_req_msg_size, max_resp_msg_size;
	struct gdma_dev *gd = &gc->hwc;
	struct hw_channel_context *hwc;
	u16 q_depth_max;
	int err;

	hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
	if (!hwc)
		return -ENOMEM;

	gd->gdma_context = gc;
	gd->driver_data = hwc;
	hwc->gdma_dev = gd;
	hwc->dev = gc->dev;

	/* HWC's instance number is always 0. */
	gd->dev_id.as_uint32 = 0;
	gd->dev_id.type = GDMA_DEVICE_HWC;

	gd->pdid = INVALID_PDID;
	gd->doorbell = INVALID_DOORBELL;

	/* mana_hwc_init_queues() only creates the required data structures,
	 * and doesn't touch the HWC device.
	 */
	err = mana_hwc_init_queues(hwc, HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH,
				   HW_CHANNEL_MAX_REQUEST_SIZE,
				   HW_CHANNEL_MAX_RESPONSE_SIZE);
	if (err) {
		dev_err(hwc->dev, "Failed to initialize HWC: %d\n", err);
		goto out;
	}

	err = mana_hwc_establish_channel(gc, &q_depth_max, &max_req_msg_size,
					 &max_resp_msg_size);
	if (err) {
		dev_err(hwc->dev, "Failed to establish HWC: %d\n", err);
		goto out;
	}

	err = mana_hwc_test_channel(gc->hwc.driver_data,
				    HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH,
				    max_req_msg_size, max_resp_msg_size);
	if (err) {
		dev_err(hwc->dev, "Failed to test HWC: %d\n", err);
		goto out;
	}

	return 0;
out:
	mana_hwc_destroy_channel(gc);
	return err;
}

void mana_hwc_destroy_channel(struct gdma_context *gc)
{
	struct hw_channel_context *hwc = gc->hwc.driver_data;

	if (!hwc)
		return;

	/* gc->max_num_cqs is set in mana_hwc_init_event_handler(). If it's
	 * non-zero, the HWC worked and we should tear down the HWC here.
	 */
	if (gc->max_num_cqs > 0) {
		mana_smc_teardown_hwc(&gc->shm_channel, false);
		gc->max_num_cqs = 0;
	}

	kfree(hwc->caller_ctx);
	hwc->caller_ctx = NULL;

	if (hwc->txq)
		mana_hwc_destroy_wq(hwc, hwc->txq);

	if (hwc->rxq)
		mana_hwc_destroy_wq(hwc, hwc->rxq);

	if (hwc->cq)
		mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc->cq);

	mana_gd_free_res_map(&hwc->inflight_msg_res);

	hwc->num_inflight_msg = 0;

	hwc->gdma_dev->doorbell = INVALID_DOORBELL;
	hwc->gdma_dev->pdid = INVALID_PDID;

	kfree(hwc);
	gc->hwc.driver_data = NULL;
	gc->hwc.gdma_context = NULL;

	vfree(gc->cq_table);
	gc->cq_table = NULL;
}

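/* Send one request over the HW channel and wait (up to 30 seconds) for the
 * response, which is copied into 'resp' by mana_hwc_handle_resp(). Returns
 * 0 on success or a negative errno on failure.
 */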
int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
			  const void *req, u32 resp_len, void *resp)
{
	struct hwc_work_request *tx_wr;
	struct hwc_wq *txq = hwc->txq;
	struct gdma_req_hdr *req_msg;
	struct hwc_caller_ctx *ctx;
	u16 msg_id;
	int err;

	mana_hwc_get_msg_index(hwc, &msg_id);

	tx_wr = &txq->msg_buf->reqs[msg_id];

	if (req_len > tx_wr->buf_len) {
		dev_err(hwc->dev, "HWC: req msg size: %d > %d\n", req_len,
			tx_wr->buf_len);
		err = -EINVAL;
		goto out;
	}

	ctx = hwc->caller_ctx + msg_id;
	ctx->output_buf = resp;
	ctx->output_buflen = resp_len;

	req_msg = (struct gdma_req_hdr *)tx_wr->buf_va;
	if (req)
		memcpy(req_msg, req, req_len);

	req_msg->req.hwc_msg_id = msg_id;

	tx_wr->msg_size = req_len;

	err = mana_hwc_post_tx_wqe(txq, tx_wr, 0, 0, false);
	if (err) {
		dev_err(hwc->dev, "HWC: Failed to post send WQE: %d\n", err);
		goto out;
	}

	if (!wait_for_completion_timeout(&ctx->comp_event, 30 * HZ)) {
		dev_err(hwc->dev, "HWC: Request timed out!\n");
		err = -ETIMEDOUT;
		goto out;
	}

	if (ctx->error) {
		err = ctx->error;
		goto out;
	}

	if (ctx->status_code) {
		dev_err(hwc->dev, "HWC: Failed hw_channel req: 0x%x\n",
			ctx->status_code);
		err = -EPROTO;
		goto out;
	}
out:
	mana_hwc_put_msg_index(hwc, msg_id);
	return err;
}