// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */

#include <net/mana/gdma.h>
#include <net/mana/hw_channel.h>

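/* Reserve a free message ID for a new request. The semaphore counts free
 * slots, so once down() returns, find_first_zero_bit() is guaranteed to
 * find a free bit in the in-flight bitmap.
 */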
static int mana_hwc_get_msg_index(struct hw_channel_context *hwc, u16 *msg_id)
{
	struct gdma_resource *r = &hwc->inflight_msg_res;
	unsigned long flags;
	u32 index;

	down(&hwc->sema);

	spin_lock_irqsave(&r->lock, flags);

	index = find_first_zero_bit(hwc->inflight_msg_res.map,
				    hwc->inflight_msg_res.size);

	bitmap_set(hwc->inflight_msg_res.map, index, 1);

	spin_unlock_irqrestore(&r->lock, flags);

	*msg_id = index;

	return 0;
}

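/* Release a message ID reserved by mana_hwc_get_msg_index() and wake up
 * one waiter, if any.
 */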
static void mana_hwc_put_msg_index(struct hw_channel_context *hwc, u16 msg_id)
{
	struct gdma_resource *r = &hwc->inflight_msg_res;
	unsigned long flags;

	spin_lock_irqsave(&r->lock, flags);
	bitmap_clear(hwc->inflight_msg_res.map, msg_id, 1);
	spin_unlock_irqrestore(&r->lock, flags);

	up(&hwc->sema);
}

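/* A valid response must carry at least a full gdma_resp_hdr and must fit
 * in the caller's output buffer.
 */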
static int mana_hwc_verify_resp_msg(const struct hwc_caller_ctx *caller_ctx,
				    const struct gdma_resp_hdr *resp_msg,
				    u32 resp_len)
{
	if (resp_len < sizeof(*resp_msg))
		return -EPROTO;

	if (resp_len > caller_ctx->output_buflen)
		return -EPROTO;

	return 0;
}

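/* Post a receive WQE covering the request's whole buffer, making it
 * available to the HW for the next incoming response.
 */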
static int mana_hwc_post_rx_wqe(const struct hwc_wq *hwc_rxq,
				struct hwc_work_request *req)
{
	struct device *dev = hwc_rxq->hwc->dev;
	struct gdma_sge *sge;
	int err;

	sge = &req->sge;
	sge->address = (u64)req->buf_sge_addr;
	sge->mem_key = hwc_rxq->msg_buf->gpa_mkey;
	sge->size = req->buf_len;

	memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
	req->wqe_req.sgl = sge;
	req->wqe_req.num_sge = 1;
	req->wqe_req.client_data_unit = 0;

	err = mana_gd_post_and_ring(hwc_rxq->gdma_wq, &req->wqe_req, NULL);
	if (err)
		dev_err(dev, "Failed to post WQE on HWC RQ: %d\n", err);
	return err;
}

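/* Deliver a received response to the waiting caller: validate the message
 * ID, copy the payload into the caller's output buffer, repost the RX WQE,
 * and only then signal completion.
 */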
static void mana_hwc_handle_resp(struct hw_channel_context *hwc, u32 resp_len,
				 struct hwc_work_request *rx_req)
{
	const struct gdma_resp_hdr *resp_msg = rx_req->buf_va;
	struct hwc_caller_ctx *ctx;
	int err;

	if (!test_bit(resp_msg->response.hwc_msg_id,
		      hwc->inflight_msg_res.map)) {
		dev_err(hwc->dev, "hwc_rx: invalid msg_id = %u\n",
			resp_msg->response.hwc_msg_id);
		mana_hwc_post_rx_wqe(hwc->rxq, rx_req);
		return;
	}

	ctx = hwc->caller_ctx + resp_msg->response.hwc_msg_id;
	err = mana_hwc_verify_resp_msg(ctx, resp_msg, resp_len);
	if (err)
		goto out;

	ctx->status_code = resp_msg->status;

	memcpy(ctx->output_buf, resp_msg, resp_len);
out:
	ctx->error = err;

	/* Must post rx wqe before complete(), otherwise the next rx may
	 * hit no_wqe error.
	 */
	mana_hwc_post_rx_wqe(hwc->rxq, rx_req);

	complete(&ctx->comp_event);
}

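/* EQ callback used while the channel is being established: the HW reports
 * the queue IDs, queue depth, max message sizes, etc. via these events and
 * signals GDMA_EQE_HWC_INIT_DONE when the initial setup is complete. It
 * also handles runtime SOC reconfiguration events such as a new HWC
 * timeout.
 */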
static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
					struct gdma_event *event)
{
	struct hw_channel_context *hwc = ctx;
	struct gdma_dev *gd = hwc->gdma_dev;
	union hwc_init_type_data type_data;
	union hwc_init_eq_id_db eq_db;
	u32 type, val;

	switch (event->type) {
	case GDMA_EQE_HWC_INIT_EQ_ID_DB:
		eq_db.as_uint32 = event->details[0];
		hwc->cq->gdma_eq->id = eq_db.eq_id;
		gd->doorbell = eq_db.doorbell;
		break;

	case GDMA_EQE_HWC_INIT_DATA:
		type_data.as_uint32 = event->details[0];
		type = type_data.type;
		val = type_data.value;

		switch (type) {
		case HWC_INIT_DATA_CQID:
			hwc->cq->gdma_cq->id = val;
			break;

		case HWC_INIT_DATA_RQID:
			hwc->rxq->gdma_wq->id = val;
			break;

		case HWC_INIT_DATA_SQID:
			hwc->txq->gdma_wq->id = val;
			break;

		case HWC_INIT_DATA_QUEUE_DEPTH:
			hwc->hwc_init_q_depth_max = (u16)val;
			break;

		case HWC_INIT_DATA_MAX_REQUEST:
			hwc->hwc_init_max_req_msg_size = val;
			break;

		case HWC_INIT_DATA_MAX_RESPONSE:
			hwc->hwc_init_max_resp_msg_size = val;
			break;

		case HWC_INIT_DATA_MAX_NUM_CQS:
			gd->gdma_context->max_num_cqs = val;
			break;

		case HWC_INIT_DATA_PDID:
			hwc->gdma_dev->pdid = val;
			break;

		case HWC_INIT_DATA_GPA_MKEY:
			hwc->rxq->msg_buf->gpa_mkey = val;
			hwc->txq->msg_buf->gpa_mkey = val;
			break;

		case HWC_INIT_DATA_PF_DEST_RQ_ID:
			hwc->pf_dest_vrq_id = val;
			break;

		case HWC_INIT_DATA_PF_DEST_CQ_ID:
			hwc->pf_dest_vrcq_id = val;
			break;
		}

		break;

	case GDMA_EQE_HWC_INIT_DONE:
		complete(&hwc->hwc_init_eqe_comp);
		break;

	case GDMA_EQE_HWC_SOC_RECONFIG_DATA:
		type_data.as_uint32 = event->details[0];
		type = type_data.type;
		val = type_data.value;

		switch (type) {
		case HWC_DATA_CFG_HWC_TIMEOUT:
			hwc->hwc_timeout = val;
			break;

		default:
			dev_warn(hwc->dev, "Received unknown reconfig type %u\n", type);
			break;
		}

		break;

	default:
		dev_warn(hwc->dev, "Received unknown gdma event %u\n", event->type);
		/* Ignore unknown events, which should never happen. */
		break;
	}
}

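/* CQ callback for the HWC RQ: locate the work request behind the completed
 * WQE (derived from its SGE address) and hand the response buffer to
 * mana_hwc_handle_resp().
 */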
static void mana_hwc_rx_event_handler(void *ctx, u32 gdma_rxq_id,
				      const struct hwc_rx_oob *rx_oob)
{
	struct hw_channel_context *hwc = ctx;
	struct hwc_wq *hwc_rxq = hwc->rxq;
	struct hwc_work_request *rx_req;
	struct gdma_resp_hdr *resp;
	struct gdma_wqe *dma_oob;
	struct gdma_queue *rq;
	struct gdma_sge *sge;
	u64 rq_base_addr;
	u64 rx_req_idx;
	u8 *wqe;

	if (WARN_ON_ONCE(hwc_rxq->gdma_wq->id != gdma_rxq_id))
		return;

	rq = hwc_rxq->gdma_wq;
	wqe = mana_gd_get_wqe_ptr(rq, rx_oob->wqe_offset / GDMA_WQE_BU_SIZE);
	dma_oob = (struct gdma_wqe *)wqe;

	sge = (struct gdma_sge *)(wqe + 8 + dma_oob->inline_oob_size_div4 * 4);

	/* Select the RX work request for virtual address and for reposting. */
	rq_base_addr = hwc_rxq->msg_buf->mem_info.dma_handle;
	rx_req_idx = (sge->address - rq_base_addr) / hwc->max_req_msg_size;

	rx_req = &hwc_rxq->msg_buf->reqs[rx_req_idx];
	resp = (struct gdma_resp_hdr *)rx_req->buf_va;

	if (resp->response.hwc_msg_id >= hwc->num_inflight_msg) {
		dev_err(hwc->dev, "HWC RX: wrong msg_id=%u\n",
			resp->response.hwc_msg_id);
		return;
	}

	mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, rx_req);

	/* Can no longer use 'resp', because the buffer is posted to the HW
	 * in mana_hwc_handle_resp() above.
	 */
	resp = NULL;
}

static void mana_hwc_tx_event_handler(void *ctx, u32 gdma_txq_id,
				      const struct hwc_rx_oob *rx_oob)
{
	struct hw_channel_context *hwc = ctx;
	struct hwc_wq *hwc_txq = hwc->txq;

	WARN_ON_ONCE(!hwc_txq || hwc_txq->gdma_wq->id != gdma_txq_id);
}

static int mana_hwc_create_gdma_wq(struct hw_channel_context *hwc,
				   enum gdma_queue_type type, u64 queue_size,
				   struct gdma_queue **queue)
{
	struct gdma_queue_spec spec = {};

	if (type != GDMA_SQ && type != GDMA_RQ)
		return -EINVAL;

	spec.type = type;
	spec.monitor_avl_buf = false;
	spec.queue_size = queue_size;

	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}

static int mana_hwc_create_gdma_cq(struct hw_channel_context *hwc,
				   u64 queue_size,
				   void *ctx, gdma_cq_callback *cb,
				   struct gdma_queue *parent_eq,
				   struct gdma_queue **queue)
{
	struct gdma_queue_spec spec = {};

	spec.type = GDMA_CQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = queue_size;
	spec.cq.context = ctx;
	spec.cq.callback = cb;
	spec.cq.parent_eq = parent_eq;

	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}

static int mana_hwc_create_gdma_eq(struct hw_channel_context *hwc,
				   u64 queue_size,
				   void *ctx, gdma_eq_callback *cb,
				   struct gdma_queue **queue)
{
	struct gdma_queue_spec spec = {};

	spec.type = GDMA_EQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = queue_size;
	spec.eq.context = ctx;
	spec.eq.callback = cb;
	spec.eq.log2_throttle_limit = DEFAULT_LOG2_THROTTLING_FOR_ERROR_EQ;

	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}

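/* CQ callback shared by the HWC SQ and RQ: drain the completions, dispatch
 * each one to the TX or RX event handler, then re-arm the CQ.
 */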
static void mana_hwc_comp_event(void *ctx, struct gdma_queue *q_self)
{
	struct hwc_rx_oob comp_data = {};
	struct gdma_comp *completions;
	struct hwc_cq *hwc_cq = ctx;
	int comp_read, i;

	WARN_ON_ONCE(hwc_cq->gdma_cq != q_self);

	completions = hwc_cq->comp_buf;
	comp_read = mana_gd_poll_cq(q_self, completions, hwc_cq->queue_depth);
	WARN_ON_ONCE(comp_read <= 0 || comp_read > hwc_cq->queue_depth);

	for (i = 0; i < comp_read; ++i) {
		comp_data = *(struct hwc_rx_oob *)completions[i].cqe_data;

		if (completions[i].is_sq)
			hwc_cq->tx_event_handler(hwc_cq->tx_event_ctx,
						 completions[i].wq_num,
						 &comp_data);
		else
			hwc_cq->rx_event_handler(hwc_cq->rx_event_ctx,
						 completions[i].wq_num,
						 &comp_data);
	}

	mana_gd_ring_cq(q_self, SET_ARM_BIT);
}

static void mana_hwc_destroy_cq(struct gdma_context *gc, struct hwc_cq *hwc_cq)
{
	kfree(hwc_cq->comp_buf);

	if (hwc_cq->gdma_cq)
		mana_gd_destroy_queue(gc, hwc_cq->gdma_cq);

	if (hwc_cq->gdma_eq)
		mana_gd_destroy_queue(gc, hwc_cq->gdma_eq);

	kfree(hwc_cq);
}

static int mana_hwc_create_cq(struct hw_channel_context *hwc, u16 q_depth,
			      gdma_eq_callback *callback, void *ctx,
			      hwc_rx_event_handler_t *rx_ev_hdlr,
			      void *rx_ev_ctx,
			      hwc_tx_event_handler_t *tx_ev_hdlr,
			      void *tx_ev_ctx, struct hwc_cq **hwc_cq_ptr)
{
	struct gdma_queue *eq, *cq;
	struct gdma_comp *comp_buf;
	struct hwc_cq *hwc_cq;
	u32 eq_size, cq_size;
	int err;

	eq_size = roundup_pow_of_two(GDMA_EQE_SIZE * q_depth);
	if (eq_size < MANA_MIN_QSIZE)
		eq_size = MANA_MIN_QSIZE;

	cq_size = roundup_pow_of_two(GDMA_CQE_SIZE * q_depth);
	if (cq_size < MANA_MIN_QSIZE)
		cq_size = MANA_MIN_QSIZE;

	hwc_cq = kzalloc(sizeof(*hwc_cq), GFP_KERNEL);
	if (!hwc_cq)
		return -ENOMEM;

	err = mana_hwc_create_gdma_eq(hwc, eq_size, ctx, callback, &eq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC EQ for RQ: %d\n", err);
		goto out;
	}
	hwc_cq->gdma_eq = eq;

	err = mana_hwc_create_gdma_cq(hwc, cq_size, hwc_cq, mana_hwc_comp_event,
				      eq, &cq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC CQ for RQ: %d\n", err);
		goto out;
	}
	hwc_cq->gdma_cq = cq;

	comp_buf = kcalloc(q_depth, sizeof(*comp_buf), GFP_KERNEL);
	if (!comp_buf) {
		err = -ENOMEM;
		goto out;
	}

	hwc_cq->hwc = hwc;
	hwc_cq->comp_buf = comp_buf;
	hwc_cq->queue_depth = q_depth;
	hwc_cq->rx_event_handler = rx_ev_hdlr;
	hwc_cq->rx_event_ctx = rx_ev_ctx;
	hwc_cq->tx_event_handler = tx_ev_hdlr;
	hwc_cq->tx_event_ctx = tx_ev_ctx;

	*hwc_cq_ptr = hwc_cq;
	return 0;
out:
	mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc_cq);
	return err;
}

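/* Allocate one contiguous DMA region and carve it into q_depth fixed-size
 * message buffers: reqs[i] describes the i-th max_msg_size slice.
 */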
static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth,
				  u32 max_msg_size,
				  struct hwc_dma_buf **dma_buf_ptr)
{
	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
	struct hwc_work_request *hwc_wr;
	struct hwc_dma_buf *dma_buf;
	struct gdma_mem_info *gmi;
	void *virt_addr;
	u32 buf_size;
	u8 *base_pa;
	int err;
	u16 i;

	dma_buf = kzalloc(struct_size(dma_buf, reqs, q_depth), GFP_KERNEL);
	if (!dma_buf)
		return -ENOMEM;

	dma_buf->num_reqs = q_depth;

	buf_size = MANA_PAGE_ALIGN(q_depth * max_msg_size);

	gmi = &dma_buf->mem_info;
	err = mana_gd_alloc_memory(gc, buf_size, gmi);
	if (err) {
		dev_err(hwc->dev, "Failed to allocate DMA buffer: %d\n", err);
		goto out;
	}

	virt_addr = dma_buf->mem_info.virt_addr;
	base_pa = (u8 *)dma_buf->mem_info.dma_handle;

	for (i = 0; i < q_depth; i++) {
		hwc_wr = &dma_buf->reqs[i];

		hwc_wr->buf_va = virt_addr + i * max_msg_size;
		hwc_wr->buf_sge_addr = base_pa + i * max_msg_size;

		hwc_wr->buf_len = max_msg_size;
	}

	*dma_buf_ptr = dma_buf;
	return 0;
out:
	kfree(dma_buf);
	return err;
}

static void mana_hwc_dealloc_dma_buf(struct hw_channel_context *hwc,
				     struct hwc_dma_buf *dma_buf)
{
	if (!dma_buf)
		return;

	mana_gd_free_memory(&dma_buf->mem_info);

	kfree(dma_buf);
}

static void mana_hwc_destroy_wq(struct hw_channel_context *hwc,
				struct hwc_wq *hwc_wq)
{
	mana_hwc_dealloc_dma_buf(hwc, hwc_wq->msg_buf);

	if (hwc_wq->gdma_wq)
		mana_gd_destroy_queue(hwc->gdma_dev->gdma_context,
				      hwc_wq->gdma_wq);

	kfree(hwc_wq);
}

static int mana_hwc_create_wq(struct hw_channel_context *hwc,
			      enum gdma_queue_type q_type, u16 q_depth,
			      u32 max_msg_size, struct hwc_cq *hwc_cq,
			      struct hwc_wq **hwc_wq_ptr)
{
	struct gdma_queue *queue;
	struct hwc_wq *hwc_wq;
	u32 queue_size;
	int err;

	WARN_ON(q_type != GDMA_SQ && q_type != GDMA_RQ);

	if (q_type == GDMA_RQ)
		queue_size = roundup_pow_of_two(GDMA_MAX_RQE_SIZE * q_depth);
	else
		queue_size = roundup_pow_of_two(GDMA_MAX_SQE_SIZE * q_depth);

	if (queue_size < MANA_MIN_QSIZE)
		queue_size = MANA_MIN_QSIZE;

	hwc_wq = kzalloc(sizeof(*hwc_wq), GFP_KERNEL);
	if (!hwc_wq)
		return -ENOMEM;

	err = mana_hwc_create_gdma_wq(hwc, q_type, queue_size, &queue);
	if (err)
		goto out;

	hwc_wq->hwc = hwc;
	hwc_wq->gdma_wq = queue;
	hwc_wq->queue_depth = q_depth;
	hwc_wq->hwc_cq = hwc_cq;

	err = mana_hwc_alloc_dma_buf(hwc, q_depth, max_msg_size,
				     &hwc_wq->msg_buf);
	if (err)
		goto out;

	*hwc_wq_ptr = hwc_wq;
	return 0;
out:
	if (err)
		mana_hwc_destroy_wq(hwc, hwc_wq);
	return err;
}

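/* Post a send WQE for a request. The routing information (destination
 * virtual RQ/RCQ and the PF flag) travels in the inline OOB that precedes
 * the payload SGE.
 */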
static int mana_hwc_post_tx_wqe(const struct hwc_wq *hwc_txq,
				struct hwc_work_request *req,
				u32 dest_virt_rq_id, u32 dest_virt_rcq_id,
				bool dest_pf)
{
	struct device *dev = hwc_txq->hwc->dev;
	struct hwc_tx_oob *tx_oob;
	struct gdma_sge *sge;
	int err;

	if (req->msg_size == 0 || req->msg_size > req->buf_len) {
		dev_err(dev, "wrong msg_size: %u, buf_len: %u\n",
			req->msg_size, req->buf_len);
		return -EINVAL;
	}

	tx_oob = &req->tx_oob;

	tx_oob->vrq_id = dest_virt_rq_id;
	tx_oob->dest_vfid = 0;
	tx_oob->vrcq_id = dest_virt_rcq_id;
	tx_oob->vscq_id = hwc_txq->hwc_cq->gdma_cq->id;
	tx_oob->loopback = false;
	tx_oob->lso_override = false;
	tx_oob->dest_pf = dest_pf;
	tx_oob->vsq_id = hwc_txq->gdma_wq->id;

	sge = &req->sge;
	sge->address = (u64)req->buf_sge_addr;
	sge->mem_key = hwc_txq->msg_buf->gpa_mkey;
	sge->size = req->msg_size;

	memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
	req->wqe_req.sgl = sge;
	req->wqe_req.num_sge = 1;
	req->wqe_req.inline_oob_size = sizeof(struct hwc_tx_oob);
	req->wqe_req.inline_oob_data = tx_oob;
	req->wqe_req.client_data_unit = 0;

	err = mana_gd_post_and_ring(hwc_txq->gdma_wq, &req->wqe_req, NULL);
	if (err)
		dev_err(dev, "Failed to post WQE on HWC SQ: %d\n", err);
	return err;
}

static int mana_hwc_init_inflight_msg(struct hw_channel_context *hwc,
				      u16 num_msg)
{
	int err;

	sema_init(&hwc->sema, num_msg);

	err = mana_gd_alloc_res_map(num_msg, &hwc->inflight_msg_res);
	if (err)
		dev_err(hwc->dev, "Failed to init inflight_msg_res: %d\n", err);
	return err;
}

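/* Prime the RQ with all of its receive WQEs, allocate the per-message
 * caller contexts, and verify that the EQ delivers interrupts.
 */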
static int mana_hwc_test_channel(struct hw_channel_context *hwc, u16 q_depth,
				 u32 max_req_msg_size, u32 max_resp_msg_size)
{
	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
	struct hwc_wq *hwc_rxq = hwc->rxq;
	struct hwc_work_request *req;
	struct hwc_caller_ctx *ctx;
	int err;
	int i;

	/* Post all WQEs on the RQ */
	for (i = 0; i < q_depth; i++) {
		req = &hwc_rxq->msg_buf->reqs[i];
		err = mana_hwc_post_rx_wqe(hwc_rxq, req);
		if (err)
			return err;
	}

	ctx = kcalloc(q_depth, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	for (i = 0; i < q_depth; ++i)
		init_completion(&ctx[i].comp_event);

	hwc->caller_ctx = ctx;

	return mana_gd_test_eq(gc, hwc->cq->gdma_eq);
}

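/* Hand the queue addresses to the HW over the shared-memory channel and
 * wait for the GDMA_EQE_HWC_INIT_DONE event; then report the limits the
 * HW announced back to the caller and register the HWC CQ.
 */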
static int mana_hwc_establish_channel(struct gdma_context *gc, u16 *q_depth,
				      u32 *max_req_msg_size,
				      u32 *max_resp_msg_size)
{
	struct hw_channel_context *hwc = gc->hwc.driver_data;
	struct gdma_queue *rq = hwc->rxq->gdma_wq;
	struct gdma_queue *sq = hwc->txq->gdma_wq;
	struct gdma_queue *eq = hwc->cq->gdma_eq;
	struct gdma_queue *cq = hwc->cq->gdma_cq;
	int err;

	init_completion(&hwc->hwc_init_eqe_comp);

	err = mana_smc_setup_hwc(&gc->shm_channel, false,
				 eq->mem_info.dma_handle,
				 cq->mem_info.dma_handle,
				 rq->mem_info.dma_handle,
				 sq->mem_info.dma_handle,
				 eq->eq.msix_index);
	if (err)
		return err;

	if (!wait_for_completion_timeout(&hwc->hwc_init_eqe_comp, 60 * HZ))
		return -ETIMEDOUT;

	*q_depth = hwc->hwc_init_q_depth_max;
	*max_req_msg_size = hwc->hwc_init_max_req_msg_size;
	*max_resp_msg_size = hwc->hwc_init_max_resp_msg_size;

	/* Both were set in mana_hwc_init_event_handler(). */
	if (WARN_ON(cq->id >= gc->max_num_cqs))
		return -EPROTO;

	gc->cq_table = vcalloc(gc->max_num_cqs, sizeof(struct gdma_queue *));
	if (!gc->cq_table)
		return -ENOMEM;

	gc->cq_table[cq->id] = cq;

	return 0;
}

static int mana_hwc_init_queues(struct hw_channel_context *hwc, u16 q_depth,
				u32 max_req_msg_size, u32 max_resp_msg_size)
{
	int err;

	err = mana_hwc_init_inflight_msg(hwc, q_depth);
	if (err)
		return err;

	/* CQ is shared by SQ and RQ, so CQ's queue depth is the sum of SQ
	 * queue depth and RQ queue depth.
	 */
	err = mana_hwc_create_cq(hwc, q_depth * 2,
				 mana_hwc_init_event_handler, hwc,
				 mana_hwc_rx_event_handler, hwc,
				 mana_hwc_tx_event_handler, hwc, &hwc->cq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC CQ: %d\n", err);
		goto out;
	}

	err = mana_hwc_create_wq(hwc, GDMA_RQ, q_depth, max_req_msg_size,
				 hwc->cq, &hwc->rxq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC RQ: %d\n", err);
		goto out;
	}

	err = mana_hwc_create_wq(hwc, GDMA_SQ, q_depth, max_resp_msg_size,
				 hwc->cq, &hwc->txq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC SQ: %d\n", err);
		goto out;
	}

	hwc->num_inflight_msg = q_depth;
	hwc->max_req_msg_size = max_req_msg_size;

	return 0;
out:
	/* mana_hwc_create_channel() will do the cleanup. */
	return err;
}

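/* Create the HW channel: set up the queues and DMA buffers, establish the
 * channel with the HW, then send a test message. Any failure tears down
 * whatever was partially created.
 */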
int mana_hwc_create_channel(struct gdma_context *gc)
{
	u32 max_req_msg_size, max_resp_msg_size;
	struct gdma_dev *gd = &gc->hwc;
	struct hw_channel_context *hwc;
	u16 q_depth_max;
	int err;

	hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
	if (!hwc)
		return -ENOMEM;

	gd->gdma_context = gc;
	gd->driver_data = hwc;
	hwc->gdma_dev = gd;
	hwc->dev = gc->dev;
	hwc->hwc_timeout = HW_CHANNEL_WAIT_RESOURCE_TIMEOUT_MS;

	/* HWC's instance number is always 0. */
	gd->dev_id.as_uint32 = 0;
	gd->dev_id.type = GDMA_DEVICE_HWC;

	gd->pdid = INVALID_PDID;
	gd->doorbell = INVALID_DOORBELL;

	/* mana_hwc_init_queues() only creates the required data structures,
	 * and doesn't touch the HWC device.
	 */
	err = mana_hwc_init_queues(hwc, HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH,
				   HW_CHANNEL_MAX_REQUEST_SIZE,
				   HW_CHANNEL_MAX_RESPONSE_SIZE);
	if (err) {
		dev_err(hwc->dev, "Failed to initialize HWC: %d\n", err);
		goto out;
	}

	err = mana_hwc_establish_channel(gc, &q_depth_max, &max_req_msg_size,
					 &max_resp_msg_size);
	if (err) {
		dev_err(hwc->dev, "Failed to establish HWC: %d\n", err);
		goto out;
	}

	err = mana_hwc_test_channel(gc->hwc.driver_data,
				    HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH,
				    max_req_msg_size, max_resp_msg_size);
	if (err) {
		dev_err(hwc->dev, "Failed to test HWC: %d\n", err);
		goto out;
	}

	return 0;
out:
	mana_hwc_destroy_channel(gc);
	return err;
}

void mana_hwc_destroy_channel(struct gdma_context *gc)
{
	struct hw_channel_context *hwc = gc->hwc.driver_data;

	if (!hwc)
		return;

	/* gc->max_num_cqs is set in mana_hwc_init_event_handler(). If it's
	 * non-zero, the HWC worked and we should tear down the HWC here.
	 */
	if (gc->max_num_cqs > 0) {
		mana_smc_teardown_hwc(&gc->shm_channel, false);
		gc->max_num_cqs = 0;
	}

	kfree(hwc->caller_ctx);
	hwc->caller_ctx = NULL;

	if (hwc->txq)
		mana_hwc_destroy_wq(hwc, hwc->txq);

	if (hwc->rxq)
		mana_hwc_destroy_wq(hwc, hwc->rxq);

	if (hwc->cq)
		mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc->cq);

	mana_gd_free_res_map(&hwc->inflight_msg_res);

	hwc->num_inflight_msg = 0;

	hwc->gdma_dev->doorbell = INVALID_DOORBELL;
	hwc->gdma_dev->pdid = INVALID_PDID;

	hwc->hwc_timeout = 0;

	kfree(hwc);
	gc->hwc.driver_data = NULL;
	gc->hwc.gdma_context = NULL;

	vfree(gc->cq_table);
	gc->cq_table = NULL;
}

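/* Send a request over the HW channel and synchronously wait, up to
 * hwc->hwc_timeout ms, for the response, which is copied into @resp.
 */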
int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
			  const void *req, u32 resp_len, void *resp)
{
	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
	struct hwc_work_request *tx_wr;
	struct hwc_wq *txq = hwc->txq;
	struct gdma_req_hdr *req_msg;
	struct hwc_caller_ctx *ctx;
	u32 dest_vrcq = 0;
	u32 dest_vrq = 0;
	u16 msg_id;
	int err;

	mana_hwc_get_msg_index(hwc, &msg_id);

	tx_wr = &txq->msg_buf->reqs[msg_id];

	if (req_len > tx_wr->buf_len) {
		dev_err(hwc->dev, "HWC: req msg size: %d > %d\n", req_len,
			tx_wr->buf_len);
		err = -EINVAL;
		goto out;
	}

	ctx = hwc->caller_ctx + msg_id;
	ctx->output_buf = resp;
	ctx->output_buflen = resp_len;

	req_msg = (struct gdma_req_hdr *)tx_wr->buf_va;
	if (req)
		memcpy(req_msg, req, req_len);

	req_msg->req.hwc_msg_id = msg_id;

	tx_wr->msg_size = req_len;

	if (gc->is_pf) {
		dest_vrq = hwc->pf_dest_vrq_id;
		dest_vrcq = hwc->pf_dest_vrcq_id;
	}

	err = mana_hwc_post_tx_wqe(txq, tx_wr, dest_vrq, dest_vrcq, false);
	if (err) {
		dev_err(hwc->dev, "HWC: Failed to post send WQE: %d\n", err);
		goto out;
	}

	if (!wait_for_completion_timeout(&ctx->comp_event,
					 (msecs_to_jiffies(hwc->hwc_timeout)))) {
		dev_err(hwc->dev, "HWC: Request timed out!\n");
		err = -ETIMEDOUT;
		goto out;
	}

	if (ctx->error) {
		err = ctx->error;
		goto out;
	}

	if (ctx->status_code && ctx->status_code != GDMA_STATUS_MORE_ENTRIES) {
		dev_err(hwc->dev, "HWC: Failed hw_channel req: 0x%x\n",
			ctx->status_code);
		err = -EPROTO;
		goto out;
	}
out:
	mana_hwc_put_msg_index(hwc, msg_id);
	return err;
}