// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */

#include <linux/ctype.h>
#include <linux/firmware.h>
#include "otx2_cptpf_ucode.h"
#include "otx2_cpt_common.h"
#include "otx2_cptpf.h"
#include "otx2_cptlf.h"
#include "otx2_cpt_reqmgr.h"
#include "rvu_reg.h"

#define CSR_DELAY 30
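
/*
 * LOADFVC is a microcode operation (opcode major 0x01, minor 0x08) used
 * by otx2_cpt_discover_eng_capabilities() to query an engine's
 * capability flags; the response is a single 8-byte word (LOADFVC_RLEN),
 * read back as a big-endian u64.
 */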
#define LOADFVC_RLEN 8
#define LOADFVC_MAJOR_OP 0x01
#define LOADFVC_MINOR_OP 0x08

struct fw_info_t {
	struct list_head ucodes;
};

static struct otx2_cpt_bitmap get_cores_bmap(struct device *dev,
					struct otx2_cpt_eng_grp_info *eng_grp)
{
	struct otx2_cpt_bitmap bmap = { {0} };
	bool found = false;
	int i;

	if (eng_grp->g->engs_num > OTX2_CPT_MAX_ENGINES) {
		dev_err(dev, "unsupported number of engines %d on octeontx2\n",
			eng_grp->g->engs_num);
		return bmap;
	}

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (eng_grp->engs[i].type) {
			bitmap_or(bmap.bits, bmap.bits,
				  eng_grp->engs[i].bmap,
				  eng_grp->g->engs_num);
			bmap.size = eng_grp->g->engs_num;
			found = true;
		}
	}

	if (!found)
		dev_err(dev, "No engines reserved for engine group %d\n",
			eng_grp->idx);
	return bmap;
}

static int is_eng_type(int val, int eng_type)
{
	return val & (1 << eng_type);
}

static int is_2nd_ucode_used(struct otx2_cpt_eng_grp_info *eng_grp)
{
	if (eng_grp->ucode[1].type)
		return true;
	else
		return false;
}

static void set_ucode_filename(struct otx2_cpt_ucode *ucode,
			       const char *filename)
{
	strlcpy(ucode->filename, filename, OTX2_CPT_NAME_LENGTH);
}

static char *get_eng_type_str(int eng_type)
{
	char *str = "unknown";

	switch (eng_type) {
	case OTX2_CPT_SE_TYPES:
		str = "SE";
		break;

	case OTX2_CPT_IE_TYPES:
		str = "IE";
		break;

	case OTX2_CPT_AE_TYPES:
		str = "AE";
		break;
	}
	return str;
}

static char *get_ucode_type_str(int ucode_type)
{
	char *str = "unknown";

	switch (ucode_type) {
	case (1 << OTX2_CPT_SE_TYPES):
		str = "SE";
		break;

	case (1 << OTX2_CPT_IE_TYPES):
		str = "IE";
		break;

	case (1 << OTX2_CPT_AE_TYPES):
		str = "AE";
		break;

	case (1 << OTX2_CPT_SE_TYPES | 1 << OTX2_CPT_IE_TYPES):
		str = "SE+IPSEC";
		break;
	}
	return str;
}
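
/*
 * Derive the ucode type bitmask from the image header's version string
 * and version number. The version string is matched case-insensitively
 * against a prefix built from the PCI revision ("ocpt-%02d" - e.g.
 * "ocpt-01" for revision 1; the revision value here is illustrative)
 * and against the engine-type markers "se-", "ie-" and "ae". A single
 * image may serve several engine types, in which case multiple bits are
 * set in *ucode_type (see get_ucode_type_str() above).
 */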
static int get_ucode_type(struct device *dev,
			  struct otx2_cpt_ucode_hdr *ucode_hdr,
			  int *ucode_type)
{
	struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
	char ver_str_prefix[OTX2_CPT_UCODE_VER_STR_SZ];
	char tmp_ver_str[OTX2_CPT_UCODE_VER_STR_SZ];
	struct pci_dev *pdev = cptpf->pdev;
	int i, val = 0;
	u8 nn;

	strlcpy(tmp_ver_str, ucode_hdr->ver_str, OTX2_CPT_UCODE_VER_STR_SZ);
	for (i = 0; i < strlen(tmp_ver_str); i++)
		tmp_ver_str[i] = tolower(tmp_ver_str[i]);

	sprintf(ver_str_prefix, "ocpt-%02d", pdev->revision);
	if (!strnstr(tmp_ver_str, ver_str_prefix, OTX2_CPT_UCODE_VER_STR_SZ))
		return -EINVAL;

	nn = ucode_hdr->ver_num.nn;
	if (strnstr(tmp_ver_str, "se-", OTX2_CPT_UCODE_VER_STR_SZ) &&
	    (nn == OTX2_CPT_SE_UC_TYPE1 || nn == OTX2_CPT_SE_UC_TYPE2 ||
	     nn == OTX2_CPT_SE_UC_TYPE3))
		val |= 1 << OTX2_CPT_SE_TYPES;
	if (strnstr(tmp_ver_str, "ie-", OTX2_CPT_UCODE_VER_STR_SZ) &&
	    (nn == OTX2_CPT_IE_UC_TYPE1 || nn == OTX2_CPT_IE_UC_TYPE2 ||
	     nn == OTX2_CPT_IE_UC_TYPE3))
		val |= 1 << OTX2_CPT_IE_TYPES;
	if (strnstr(tmp_ver_str, "ae", OTX2_CPT_UCODE_VER_STR_SZ) &&
	    nn == OTX2_CPT_AE_UC_TYPE)
		val |= 1 << OTX2_CPT_AE_TYPES;

	*ucode_type = val;

	if (!val)
		return -EINVAL;

	return 0;
}

static int __write_ucode_base(struct otx2_cptpf_dev *cptpf, int eng,
			      dma_addr_t dma_addr)
{
	return otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
				     CPT_AF_EXEX_UCODE_BASE(eng),
				     (u64)dma_addr);
}

static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj)
{
	struct otx2_cptpf_dev *cptpf = obj;
	struct otx2_cpt_engs_rsvd *engs;
	dma_addr_t dma_addr;
	int i, bit, ret;

	/* Set PF number for microcode fetches */
	ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
				    CPT_AF_PF_FUNC,
				    cptpf->pf_id << RVU_PFVF_PF_SHIFT);
	if (ret)
		return ret;

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		engs = &eng_grp->engs[i];
		if (!engs->type)
			continue;

		dma_addr = engs->ucode->dma;

		/*
		 * Set UCODE_BASE only for the cores which are not used;
		 * other cores should already have a valid UCODE_BASE set.
		 */
		for_each_set_bit(bit, engs->bmap, eng_grp->g->engs_num)
			if (!eng_grp->g->eng_ref_cnt[bit]) {
				ret = __write_ucode_base(cptpf, bit, dma_addr);
				if (ret)
					return ret;
			}
	}
	return 0;
}

static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
					void *obj)
{
	struct otx2_cptpf_dev *cptpf = obj;
	struct otx2_cpt_bitmap bmap;
	int i, timeout = 10;
	int busy, ret;
	u64 reg = 0;

	bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
	if (!bmap.size)
		return -EINVAL;

	/* Detach the cores from group */
	for_each_set_bit(i, bmap.bits, bmap.size) {
		ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
					   CPT_AF_EXEX_CTL2(i), &reg);
		if (ret)
			return ret;

		if (reg & (1ull << eng_grp->idx)) {
			eng_grp->g->eng_ref_cnt[i]--;
			reg &= ~(1ull << eng_grp->idx);

			ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
						    cptpf->pdev,
						    CPT_AF_EXEX_CTL2(i), reg);
			if (ret)
				return ret;
		}
	}

	/* Wait for cores to become idle */
	do {
		busy = 0;
		usleep_range(10000, 20000);
		if (timeout-- < 0)
			return -EBUSY;

		for_each_set_bit(i, bmap.bits, bmap.size) {
			ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox,
						   cptpf->pdev,
						   CPT_AF_EXEX_STS(i), &reg);
			if (ret)
				return ret;

			if (reg & 0x1) {
				busy = 1;
				break;
			}
		}
	} while (busy);

	/* Disable the cores only if they are not used anymore */
	for_each_set_bit(i, bmap.bits, bmap.size) {
		if (!eng_grp->g->eng_ref_cnt[i]) {
			ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
						    cptpf->pdev,
						    CPT_AF_EXEX_CTL(i), 0x0);
			if (ret)
				return ret;
		}
	}

	return 0;
}
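
/*
 * Note on the mailbox pattern: the detach path above issues each
 * CPT_AF_EXEX_CTL2 write as its own request via otx2_cpt_write_af_reg(),
 * while the enable loop below queues writes with
 * otx2_cpt_add_write_af_reg() and flushes them in one go with
 * otx2_cpt_send_af_reg_requests(), which presumably batches them into a
 * single AF<->PF mailbox message (these helpers live in common code, so
 * this is inferred from their names and usage here).
 */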
static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
				       void *obj)
{
	struct otx2_cptpf_dev *cptpf = obj;
	struct otx2_cpt_bitmap bmap;
	u64 reg = 0;
	int i, ret;

	bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
	if (!bmap.size)
		return -EINVAL;

	/* Attach the cores to the group */
	for_each_set_bit(i, bmap.bits, bmap.size) {
		ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
					   CPT_AF_EXEX_CTL2(i), &reg);
		if (ret)
			return ret;

		if (!(reg & (1ull << eng_grp->idx))) {
			eng_grp->g->eng_ref_cnt[i]++;
			reg |= 1ull << eng_grp->idx;

			ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
						    cptpf->pdev,
						    CPT_AF_EXEX_CTL2(i), reg);
			if (ret)
				return ret;
		}
	}

	/* Enable the cores */
	for_each_set_bit(i, bmap.bits, bmap.size) {
		ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox,
						cptpf->pdev,
						CPT_AF_EXEX_CTL(i), 0x1);
		if (ret)
			return ret;
	}
	ret = otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);

	return ret;
}

static int load_fw(struct device *dev, struct fw_info_t *fw_info,
		   char *filename)
{
	struct otx2_cpt_ucode_hdr *ucode_hdr;
	struct otx2_cpt_uc_info_t *uc_info;
	int ucode_type, ucode_size;
	int ret;

	uc_info = kzalloc(sizeof(*uc_info), GFP_KERNEL);
	if (!uc_info)
		return -ENOMEM;

	ret = request_firmware(&uc_info->fw, filename, dev);
	if (ret)
		goto free_uc_info;

	ucode_hdr = (struct otx2_cpt_ucode_hdr *)uc_info->fw->data;
	ret = get_ucode_type(dev, ucode_hdr, &ucode_type);
	if (ret)
		goto release_fw;

	ucode_size = ntohl(ucode_hdr->code_length) * 2;
	if (!ucode_size) {
		dev_err(dev, "Ucode %s invalid size\n", filename);
		ret = -EINVAL;
		goto release_fw;
	}

	set_ucode_filename(&uc_info->ucode, filename);
	memcpy(uc_info->ucode.ver_str, ucode_hdr->ver_str,
	       OTX2_CPT_UCODE_VER_STR_SZ);
	uc_info->ucode.ver_num = ucode_hdr->ver_num;
	uc_info->ucode.type = ucode_type;
	uc_info->ucode.size = ucode_size;
	list_add_tail(&uc_info->list, &fw_info->ucodes);

	return 0;

release_fw:
	release_firmware(uc_info->fw);
free_uc_info:
	kfree(uc_info);
	return ret;
}

static void cpt_ucode_release_fw(struct fw_info_t *fw_info)
{
	struct otx2_cpt_uc_info_t *curr, *temp;

	if (!fw_info)
		return;

	list_for_each_entry_safe(curr, temp, &fw_info->ucodes, list) {
		list_del(&curr->list);
		release_firmware(curr->fw);
		kfree(curr);
	}
}

static struct otx2_cpt_uc_info_t *get_ucode(struct fw_info_t *fw_info,
					    int ucode_type)
{
	struct otx2_cpt_uc_info_t *curr;

	list_for_each_entry(curr, &fw_info->ucodes, list) {
		if (!is_eng_type(curr->ucode.type, ucode_type))
			continue;

		return curr;
	}
	return NULL;
}

static void print_uc_info(struct fw_info_t *fw_info)
{
	struct otx2_cpt_uc_info_t *curr;

	list_for_each_entry(curr, &fw_info->ucodes, list) {
		pr_debug("Ucode filename %s\n", curr->ucode.filename);
		pr_debug("Ucode version string %s\n", curr->ucode.ver_str);
		pr_debug("Ucode version %d.%d.%d.%d\n",
			 curr->ucode.ver_num.nn, curr->ucode.ver_num.xx,
			 curr->ucode.ver_num.yy, curr->ucode.ver_num.zz);
		pr_debug("Ucode type (%d) %s\n", curr->ucode.type,
			 get_ucode_type_str(curr->ucode.type));
		pr_debug("Ucode size %d\n", curr->ucode.size);
		pr_debug("Ucode ptr %p\n", curr->fw->data);
	}
}
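
/*
 * Request one firmware image per engine type. The file name is derived
 * from the PCI revision and the lower-cased engine type string via
 * "mrvl/cpt%02d/%s.out"; with revision 1 (an illustrative value) the
 * driver would look for mrvl/cpt01/se.out, mrvl/cpt01/ie.out and
 * mrvl/cpt01/ae.out under the usual firmware search paths.
 */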
static int cpt_ucode_load_fw(struct pci_dev *pdev, struct fw_info_t *fw_info)
{
	char filename[OTX2_CPT_NAME_LENGTH];
	char eng_type[8] = {0};
	int ret, e, i;

	INIT_LIST_HEAD(&fw_info->ucodes);

	for (e = 1; e < OTX2_CPT_MAX_ENG_TYPES; e++) {
		strcpy(eng_type, get_eng_type_str(e));
		for (i = 0; i < strlen(eng_type); i++)
			eng_type[i] = tolower(eng_type[i]);

		snprintf(filename, sizeof(filename), "mrvl/cpt%02d/%s.out",
			 pdev->revision, eng_type);
		/* Request firmware for each engine type */
		ret = load_fw(&pdev->dev, fw_info, filename);
		if (ret)
			goto release_fw;
	}
	print_uc_info(fw_info);
	return 0;

release_fw:
	cpt_ucode_release_fw(fw_info);
	return ret;
}

static struct otx2_cpt_engs_rsvd *find_engines_by_type(
					struct otx2_cpt_eng_grp_info *eng_grp,
					int eng_type)
{
	int i;

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (!eng_grp->engs[i].type)
			continue;

		if (eng_grp->engs[i].type == eng_type)
			return &eng_grp->engs[i];
	}
	return NULL;
}

static int eng_grp_has_eng_type(struct otx2_cpt_eng_grp_info *eng_grp,
				int eng_type)
{
	struct otx2_cpt_engs_rsvd *engs;

	engs = find_engines_by_type(eng_grp, eng_type);

	return (engs != NULL ? 1 : 0);
}

static int update_engines_avail_count(struct device *dev,
				      struct otx2_cpt_engs_available *avail,
				      struct otx2_cpt_engs_rsvd *engs, int val)
{
	switch (engs->type) {
	case OTX2_CPT_SE_TYPES:
		avail->se_cnt += val;
		break;

	case OTX2_CPT_IE_TYPES:
		avail->ie_cnt += val;
		break;

	case OTX2_CPT_AE_TYPES:
		avail->ae_cnt += val;
		break;

	default:
		dev_err(dev, "Invalid engine type %d\n", engs->type);
		return -EINVAL;
	}
	return 0;
}
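
/*
 * Engines are numbered globally: SE engines start at index 0, IE
 * engines directly after the last possible SE engine, and AE engines
 * after the last possible IE engine. For example (hypothetical counts),
 * with max_se_cnt = 4 and max_ie_cnt = 2 the IE engines occupy indices
 * 4-5 and the AE engines start at index 6.
 */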
static int update_engines_offset(struct device *dev,
				 struct otx2_cpt_engs_available *avail,
				 struct otx2_cpt_engs_rsvd *engs)
{
	switch (engs->type) {
	case OTX2_CPT_SE_TYPES:
		engs->offset = 0;
		break;

	case OTX2_CPT_IE_TYPES:
		engs->offset = avail->max_se_cnt;
		break;

	case OTX2_CPT_AE_TYPES:
		engs->offset = avail->max_se_cnt + avail->max_ie_cnt;
		break;

	default:
		dev_err(dev, "Invalid engine type %d\n", engs->type);
		return -EINVAL;
	}
	return 0;
}

static int release_engines(struct device *dev,
			   struct otx2_cpt_eng_grp_info *grp)
{
	int i, ret = 0;

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (!grp->engs[i].type)
			continue;

		if (grp->engs[i].count > 0) {
			ret = update_engines_avail_count(dev, &grp->g->avail,
							 &grp->engs[i],
							 grp->engs[i].count);
			if (ret)
				return ret;
		}

		grp->engs[i].type = 0;
		grp->engs[i].count = 0;
		grp->engs[i].offset = 0;
		grp->engs[i].ucode = NULL;
		bitmap_zero(grp->engs[i].bmap, grp->g->engs_num);
	}
	return 0;
}

static int do_reserve_engines(struct device *dev,
			      struct otx2_cpt_eng_grp_info *grp,
			      struct otx2_cpt_engines *req_engs)
{
	struct otx2_cpt_engs_rsvd *engs = NULL;
	int i, ret;

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (!grp->engs[i].type) {
			engs = &grp->engs[i];
			break;
		}
	}

	if (!engs)
		return -ENOMEM;

	engs->type = req_engs->type;
	engs->count = req_engs->count;

	ret = update_engines_offset(dev, &grp->g->avail, engs);
	if (ret)
		return ret;

	if (engs->count > 0) {
		ret = update_engines_avail_count(dev, &grp->g->avail, engs,
						 -engs->count);
		if (ret)
			return ret;
	}

	return 0;
}

static int check_engines_availability(struct device *dev,
				      struct otx2_cpt_eng_grp_info *grp,
				      struct otx2_cpt_engines *req_eng)
{
	int avail_cnt = 0;

	switch (req_eng->type) {
	case OTX2_CPT_SE_TYPES:
		avail_cnt = grp->g->avail.se_cnt;
		break;

	case OTX2_CPT_IE_TYPES:
		avail_cnt = grp->g->avail.ie_cnt;
		break;

	case OTX2_CPT_AE_TYPES:
		avail_cnt = grp->g->avail.ae_cnt;
		break;

	default:
		dev_err(dev, "Invalid engine type %d\n", req_eng->type);
		return -EINVAL;
	}

	if (avail_cnt < req_eng->count) {
		dev_err(dev,
			"Error available %s engines %d < requested %d\n",
			get_eng_type_str(req_eng->type),
			avail_cnt, req_eng->count);
		return -EBUSY;
	}
	return 0;
}

static int reserve_engines(struct device *dev,
			   struct otx2_cpt_eng_grp_info *grp,
			   struct otx2_cpt_engines *req_engs, int ucodes_cnt)
{
	int i, ret = 0;

	/* Validate that the requested number of engines is available */
	for (i = 0; i < ucodes_cnt; i++) {
		ret = check_engines_availability(dev, grp, &req_engs[i]);
		if (ret)
			return ret;
	}

	/* Reserve the requested engines for this engine group */
	for (i = 0; i < ucodes_cnt; i++) {
		ret = do_reserve_engines(dev, grp, &req_engs[i]);
		if (ret)
			return ret;
	}
	return 0;
}

static void ucode_unload(struct device *dev, struct otx2_cpt_ucode *ucode)
{
	if (ucode->va) {
		dma_free_coherent(dev, ucode->size, ucode->va, ucode->dma);
		ucode->va = NULL;
		ucode->dma = 0;
		ucode->size = 0;
	}

	memset(&ucode->ver_str, 0, OTX2_CPT_UCODE_VER_STR_SZ);
	memset(&ucode->ver_num, 0, sizeof(struct otx2_cpt_ucode_ver_num));
	set_ucode_filename(ucode, "");
	ucode->type = 0;
}
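
/*
 * Copy the microcode body (everything after the header) into DMA
 * coherent memory and put it into the byte order the engines expect.
 * On a little-endian host the combined 64-bit swap followed by the
 * 16-bit swap reverses the order of the four 16-bit halfwords within
 * each 64-bit word while keeping the bytes inside each halfword intact:
 * bytes b0 b1 b2 b3 b4 b5 b6 b7 end up as b6 b7 b4 b5 b2 b3 b0 b1.
 * On a big-endian host both helpers are no-ops.
 */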
static int copy_ucode_to_dma_mem(struct device *dev,
				 struct otx2_cpt_ucode *ucode,
				 const u8 *ucode_data)
{
	u32 i;

	/* Allocate DMAable space */
	ucode->va = dma_alloc_coherent(dev, ucode->size, &ucode->dma,
				       GFP_KERNEL);
	if (!ucode->va)
		return -ENOMEM;

	memcpy(ucode->va, ucode_data + sizeof(struct otx2_cpt_ucode_hdr),
	       ucode->size);

	/* Byte swap 64-bit */
	for (i = 0; i < (ucode->size / 8); i++)
		cpu_to_be64s(&((u64 *)ucode->va)[i]);
	/* Ucode needs 16-bit swap */
	for (i = 0; i < (ucode->size / 2); i++)
		cpu_to_be16s(&((u16 *)ucode->va)[i]);
	return 0;
}

static int enable_eng_grp(struct otx2_cpt_eng_grp_info *eng_grp,
			  void *obj)
{
	int ret;

	/* Point microcode to each core of the group */
	ret = cpt_set_ucode_base(eng_grp, obj);
	if (ret)
		return ret;

	/* Attach the cores to the group and enable them */
	ret = cpt_attach_and_enable_cores(eng_grp, obj);

	return ret;
}

static int disable_eng_grp(struct device *dev,
			   struct otx2_cpt_eng_grp_info *eng_grp,
			   void *obj)
{
	int i, ret;

	/* Disable all engines used by this group */
	ret = cpt_detach_and_disable_cores(eng_grp, obj);
	if (ret)
		return ret;

	/* Unload ucode used by this engine group */
	ucode_unload(dev, &eng_grp->ucode[0]);
	ucode_unload(dev, &eng_grp->ucode[1]);

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (!eng_grp->engs[i].type)
			continue;

		eng_grp->engs[i].ucode = &eng_grp->ucode[0];
	}

	/* Clear UCODE_BASE register for each engine used by this group */
	ret = cpt_set_ucode_base(eng_grp, obj);

	return ret;
}

static void setup_eng_grp_mirroring(struct otx2_cpt_eng_grp_info *dst_grp,
				    struct otx2_cpt_eng_grp_info *src_grp)
{
	/* Setup fields for engine group which is mirrored */
	src_grp->mirror.is_ena = false;
	src_grp->mirror.idx = 0;
	src_grp->mirror.ref_count++;

	/* Setup fields for mirroring engine group */
	dst_grp->mirror.is_ena = true;
	dst_grp->mirror.idx = src_grp->idx;
	dst_grp->mirror.ref_count = 0;
}

static void remove_eng_grp_mirroring(struct otx2_cpt_eng_grp_info *dst_grp)
{
	struct otx2_cpt_eng_grp_info *src_grp;

	if (!dst_grp->mirror.is_ena)
		return;

	src_grp = &dst_grp->g->grp[dst_grp->mirror.idx];

	src_grp->mirror.ref_count--;
	dst_grp->mirror.is_ena = false;
	dst_grp->mirror.idx = 0;
	dst_grp->mirror.ref_count = 0;
}

static void update_requested_engs(struct otx2_cpt_eng_grp_info *mirror_eng_grp,
				  struct otx2_cpt_engines *engs, int engs_cnt)
{
	struct otx2_cpt_engs_rsvd *mirrored_engs;
	int i;

	for (i = 0; i < engs_cnt; i++) {
		mirrored_engs = find_engines_by_type(mirror_eng_grp,
						     engs[i].type);
		if (!mirrored_engs)
			continue;

		/*
		 * If the mirrored group has this type of engines attached
		 * then there are 3 possible scenarios:
		 * 1) mirrored_engs.count == engs[i].count: all engines from
		 * the mirrored engine group will be shared with this engine
		 * group
		 * 2) mirrored_engs.count > engs[i].count: only a subset of
		 * the engines from the mirrored engine group will be shared
		 * with this engine group
		 * 3) mirrored_engs.count < engs[i].count: all engines from
		 * the mirrored engine group will be shared with this group
		 * and additional engines will be reserved for exclusive use
		 * by this engine group
		 * For example, requesting 8 engines while the mirrored group
		 * holds 10 leaves engs[i].count at -2, i.e. share 8 of the
		 * 10 and reserve nothing new.
		 */
		engs[i].count -= mirrored_engs->count;
	}
}

static struct otx2_cpt_eng_grp_info *find_mirrored_eng_grp(
					struct otx2_cpt_eng_grp_info *grp)
{
	struct otx2_cpt_eng_grps *eng_grps = grp->g;
	int i;

	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
		if (!eng_grps->grp[i].is_enabled)
			continue;
		if (eng_grps->grp[i].ucode[0].type &&
		    eng_grps->grp[i].ucode[1].type)
			continue;
		if (grp->idx == i)
			continue;
		if (!strncasecmp(eng_grps->grp[i].ucode[0].ver_str,
				 grp->ucode[0].ver_str,
				 OTX2_CPT_UCODE_VER_STR_SZ))
			return &eng_grps->grp[i];
	}

	return NULL;
}

static struct otx2_cpt_eng_grp_info *find_unused_eng_grp(
					struct otx2_cpt_eng_grps *eng_grps)
{
	int i;

	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
		if (!eng_grps->grp[i].is_enabled)
			return &eng_grps->grp[i];
	}
	return NULL;
}
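
/*
 * Compute the bitmap of physical engines backing each engine type of
 * the group. After update_requested_engs() an engs->count may be
 * negative for a mirroring group: the group then needs fewer engines
 * of that type than the mirrored group already provides. In that case
 * no new engines are picked and the mirrored bitmap is reused with
 * -engs->count bits cleared from its start.
 */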
static int eng_grp_update_masks(struct device *dev,
				struct otx2_cpt_eng_grp_info *eng_grp)
{
	struct otx2_cpt_engs_rsvd *engs, *mirrored_engs;
	struct otx2_cpt_bitmap tmp_bmap = { {0} };
	int i, j, cnt, max_cnt;
	int bit;

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		engs = &eng_grp->engs[i];
		if (!engs->type)
			continue;
		if (engs->count <= 0)
			continue;

		switch (engs->type) {
		case OTX2_CPT_SE_TYPES:
			max_cnt = eng_grp->g->avail.max_se_cnt;
			break;

		case OTX2_CPT_IE_TYPES:
			max_cnt = eng_grp->g->avail.max_ie_cnt;
			break;

		case OTX2_CPT_AE_TYPES:
			max_cnt = eng_grp->g->avail.max_ae_cnt;
			break;

		default:
			dev_err(dev, "Invalid engine type %d\n", engs->type);
			return -EINVAL;
		}

		cnt = engs->count;
		WARN_ON(engs->offset + max_cnt > OTX2_CPT_MAX_ENGINES);
		bitmap_zero(tmp_bmap.bits, eng_grp->g->engs_num);
		for (j = engs->offset; j < engs->offset + max_cnt; j++) {
			if (!eng_grp->g->eng_ref_cnt[j]) {
				bitmap_set(tmp_bmap.bits, j, 1);
				cnt--;
				if (!cnt)
					break;
			}
		}

		if (cnt)
			return -ENOSPC;

		bitmap_copy(engs->bmap, tmp_bmap.bits, eng_grp->g->engs_num);
	}

	if (!eng_grp->mirror.is_ena)
		return 0;

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		engs = &eng_grp->engs[i];
		if (!engs->type)
			continue;

		mirrored_engs = find_engines_by_type(
					&eng_grp->g->grp[eng_grp->mirror.idx],
					engs->type);
		WARN_ON(!mirrored_engs && engs->count <= 0);
		if (!mirrored_engs)
			continue;

		bitmap_copy(tmp_bmap.bits, mirrored_engs->bmap,
			    eng_grp->g->engs_num);
		if (engs->count < 0) {
			bit = find_first_bit(mirrored_engs->bmap,
					     eng_grp->g->engs_num);
			bitmap_clear(tmp_bmap.bits, bit, -engs->count);
		}
		bitmap_or(engs->bmap, engs->bmap, tmp_bmap.bits,
			  eng_grp->g->engs_num);
	}
	return 0;
}

static int delete_engine_group(struct device *dev,
			       struct otx2_cpt_eng_grp_info *eng_grp)
{
	int ret;

	if (!eng_grp->is_enabled)
		return 0;

	if (eng_grp->mirror.ref_count)
		return -EINVAL;

	/* Remove engine group mirroring if enabled */
	remove_eng_grp_mirroring(eng_grp);

	/* Disable engine group */
	ret = disable_eng_grp(dev, eng_grp, eng_grp->g->obj);
	if (ret)
		return ret;

	/* Release all engines held by this engine group */
	ret = release_engines(dev, eng_grp);
	if (ret)
		return ret;

	eng_grp->is_enabled = false;

	return 0;
}

static void update_ucode_ptrs(struct otx2_cpt_eng_grp_info *eng_grp)
{
	struct otx2_cpt_ucode *ucode;

	if (eng_grp->mirror.is_ena)
		ucode = &eng_grp->g->grp[eng_grp->mirror.idx].ucode[0];
	else
		ucode = &eng_grp->ucode[0];
	WARN_ON(!eng_grp->engs[0].type);
	eng_grp->engs[0].ucode = ucode;

	if (eng_grp->engs[1].type) {
		if (is_2nd_ucode_used(eng_grp))
			eng_grp->engs[1].ucode = &eng_grp->ucode[1];
		else
			eng_grp->engs[1].ucode = ucode;
	}
}
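
/*
 * Create a new engine group: find a free group slot, copy each ucode
 * image into DMA memory, check whether an existing group already runs
 * the same ucode (and mirror it if so), reserve engines, build the
 * per-type engine bitmaps, and finally program and enable the cores.
 */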
static int create_engine_group(struct device *dev,
			       struct otx2_cpt_eng_grps *eng_grps,
			       struct otx2_cpt_engines *engs, int ucodes_cnt,
			       void *ucode_data[], int is_print)
{
	struct otx2_cpt_eng_grp_info *mirrored_eng_grp;
	struct otx2_cpt_eng_grp_info *eng_grp;
	struct otx2_cpt_uc_info_t *uc_info;
	int i, ret = 0;

	/* Find an engine group which is not used */
	eng_grp = find_unused_eng_grp(eng_grps);
	if (!eng_grp) {
		dev_err(dev, "Error all engine groups are being used\n");
		return -ENOSPC;
	}
	/* Load ucode */
	for (i = 0; i < ucodes_cnt; i++) {
		uc_info = (struct otx2_cpt_uc_info_t *) ucode_data[i];
		eng_grp->ucode[i] = uc_info->ucode;
		ret = copy_ucode_to_dma_mem(dev, &eng_grp->ucode[i],
					    uc_info->fw->data);
		if (ret)
			goto unload_ucode;
	}

	/* Check if this group mirrors another existing engine group */
	mirrored_eng_grp = find_mirrored_eng_grp(eng_grp);
	if (mirrored_eng_grp) {
		/* Setup mirroring */
		setup_eng_grp_mirroring(eng_grp, mirrored_eng_grp);

		/*
		 * Update the count of requested engines because some
		 * of them might be shared with the mirrored group
		 */
		update_requested_engs(mirrored_eng_grp, engs, ucodes_cnt);
	}
	ret = reserve_engines(dev, eng_grp, engs, ucodes_cnt);
	if (ret)
		goto unload_ucode;

	/* Update ucode pointers used by engines */
	update_ucode_ptrs(eng_grp);

	/* Update engine masks used by this group */
	ret = eng_grp_update_masks(dev, eng_grp);
	if (ret)
		goto release_engs;

	/* Enable engine group */
	ret = enable_eng_grp(eng_grp, eng_grps->obj);
	if (ret)
		goto release_engs;

	/*
	 * If this engine group mirrors another engine group
	 * then we need to unload its ucode as we will use the
	 * ucode from the mirrored engine group
	 */
	if (eng_grp->mirror.is_ena)
		ucode_unload(dev, &eng_grp->ucode[0]);

	eng_grp->is_enabled = true;

	if (!is_print)
		return 0;

	if (mirrored_eng_grp)
		dev_info(dev,
			 "Engine_group%d: reuse microcode %s from group %d\n",
			 eng_grp->idx, mirrored_eng_grp->ucode[0].ver_str,
			 mirrored_eng_grp->idx);
	else
		dev_info(dev, "Engine_group%d: microcode loaded %s\n",
			 eng_grp->idx, eng_grp->ucode[0].ver_str);
	if (is_2nd_ucode_used(eng_grp))
		dev_info(dev, "Engine_group%d: microcode loaded %s\n",
			 eng_grp->idx, eng_grp->ucode[1].ver_str);

	return 0;

release_engs:
	release_engines(dev, eng_grp);
unload_ucode:
	ucode_unload(dev, &eng_grp->ucode[0]);
	ucode_unload(dev, &eng_grp->ucode[1]);
	return ret;
}

static void delete_engine_grps(struct pci_dev *pdev,
			       struct otx2_cpt_eng_grps *eng_grps)
{
	int i;

	/*
	 * Delete all mirroring engine groups first so that the mirrored
	 * groups' reference counts drop to zero
	 */
	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++)
		if (eng_grps->grp[i].mirror.is_ena)
			delete_engine_group(&pdev->dev, &eng_grps->grp[i]);

	/* Delete remaining engine groups */
	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++)
		delete_engine_group(&pdev->dev, &eng_grps->grp[i]);
}

int otx2_cpt_get_eng_grp(struct otx2_cpt_eng_grps *eng_grps, int eng_type)
{
	int eng_grp_num = OTX2_CPT_INVALID_CRYPTO_ENG_GRP;
	struct otx2_cpt_eng_grp_info *grp;
	int i;

	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
		grp = &eng_grps->grp[i];
		if (!grp->is_enabled)
			continue;

		if (eng_type == OTX2_CPT_SE_TYPES) {
			if (eng_grp_has_eng_type(grp, eng_type) &&
			    !eng_grp_has_eng_type(grp, OTX2_CPT_IE_TYPES)) {
				eng_grp_num = i;
				break;
			}
		} else {
			if (eng_grp_has_eng_type(grp, eng_type)) {
				eng_grp_num = i;
				break;
			}
		}
	}
	return eng_grp_num;
}
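
/*
 * Create the default engine groups: group 0 holds all SE engines for
 * symmetric kernel crypto, group 1 holds SE+IE engines for IPsec (its
 * SE engines are mirrored from group 0 since both run the same SE
 * ucode), and group 2 holds all AE engines for asymmetric crypto.
 */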
int otx2_cpt_create_eng_grps(struct pci_dev *pdev,
			     struct otx2_cpt_eng_grps *eng_grps)
{
	struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = { };
	struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { {0} };
	struct fw_info_t fw_info;
	int ret;

	/*
	 * Don't create the engine groups if they were already created
	 * (when the user enabled VFs for the first time)
	 */
	if (eng_grps->is_grps_created)
		return 0;

	ret = cpt_ucode_load_fw(pdev, &fw_info);
	if (ret)
		return ret;

	/*
	 * Create engine group with SE engines for kernel
	 * crypto functionality (symmetric crypto)
	 */
	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
	if (uc_info[0] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for SE\n");
		ret = -EINVAL;
		goto release_fw;
	}
	engs[0].type = OTX2_CPT_SE_TYPES;
	engs[0].count = eng_grps->avail.max_se_cnt;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
				  (void **) uc_info, 1);
	if (ret)
		goto release_fw;

	/*
	 * Create engine group with SE+IE engines for IPSec.
	 * All SE engines will be shared with engine group 0.
	 */
	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
	uc_info[1] = get_ucode(&fw_info, OTX2_CPT_IE_TYPES);

	if (uc_info[1] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for IE\n");
		ret = -EINVAL;
		goto delete_eng_grp;
	}
	engs[0].type = OTX2_CPT_SE_TYPES;
	engs[0].count = eng_grps->avail.max_se_cnt;
	engs[1].type = OTX2_CPT_IE_TYPES;
	engs[1].count = eng_grps->avail.max_ie_cnt;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 2,
				  (void **) uc_info, 1);
	if (ret)
		goto delete_eng_grp;

	/*
	 * Create engine group with AE engines for asymmetric
	 * crypto functionality.
	 */
	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_AE_TYPES);
	if (uc_info[0] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for AE\n");
		ret = -EINVAL;
		goto delete_eng_grp;
	}
	engs[0].type = OTX2_CPT_AE_TYPES;
	engs[0].count = eng_grps->avail.max_ae_cnt;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
				  (void **) uc_info, 1);
	if (ret)
		goto delete_eng_grp;

	eng_grps->is_grps_created = true;

	cpt_ucode_release_fw(&fw_info);
	return 0;

delete_eng_grp:
	delete_engine_grps(pdev, eng_grps);
release_fw:
	cpt_ucode_release_fw(&fw_info);
	return ret;
}

int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf)
{
	int i, ret, busy, total_cores;
	int timeout = 10;
	u64 reg = 0;

	total_cores = cptpf->eng_grps.avail.max_se_cnt +
		      cptpf->eng_grps.avail.max_ie_cnt +
		      cptpf->eng_grps.avail.max_ae_cnt;

	/* Disengage the cores from groups */
	for (i = 0; i < total_cores; i++) {
		ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
						CPT_AF_EXEX_CTL2(i), 0x0);
		if (ret)
			return ret;

		cptpf->eng_grps.eng_ref_cnt[i] = 0;
	}
	ret = otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
	if (ret)
		return ret;

	/* Wait for cores to become idle */
	do {
		busy = 0;
		usleep_range(10000, 20000);
		if (timeout-- < 0)
			return -EBUSY;

		for (i = 0; i < total_cores; i++) {
			ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox,
						   cptpf->pdev,
						   CPT_AF_EXEX_STS(i), &reg);
			if (ret)
				return ret;

			if (reg & 0x1) {
				busy = 1;
				break;
			}
		}
	} while (busy);

	/* Disable the cores */
	for (i = 0; i < total_cores; i++) {
		ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
						CPT_AF_EXEX_CTL(i), 0x0);
		if (ret)
			return ret;
	}
	return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
}

void otx2_cpt_cleanup_eng_grps(struct pci_dev *pdev,
			       struct otx2_cpt_eng_grps *eng_grps)
{
	struct otx2_cpt_eng_grp_info *grp;
	int i, j;

	delete_engine_grps(pdev, eng_grps);
	/* Release memory */
	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
		grp = &eng_grps->grp[i];
		for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
			kfree(grp->engs[j].bmap);
			grp->engs[j].bmap = NULL;
		}
	}
}

int otx2_cpt_init_eng_grps(struct pci_dev *pdev,
			   struct otx2_cpt_eng_grps *eng_grps)
{
	struct otx2_cpt_eng_grp_info *grp;
	int i, j, ret;

	eng_grps->obj = pci_get_drvdata(pdev);
	eng_grps->avail.se_cnt = eng_grps->avail.max_se_cnt;
	eng_grps->avail.ie_cnt = eng_grps->avail.max_ie_cnt;
	eng_grps->avail.ae_cnt = eng_grps->avail.max_ae_cnt;

	eng_grps->engs_num = eng_grps->avail.max_se_cnt +
			     eng_grps->avail.max_ie_cnt +
			     eng_grps->avail.max_ae_cnt;
	if (eng_grps->engs_num > OTX2_CPT_MAX_ENGINES) {
		dev_err(&pdev->dev,
			"Number of engines %d exceeds max supported %d\n",
			eng_grps->engs_num, OTX2_CPT_MAX_ENGINES);
		ret = -EINVAL;
		goto cleanup_eng_grps;
	}

	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
		grp = &eng_grps->grp[i];
		grp->g = eng_grps;
		grp->idx = i;

		for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
			grp->engs[j].bmap =
				kcalloc(BITS_TO_LONGS(eng_grps->engs_num),
					sizeof(long), GFP_KERNEL);
			if (!grp->engs[j].bmap) {
				ret = -ENOMEM;
				goto cleanup_eng_grps;
			}
		}
	}
	return 0;

cleanup_eng_grps:
	otx2_cpt_cleanup_eng_grps(pdev, eng_grps);
	return ret;
}
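
/*
 * Create one minimal engine group per engine type (two engines each,
 * enough to execute a LOADFVC request) so the capabilities of every
 * type can be queried. These groups are temporary; the caller deletes
 * them again once discovery is done.
 */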
static int create_eng_caps_discovery_grps(struct pci_dev *pdev,
					  struct otx2_cpt_eng_grps *eng_grps)
{
	struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = { };
	struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { {0} };
	struct fw_info_t fw_info;
	int ret;

	ret = cpt_ucode_load_fw(pdev, &fw_info);
	if (ret)
		return ret;

	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_AE_TYPES);
	if (uc_info[0] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for AE\n");
		ret = -EINVAL;
		goto release_fw;
	}
	engs[0].type = OTX2_CPT_AE_TYPES;
	engs[0].count = 2;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
				  (void **) uc_info, 0);
	if (ret)
		goto release_fw;

	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
	if (uc_info[0] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for SE\n");
		ret = -EINVAL;
		goto delete_eng_grp;
	}
	engs[0].type = OTX2_CPT_SE_TYPES;
	engs[0].count = 2;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
				  (void **) uc_info, 0);
	if (ret)
		goto delete_eng_grp;

	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_IE_TYPES);
	if (uc_info[0] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for IE\n");
		ret = -EINVAL;
		goto delete_eng_grp;
	}
	engs[0].type = OTX2_CPT_IE_TYPES;
	engs[0].count = 2;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
				  (void **) uc_info, 0);
	if (ret)
		goto delete_eng_grp;

	cpt_ucode_release_fw(&fw_info);
	return 0;

delete_eng_grp:
	delete_engine_grps(pdev, eng_grps);
release_fw:
	cpt_ucode_release_fw(&fw_info);
	return ret;
}

/*
 * Get CPT HW capabilities using LOAD_FVC operation.
 */
int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
{
	struct otx2_cptlfs_info *lfs = &cptpf->lfs;
	struct otx2_cpt_iq_command iq_cmd;
	union otx2_cpt_opcode opcode;
	union otx2_cpt_res_s *result;
	union otx2_cpt_inst_s inst;
	dma_addr_t rptr_baddr;
	struct pci_dev *pdev;
	u32 len, compl_rlen;
	int ret, etype;
	void *rptr;

	/*
	 * Don't rediscover the capabilities if this was already done
	 * (when the user enabled VFs for the first time)
	 */
	if (cptpf->is_eng_caps_discovered)
		return 0;

	pdev = cptpf->pdev;
	/*
	 * Create engine groups for each type to submit LOAD_FVC op and
	 * get engine's capabilities.
	 */
	ret = create_eng_caps_discovery_grps(pdev, &cptpf->eng_grps);
	if (ret)
		goto delete_grps;

	lfs->pdev = pdev;
	lfs->reg_base = cptpf->reg_base;
	lfs->mbox = &cptpf->afpf_mbox;
	ret = otx2_cptlf_init(&cptpf->lfs, OTX2_CPT_ALL_ENG_GRPS_MASK,
			      OTX2_CPT_QUEUE_HI_PRIO, 1);
	if (ret)
		goto delete_grps;

	compl_rlen = ALIGN(sizeof(union otx2_cpt_res_s), OTX2_CPT_DMA_MINALIGN);
	len = compl_rlen + LOADFVC_RLEN;

	result = kzalloc(len, GFP_KERNEL);
	if (!result) {
		ret = -ENOMEM;
		goto lf_cleanup;
	}
	rptr_baddr = dma_map_single(&pdev->dev, (void *)result, len,
				    DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&pdev->dev, rptr_baddr)) {
		dev_err(&pdev->dev, "DMA mapping failed\n");
		ret = -EFAULT;
		goto free_result;
	}
	rptr = (u8 *)result + compl_rlen;
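	/*
	 * Buffer layout (one kzalloc'ed allocation, DMA mapped as a whole):
	 *
	 *   result                     rptr
	 *   |                          |
	 *   [union otx2_cpt_res_s][pad][8-byte LOADFVC result]
	 *   <------- compl_rlen ------>
	 *
	 * The completion word is polled via "result" while the engine
	 * writes its capability flags at iq_cmd.rptr (rptr_baddr +
	 * compl_rlen), read back below as a big-endian u64.
	 */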

	/* Fill in the command */
	opcode.s.major = LOADFVC_MAJOR_OP;
	opcode.s.minor = LOADFVC_MINOR_OP;

	iq_cmd.cmd.u = 0;
	iq_cmd.cmd.s.opcode = cpu_to_be16(opcode.flags);

	/* 64-bit swap for microcode data reads, not needed for addresses */
	cpu_to_be64s(&iq_cmd.cmd.u);
	iq_cmd.dptr = 0;
	iq_cmd.rptr = rptr_baddr + compl_rlen;
	iq_cmd.cptr.u = 0;

	for (etype = 1; etype < OTX2_CPT_MAX_ENG_TYPES; etype++) {
		result->s.compcode = OTX2_CPT_COMPLETION_CODE_INIT;
		iq_cmd.cptr.s.grp = otx2_cpt_get_eng_grp(&cptpf->eng_grps,
							 etype);
		otx2_cpt_fill_inst(&inst, &iq_cmd, rptr_baddr);
		otx2_cpt_send_cmd(&inst, 1, &cptpf->lfs.lf[0]);

		while (result->s.compcode == OTX2_CPT_COMPLETION_CODE_INIT)
			cpu_relax();

		cptpf->eng_caps[etype].u = be64_to_cpup(rptr);
	}
	dma_unmap_single(&pdev->dev, rptr_baddr, len, DMA_BIDIRECTIONAL);
	cptpf->is_eng_caps_discovered = true;

free_result:
	kfree(result);
lf_cleanup:
	otx2_cptlf_shutdown(&cptpf->lfs);
delete_grps:
	delete_engine_grps(pdev, &cptpf->eng_grps);

	return ret;
}