// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */

#include <linux/ctype.h>
#include <linux/firmware.h>
#include "otx2_cptpf_ucode.h"
#include "otx2_cpt_common.h"
#include "otx2_cptpf.h"
#include "otx2_cptlf.h"
#include "otx2_cpt_reqmgr.h"
#include "rvu_reg.h"

#define CSR_DELAY 30

#define LOADFVC_RLEN 8
#define LOADFVC_MAJOR_OP 0x01
#define LOADFVC_MINOR_OP 0x08

struct fw_info_t {
	struct list_head ucodes;
};

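/*
 * Build the combined bitmap of all engines reserved for @eng_grp across
 * every engine type attached to the group. Returns a bitmap with
 * size == 0 if the engine count is out of range or no engines are
 * reserved.
 */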
static struct otx2_cpt_bitmap get_cores_bmap(struct device *dev,
					struct otx2_cpt_eng_grp_info *eng_grp)
{
	struct otx2_cpt_bitmap bmap = { {0} };
	bool found = false;
	int i;

	if (eng_grp->g->engs_num > OTX2_CPT_MAX_ENGINES) {
		dev_err(dev, "unsupported number of engines %d on octeontx2\n",
			eng_grp->g->engs_num);
		return bmap;
	}

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (eng_grp->engs[i].type) {
			bitmap_or(bmap.bits, bmap.bits,
				  eng_grp->engs[i].bmap,
				  eng_grp->g->engs_num);
			bmap.size = eng_grp->g->engs_num;
			found = true;
		}
	}

	if (!found)
		dev_err(dev, "No engines reserved for engine group %d\n",
			eng_grp->idx);
	return bmap;
}

static int is_eng_type(int val, int eng_type)
{
	return val & (1 << eng_type);
}

static int is_2nd_ucode_used(struct otx2_cpt_eng_grp_info *eng_grp)
{
	if (eng_grp->ucode[1].type)
		return true;
	else
		return false;
}

static void set_ucode_filename(struct otx2_cpt_ucode *ucode,
			       const char *filename)
{
	strlcpy(ucode->filename, filename, OTX2_CPT_NAME_LENGTH);
}

static char *get_eng_type_str(int eng_type)
{
	char *str = "unknown";

	switch (eng_type) {
	case OTX2_CPT_SE_TYPES:
		str = "SE";
		break;

	case OTX2_CPT_IE_TYPES:
		str = "IE";
		break;

	case OTX2_CPT_AE_TYPES:
		str = "AE";
		break;
	}
	return str;
}

static char *get_ucode_type_str(int ucode_type)
{
	char *str = "unknown";

	switch (ucode_type) {
	case (1 << OTX2_CPT_SE_TYPES):
		str = "SE";
		break;

	case (1 << OTX2_CPT_IE_TYPES):
		str = "IE";
		break;

	case (1 << OTX2_CPT_AE_TYPES):
		str = "AE";
		break;

	case (1 << OTX2_CPT_SE_TYPES | 1 << OTX2_CPT_IE_TYPES):
		str = "SE+IPSEC";
		break;
	}
	return str;
}

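/*
 * Derive the ucode type bitmask from the ucode header. The version
 * string must contain "ocpt-<rev>" (<rev> being this device's PCI
 * revision) and is scanned, case-insensitively, for the engine-type
 * tags "se-", "ie-" and "ae"; a tag only counts if the version number
 * field (ver_num.nn) matches the corresponding ucode type.
 */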
static int get_ucode_type(struct device *dev,
			  struct otx2_cpt_ucode_hdr *ucode_hdr,
			  int *ucode_type)
{
	struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
	char ver_str_prefix[OTX2_CPT_UCODE_VER_STR_SZ];
	char tmp_ver_str[OTX2_CPT_UCODE_VER_STR_SZ];
	struct pci_dev *pdev = cptpf->pdev;
	int i, val = 0;
	u8 nn;

	strlcpy(tmp_ver_str, ucode_hdr->ver_str, OTX2_CPT_UCODE_VER_STR_SZ);
	for (i = 0; i < strlen(tmp_ver_str); i++)
		tmp_ver_str[i] = tolower(tmp_ver_str[i]);

	sprintf(ver_str_prefix, "ocpt-%02d", pdev->revision);
	if (!strnstr(tmp_ver_str, ver_str_prefix, OTX2_CPT_UCODE_VER_STR_SZ))
		return -EINVAL;

	nn = ucode_hdr->ver_num.nn;
	if (strnstr(tmp_ver_str, "se-", OTX2_CPT_UCODE_VER_STR_SZ) &&
	    (nn == OTX2_CPT_SE_UC_TYPE1 || nn == OTX2_CPT_SE_UC_TYPE2 ||
	     nn == OTX2_CPT_SE_UC_TYPE3))
		val |= 1 << OTX2_CPT_SE_TYPES;
	if (strnstr(tmp_ver_str, "ie-", OTX2_CPT_UCODE_VER_STR_SZ) &&
	    (nn == OTX2_CPT_IE_UC_TYPE1 || nn == OTX2_CPT_IE_UC_TYPE2 ||
	     nn == OTX2_CPT_IE_UC_TYPE3))
		val |= 1 << OTX2_CPT_IE_TYPES;
	if (strnstr(tmp_ver_str, "ae", OTX2_CPT_UCODE_VER_STR_SZ) &&
	    nn == OTX2_CPT_AE_UC_TYPE)
		val |= 1 << OTX2_CPT_AE_TYPES;

	*ucode_type = val;

	if (!val)
		return -EINVAL;

	return 0;
}

static int __write_ucode_base(struct otx2_cptpf_dev *cptpf, int eng,
			      dma_addr_t dma_addr, int blkaddr)
{
	return otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
				     CPT_AF_EXEX_UCODE_BASE(eng),
				     (u64)dma_addr, blkaddr);
}

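/*
 * Program CPT_AF_EXEX_UCODE_BASE with the DMA address of the group's
 * ucode for every engine reserved by @eng_grp. Only engines with a
 * zero reference count are (re)programmed; engines still in use by
 * another group keep their current UCODE_BASE.
 */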
static int cptx_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp,
			       struct otx2_cptpf_dev *cptpf, int blkaddr)
{
	struct otx2_cpt_engs_rsvd *engs;
	dma_addr_t dma_addr;
	int i, bit, ret;

	/* Set PF number for microcode fetches */
	ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
				    CPT_AF_PF_FUNC,
				    cptpf->pf_id << RVU_PFVF_PF_SHIFT, blkaddr);
	if (ret)
		return ret;

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		engs = &eng_grp->engs[i];
		if (!engs->type)
			continue;

		dma_addr = engs->ucode->dma;

		/*
		 * Set UCODE_BASE only for the cores which are not used,
		 * other cores should have already valid UCODE_BASE set
		 */
		for_each_set_bit(bit, engs->bmap, eng_grp->g->engs_num)
			if (!eng_grp->g->eng_ref_cnt[bit]) {
				ret = __write_ucode_base(cptpf, bit, dma_addr,
							 blkaddr);
				if (ret)
					return ret;
			}
	}
	return 0;
}

static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj)
{
	struct otx2_cptpf_dev *cptpf = obj;
	int ret;

	if (cptpf->has_cpt1) {
		ret = cptx_set_ucode_base(eng_grp, cptpf, BLKADDR_CPT1);
		if (ret)
			return ret;
	}
	return cptx_set_ucode_base(eng_grp, cptpf, BLKADDR_CPT0);
}

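/*
 * Detach the group's engines from it (clear the group bit in
 * CPT_AF_EXEX_CTL2), wait for the cores to drain in-flight work
 * (EXE_STS busy bit), then disable every core that is no longer
 * referenced by any group. Gives up with -EBUSY if the cores are
 * still busy after roughly 10 polls (100-200 ms).
 */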
static int cptx_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
					 struct otx2_cptpf_dev *cptpf,
					 struct otx2_cpt_bitmap bmap,
					 int blkaddr)
{
	int i, timeout = 10;
	int busy, ret;
	u64 reg = 0;

	/* Detach the cores from group */
	for_each_set_bit(i, bmap.bits, bmap.size) {
		ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
					   CPT_AF_EXEX_CTL2(i), &reg, blkaddr);
		if (ret)
			return ret;

		if (reg & (1ull << eng_grp->idx)) {
			eng_grp->g->eng_ref_cnt[i]--;
			reg &= ~(1ull << eng_grp->idx);

			ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
						    cptpf->pdev,
						    CPT_AF_EXEX_CTL2(i), reg,
						    blkaddr);
			if (ret)
				return ret;
		}
	}

	/* Wait for cores to become idle */
	do {
		busy = 0;
		usleep_range(10000, 20000);
		if (timeout-- < 0)
			return -EBUSY;

		for_each_set_bit(i, bmap.bits, bmap.size) {
			ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox,
						   cptpf->pdev,
						   CPT_AF_EXEX_STS(i), &reg,
						   blkaddr);
			if (ret)
				return ret;

			if (reg & 0x1) {
				busy = 1;
				break;
			}
		}
	} while (busy);

	/* Disable the cores only if they are not used anymore */
	for_each_set_bit(i, bmap.bits, bmap.size) {
		if (!eng_grp->g->eng_ref_cnt[i]) {
			ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
						    cptpf->pdev,
						    CPT_AF_EXEX_CTL(i), 0x0,
						    blkaddr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
					void *obj)
{
	struct otx2_cptpf_dev *cptpf = obj;
	struct otx2_cpt_bitmap bmap;
	int ret;

	bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
	if (!bmap.size)
		return -EINVAL;

	if (cptpf->has_cpt1) {
		ret = cptx_detach_and_disable_cores(eng_grp, cptpf, bmap,
						    BLKADDR_CPT1);
		if (ret)
			return ret;
	}
	return cptx_detach_and_disable_cores(eng_grp, cptpf, bmap,
					     BLKADDR_CPT0);
}

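/*
 * Attach the group's engines (set the group bit in CPT_AF_EXEX_CTL2)
 * and bump their reference counts, then enable the cores. The enable
 * writes go through otx2_cpt_add_write_af_reg(), which presumably
 * queues them so they can be flushed with a single
 * otx2_cpt_send_af_reg_requests() mailbox send.
 */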
static int cptx_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
					struct otx2_cptpf_dev *cptpf,
					struct otx2_cpt_bitmap bmap,
					int blkaddr)
{
	u64 reg = 0;
	int i, ret;

	/* Attach the cores to the group */
	for_each_set_bit(i, bmap.bits, bmap.size) {
		ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
					   CPT_AF_EXEX_CTL2(i), &reg, blkaddr);
		if (ret)
			return ret;

		if (!(reg & (1ull << eng_grp->idx))) {
			eng_grp->g->eng_ref_cnt[i]++;
			reg |= 1ull << eng_grp->idx;

			ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
						    cptpf->pdev,
						    CPT_AF_EXEX_CTL2(i), reg,
						    blkaddr);
			if (ret)
				return ret;
		}
	}

	/* Enable the cores */
	for_each_set_bit(i, bmap.bits, bmap.size) {
		ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
						CPT_AF_EXEX_CTL(i), 0x1,
						blkaddr);
		if (ret)
			return ret;
	}
	return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
}

static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
				       void *obj)
{
	struct otx2_cptpf_dev *cptpf = obj;
	struct otx2_cpt_bitmap bmap;
	int ret;

	bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
	if (!bmap.size)
		return -EINVAL;

	if (cptpf->has_cpt1) {
		ret = cptx_attach_and_enable_cores(eng_grp, cptpf, bmap,
						   BLKADDR_CPT1);
		if (ret)
			return ret;
	}
	return cptx_attach_and_enable_cores(eng_grp, cptpf, bmap, BLKADDR_CPT0);
}

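/*
 * Load one ucode image via request_firmware(), validate its header and
 * queue it on @fw_info->ucodes. The image size in bytes is twice the
 * header's code_length value.
 */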
static int load_fw(struct device *dev, struct fw_info_t *fw_info,
		   char *filename)
{
	struct otx2_cpt_ucode_hdr *ucode_hdr;
	struct otx2_cpt_uc_info_t *uc_info;
	int ucode_type, ucode_size;
	int ret;

	uc_info = kzalloc(sizeof(*uc_info), GFP_KERNEL);
	if (!uc_info)
		return -ENOMEM;

	ret = request_firmware(&uc_info->fw, filename, dev);
	if (ret)
		goto free_uc_info;

	ucode_hdr = (struct otx2_cpt_ucode_hdr *)uc_info->fw->data;
	ret = get_ucode_type(dev, ucode_hdr, &ucode_type);
	if (ret)
		goto release_fw;

	ucode_size = ntohl(ucode_hdr->code_length) * 2;
	if (!ucode_size) {
		dev_err(dev, "Ucode %s invalid size\n", filename);
		ret = -EINVAL;
		goto release_fw;
	}

	set_ucode_filename(&uc_info->ucode, filename);
	memcpy(uc_info->ucode.ver_str, ucode_hdr->ver_str,
	       OTX2_CPT_UCODE_VER_STR_SZ);
	uc_info->ucode.ver_num = ucode_hdr->ver_num;
	uc_info->ucode.type = ucode_type;
	uc_info->ucode.size = ucode_size;
	list_add_tail(&uc_info->list, &fw_info->ucodes);

	return 0;

release_fw:
	release_firmware(uc_info->fw);
free_uc_info:
	kfree(uc_info);
	return ret;
}

static void cpt_ucode_release_fw(struct fw_info_t *fw_info)
{
	struct otx2_cpt_uc_info_t *curr, *temp;

	if (!fw_info)
		return;

	list_for_each_entry_safe(curr, temp, &fw_info->ucodes, list) {
		list_del(&curr->list);
		release_firmware(curr->fw);
		kfree(curr);
	}
}

static struct otx2_cpt_uc_info_t *get_ucode(struct fw_info_t *fw_info,
					    int ucode_type)
{
	struct otx2_cpt_uc_info_t *curr;

	list_for_each_entry(curr, &fw_info->ucodes, list) {
		if (!is_eng_type(curr->ucode.type, ucode_type))
			continue;

		return curr;
	}
	return NULL;
}

static void print_uc_info(struct fw_info_t *fw_info)
{
	struct otx2_cpt_uc_info_t *curr;

	list_for_each_entry(curr, &fw_info->ucodes, list) {
		pr_debug("Ucode filename %s\n", curr->ucode.filename);
		pr_debug("Ucode version string %s\n", curr->ucode.ver_str);
		pr_debug("Ucode version %d.%d.%d.%d\n",
			 curr->ucode.ver_num.nn, curr->ucode.ver_num.xx,
			 curr->ucode.ver_num.yy, curr->ucode.ver_num.zz);
		pr_debug("Ucode type (%d) %s\n", curr->ucode.type,
			 get_ucode_type_str(curr->ucode.type));
		pr_debug("Ucode size %d\n", curr->ucode.size);
		pr_debug("Ucode ptr %p\n", curr->fw->data);
	}
}

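/*
 * Request the ucode images for all engine types. The firmware files
 * are looked up as "mrvl/cpt<rev>/<type>.out", e.g. "mrvl/cpt01/se.out"
 * for the SE image on a revision 1 device.
 */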
static int cpt_ucode_load_fw(struct pci_dev *pdev, struct fw_info_t *fw_info)
{
	char filename[OTX2_CPT_NAME_LENGTH];
	char eng_type[8] = {0};
	int ret, e, i;

	INIT_LIST_HEAD(&fw_info->ucodes);

	for (e = 1; e < OTX2_CPT_MAX_ENG_TYPES; e++) {
		strcpy(eng_type, get_eng_type_str(e));
		for (i = 0; i < strlen(eng_type); i++)
			eng_type[i] = tolower(eng_type[i]);

		snprintf(filename, sizeof(filename), "mrvl/cpt%02d/%s.out",
			 pdev->revision, eng_type);
		/* Request firmware for each engine type */
		ret = load_fw(&pdev->dev, fw_info, filename);
		if (ret)
			goto release_fw;
	}
	print_uc_info(fw_info);
	return 0;

release_fw:
	cpt_ucode_release_fw(fw_info);
	return ret;
}

static struct otx2_cpt_engs_rsvd *find_engines_by_type(
					struct otx2_cpt_eng_grp_info *eng_grp,
					int eng_type)
{
	int i;

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (!eng_grp->engs[i].type)
			continue;

		if (eng_grp->engs[i].type == eng_type)
			return &eng_grp->engs[i];
	}
	return NULL;
}

static int eng_grp_has_eng_type(struct otx2_cpt_eng_grp_info *eng_grp,
				int eng_type)
{
	struct otx2_cpt_engs_rsvd *engs;

	engs = find_engines_by_type(eng_grp, eng_type);

	return (engs != NULL ? 1 : 0);
}

static int update_engines_avail_count(struct device *dev,
				      struct otx2_cpt_engs_available *avail,
				      struct otx2_cpt_engs_rsvd *engs, int val)
{
	switch (engs->type) {
	case OTX2_CPT_SE_TYPES:
		avail->se_cnt += val;
		break;

	case OTX2_CPT_IE_TYPES:
		avail->ie_cnt += val;
		break;

	case OTX2_CPT_AE_TYPES:
		avail->ae_cnt += val;
		break;

	default:
		dev_err(dev, "Invalid engine type %d\n", engs->type);
		return -EINVAL;
	}
	return 0;
}

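/*
 * Engines are numbered linearly by type: SE engines start at 0, IE
 * engines follow the last SE engine and AE engines follow the last IE
 * engine. The offset computed below is the first engine of the given
 * type in that global numbering.
 */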
static int update_engines_offset(struct device *dev,
				 struct otx2_cpt_engs_available *avail,
				 struct otx2_cpt_engs_rsvd *engs)
{
	switch (engs->type) {
	case OTX2_CPT_SE_TYPES:
		engs->offset = 0;
		break;

	case OTX2_CPT_IE_TYPES:
		engs->offset = avail->max_se_cnt;
		break;

	case OTX2_CPT_AE_TYPES:
		engs->offset = avail->max_se_cnt + avail->max_ie_cnt;
		break;

	default:
		dev_err(dev, "Invalid engine type %d\n", engs->type);
		return -EINVAL;
	}
	return 0;
}

static int release_engines(struct device *dev,
			   struct otx2_cpt_eng_grp_info *grp)
{
	int i, ret = 0;

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (!grp->engs[i].type)
			continue;

		if (grp->engs[i].count > 0) {
			ret = update_engines_avail_count(dev, &grp->g->avail,
							 &grp->engs[i],
							 grp->engs[i].count);
			if (ret)
				return ret;
		}

		grp->engs[i].type = 0;
		grp->engs[i].count = 0;
		grp->engs[i].offset = 0;
		grp->engs[i].ucode = NULL;
		bitmap_zero(grp->engs[i].bmap, grp->g->engs_num);
	}
	return 0;
}

static int do_reserve_engines(struct device *dev,
			      struct otx2_cpt_eng_grp_info *grp,
			      struct otx2_cpt_engines *req_engs)
{
	struct otx2_cpt_engs_rsvd *engs = NULL;
	int i, ret;

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (!grp->engs[i].type) {
			engs = &grp->engs[i];
			break;
		}
	}

	if (!engs)
		return -ENOMEM;

	engs->type = req_engs->type;
	engs->count = req_engs->count;

	ret = update_engines_offset(dev, &grp->g->avail, engs);
	if (ret)
		return ret;

	if (engs->count > 0) {
		ret = update_engines_avail_count(dev, &grp->g->avail, engs,
						 -engs->count);
		if (ret)
			return ret;
	}

	return 0;
}

static int check_engines_availability(struct device *dev,
				      struct otx2_cpt_eng_grp_info *grp,
				      struct otx2_cpt_engines *req_eng)
{
	int avail_cnt = 0;

	switch (req_eng->type) {
	case OTX2_CPT_SE_TYPES:
		avail_cnt = grp->g->avail.se_cnt;
		break;

	case OTX2_CPT_IE_TYPES:
		avail_cnt = grp->g->avail.ie_cnt;
		break;

	case OTX2_CPT_AE_TYPES:
		avail_cnt = grp->g->avail.ae_cnt;
		break;

	default:
		dev_err(dev, "Invalid engine type %d\n", req_eng->type);
		return -EINVAL;
	}

	if (avail_cnt < req_eng->count) {
		dev_err(dev,
			"Error: available %s engines %d fewer than requested %d\n",
			get_eng_type_str(req_eng->type),
			avail_cnt, req_eng->count);
		return -EBUSY;
	}
	return 0;
}

static int reserve_engines(struct device *dev,
			   struct otx2_cpt_eng_grp_info *grp,
			   struct otx2_cpt_engines *req_engs, int ucodes_cnt)
{
	int i, ret = 0;

	/* Validate that the requested numbers of engines are available */
	for (i = 0; i < ucodes_cnt; i++) {
		ret = check_engines_availability(dev, grp, &req_engs[i]);
		if (ret)
			return ret;
	}

	/* Reserve requested engines for this engine group */
	for (i = 0; i < ucodes_cnt; i++) {
		ret = do_reserve_engines(dev, grp, &req_engs[i]);
		if (ret)
			return ret;
	}
	return 0;
}

static void ucode_unload(struct device *dev, struct otx2_cpt_ucode *ucode)
{
	if (ucode->va) {
		dma_free_coherent(dev, ucode->size, ucode->va, ucode->dma);
		ucode->va = NULL;
		ucode->dma = 0;
		ucode->size = 0;
	}

	memset(&ucode->ver_str, 0, OTX2_CPT_UCODE_VER_STR_SZ);
	memset(&ucode->ver_num, 0, sizeof(struct otx2_cpt_ucode_ver_num));
	set_ucode_filename(ucode, "");
	ucode->type = 0;
}

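/*
 * Copy a ucode image (minus its header) into newly allocated DMA
 * coherent memory and convert it to the byte order the engines expect:
 * a 64-bit swap of each doubleword followed by a 16-bit swap of each
 * halfword.
 */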
static int copy_ucode_to_dma_mem(struct device *dev,
				 struct otx2_cpt_ucode *ucode,
				 const u8 *ucode_data)
{
	u32 i;

	/* Allocate DMAable space */
	ucode->va = dma_alloc_coherent(dev, ucode->size, &ucode->dma,
				       GFP_KERNEL);
	if (!ucode->va)
		return -ENOMEM;

	memcpy(ucode->va, ucode_data + sizeof(struct otx2_cpt_ucode_hdr),
	       ucode->size);

	/* Byte swap 64-bit */
	for (i = 0; i < (ucode->size / 8); i++)
		cpu_to_be64s(&((u64 *)ucode->va)[i]);
	/* Ucode needs 16-bit swap */
	for (i = 0; i < (ucode->size / 2); i++)
		cpu_to_be16s(&((u16 *)ucode->va)[i]);
	return 0;
}

static int enable_eng_grp(struct otx2_cpt_eng_grp_info *eng_grp,
			  void *obj)
{
	int ret;

	/* Point microcode to each core of the group */
	ret = cpt_set_ucode_base(eng_grp, obj);
	if (ret)
		return ret;

	/* Attach the cores to the group and enable them */
	ret = cpt_attach_and_enable_cores(eng_grp, obj);

	return ret;
}

static int disable_eng_grp(struct device *dev,
			   struct otx2_cpt_eng_grp_info *eng_grp,
			   void *obj)
{
	int i, ret;

	/* Disable all engines used by this group */
	ret = cpt_detach_and_disable_cores(eng_grp, obj);
	if (ret)
		return ret;

	/* Unload ucode used by this engine group */
	ucode_unload(dev, &eng_grp->ucode[0]);
	ucode_unload(dev, &eng_grp->ucode[1]);

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (!eng_grp->engs[i].type)
			continue;

		eng_grp->engs[i].ucode = &eng_grp->ucode[0];
	}

	/* Clear UCODE_BASE register for each engine used by this group */
	ret = cpt_set_ucode_base(eng_grp, obj);

	return ret;
}

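/*
 * Mirroring lets a new group reuse the ucode (and, where possible, the
 * engines) of an existing group that runs the same image. The source
 * group only tracks how many groups mirror it, while the mirroring
 * group records the index of its source.
 */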
static void setup_eng_grp_mirroring(struct otx2_cpt_eng_grp_info *dst_grp,
				    struct otx2_cpt_eng_grp_info *src_grp)
{
	/* Setup fields for engine group which is mirrored */
	src_grp->mirror.is_ena = false;
	src_grp->mirror.idx = 0;
	src_grp->mirror.ref_count++;

	/* Setup fields for mirroring engine group */
	dst_grp->mirror.is_ena = true;
	dst_grp->mirror.idx = src_grp->idx;
	dst_grp->mirror.ref_count = 0;
}

static void remove_eng_grp_mirroring(struct otx2_cpt_eng_grp_info *dst_grp)
{
	struct otx2_cpt_eng_grp_info *src_grp;

	if (!dst_grp->mirror.is_ena)
		return;

	src_grp = &dst_grp->g->grp[dst_grp->mirror.idx];

	src_grp->mirror.ref_count--;
	dst_grp->mirror.is_ena = false;
	dst_grp->mirror.idx = 0;
	dst_grp->mirror.ref_count = 0;
}

static void update_requested_engs(struct otx2_cpt_eng_grp_info *mirror_eng_grp,
				  struct otx2_cpt_engines *engs, int engs_cnt)
{
	struct otx2_cpt_engs_rsvd *mirrored_engs;
	int i;

	for (i = 0; i < engs_cnt; i++) {
		mirrored_engs = find_engines_by_type(mirror_eng_grp,
						     engs[i].type);
		if (!mirrored_engs)
			continue;

		/*
		 * If the mirrored group has this type of engines attached
		 * then there are 3 possible scenarios:
		 * 1) mirrored_engs.count == engs[i].count: all engines from
		 * the mirrored engine group will be shared with this engine
		 * group
		 * 2) mirrored_engs.count > engs[i].count: only a subset of
		 * the engines from the mirrored engine group will be shared
		 * with this engine group
		 * 3) mirrored_engs.count < engs[i].count: all engines from
		 * the mirrored engine group will be shared with this group
		 * and additional engines will be reserved for exclusive use
		 * by this engine group
		 */
		engs[i].count -= mirrored_engs->count;
	}
}

static struct otx2_cpt_eng_grp_info *find_mirrored_eng_grp(
					struct otx2_cpt_eng_grp_info *grp)
{
	struct otx2_cpt_eng_grps *eng_grps = grp->g;
	int i;

	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
		if (!eng_grps->grp[i].is_enabled)
			continue;
		if (eng_grps->grp[i].ucode[0].type &&
		    eng_grps->grp[i].ucode[1].type)
			continue;
		if (grp->idx == i)
			continue;
		if (!strncasecmp(eng_grps->grp[i].ucode[0].ver_str,
				 grp->ucode[0].ver_str,
				 OTX2_CPT_UCODE_VER_STR_SZ))
			return &eng_grps->grp[i];
	}

	return NULL;
}

static struct otx2_cpt_eng_grp_info *find_unused_eng_grp(
					struct otx2_cpt_eng_grps *eng_grps)
{
	int i;

	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
		if (!eng_grps->grp[i].is_enabled)
			return &eng_grps->grp[i];
	}
	return NULL;
}

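/*
 * Compute the per-type engine bitmaps for @eng_grp. Engines the group
 * must own exclusively are picked from the unused engines of the
 * matching type; if the group mirrors another one, the mirrored
 * group's engines are then OR-ed in (trimmed when this group needs
 * fewer engines than the mirror provides).
 */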
static int eng_grp_update_masks(struct device *dev,
				struct otx2_cpt_eng_grp_info *eng_grp)
{
	struct otx2_cpt_engs_rsvd *engs, *mirrored_engs;
	struct otx2_cpt_bitmap tmp_bmap = { {0} };
	int i, j, cnt, max_cnt;
	int bit;

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		engs = &eng_grp->engs[i];
		if (!engs->type)
			continue;
		if (engs->count <= 0)
			continue;

		switch (engs->type) {
		case OTX2_CPT_SE_TYPES:
			max_cnt = eng_grp->g->avail.max_se_cnt;
			break;

		case OTX2_CPT_IE_TYPES:
			max_cnt = eng_grp->g->avail.max_ie_cnt;
			break;

		case OTX2_CPT_AE_TYPES:
			max_cnt = eng_grp->g->avail.max_ae_cnt;
			break;

		default:
			dev_err(dev, "Invalid engine type %d\n", engs->type);
			return -EINVAL;
		}

		cnt = engs->count;
		WARN_ON(engs->offset + max_cnt > OTX2_CPT_MAX_ENGINES);
		bitmap_zero(tmp_bmap.bits, eng_grp->g->engs_num);
		for (j = engs->offset; j < engs->offset + max_cnt; j++) {
			if (!eng_grp->g->eng_ref_cnt[j]) {
				bitmap_set(tmp_bmap.bits, j, 1);
				cnt--;
				if (!cnt)
					break;
			}
		}

		if (cnt)
			return -ENOSPC;

		bitmap_copy(engs->bmap, tmp_bmap.bits, eng_grp->g->engs_num);
	}

	if (!eng_grp->mirror.is_ena)
		return 0;

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		engs = &eng_grp->engs[i];
		if (!engs->type)
			continue;

		mirrored_engs = find_engines_by_type(
					&eng_grp->g->grp[eng_grp->mirror.idx],
					engs->type);
		WARN_ON(!mirrored_engs && engs->count <= 0);
		if (!mirrored_engs)
			continue;

		bitmap_copy(tmp_bmap.bits, mirrored_engs->bmap,
			    eng_grp->g->engs_num);
		if (engs->count < 0) {
			bit = find_first_bit(mirrored_engs->bmap,
					     eng_grp->g->engs_num);
			bitmap_clear(tmp_bmap.bits, bit, -engs->count);
		}
		bitmap_or(engs->bmap, engs->bmap, tmp_bmap.bits,
			  eng_grp->g->engs_num);
	}
	return 0;
}

static int delete_engine_group(struct device *dev,
			       struct otx2_cpt_eng_grp_info *eng_grp)
{
	int ret;

	if (!eng_grp->is_enabled)
		return 0;

	if (eng_grp->mirror.ref_count)
		return -EINVAL;

	/* Remove engine group mirroring if enabled */
	remove_eng_grp_mirroring(eng_grp);

	/* Disable engine group */
	ret = disable_eng_grp(dev, eng_grp, eng_grp->g->obj);
	if (ret)
		return ret;

	/* Release all engines held by this engine group */
	ret = release_engines(dev, eng_grp);
	if (ret)
		return ret;

	eng_grp->is_enabled = false;

	return 0;
}

static void update_ucode_ptrs(struct otx2_cpt_eng_grp_info *eng_grp)
{
	struct otx2_cpt_ucode *ucode;

	if (eng_grp->mirror.is_ena)
		ucode = &eng_grp->g->grp[eng_grp->mirror.idx].ucode[0];
	else
		ucode = &eng_grp->ucode[0];
	WARN_ON(!eng_grp->engs[0].type);
	eng_grp->engs[0].ucode = ucode;

	if (eng_grp->engs[1].type) {
		if (is_2nd_ucode_used(eng_grp))
			eng_grp->engs[1].ucode = &eng_grp->ucode[1];
		else
			eng_grp->engs[1].ucode = ucode;
	}
}

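/*
 * Create and enable a new engine group: load the ucode images into DMA
 * memory, set up mirroring if an enabled group with identical ucode
 * already exists, reserve engines, program the engine masks and ucode
 * base addresses, and finally attach and enable the cores.
 */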
static int create_engine_group(struct device *dev,
			       struct otx2_cpt_eng_grps *eng_grps,
			       struct otx2_cpt_engines *engs, int ucodes_cnt,
			       void *ucode_data[], int is_print)
{
	struct otx2_cpt_eng_grp_info *mirrored_eng_grp;
	struct otx2_cpt_eng_grp_info *eng_grp;
	struct otx2_cpt_uc_info_t *uc_info;
	int i, ret = 0;

	/* Find engine group which is not used */
	eng_grp = find_unused_eng_grp(eng_grps);
	if (!eng_grp) {
		dev_err(dev, "Error: all engine groups are already in use\n");
		return -ENOSPC;
	}
	/* Load ucode */
	for (i = 0; i < ucodes_cnt; i++) {
		uc_info = (struct otx2_cpt_uc_info_t *) ucode_data[i];
		eng_grp->ucode[i] = uc_info->ucode;
		ret = copy_ucode_to_dma_mem(dev, &eng_grp->ucode[i],
					    uc_info->fw->data);
		if (ret)
			goto unload_ucode;
	}

	/* Check if this group mirrors another existing engine group */
	mirrored_eng_grp = find_mirrored_eng_grp(eng_grp);
	if (mirrored_eng_grp) {
		/* Setup mirroring */
		setup_eng_grp_mirroring(eng_grp, mirrored_eng_grp);

		/*
		 * Update count of requested engines because some
		 * of them might be shared with mirrored group
		 */
		update_requested_engs(mirrored_eng_grp, engs, ucodes_cnt);
	}
	ret = reserve_engines(dev, eng_grp, engs, ucodes_cnt);
	if (ret)
		goto unload_ucode;

	/* Update ucode pointers used by engines */
	update_ucode_ptrs(eng_grp);

	/* Update engine masks used by this group */
	ret = eng_grp_update_masks(dev, eng_grp);
	if (ret)
		goto release_engs;

	/* Enable engine group */
	ret = enable_eng_grp(eng_grp, eng_grps->obj);
	if (ret)
		goto release_engs;

	/*
	 * If this engine group mirrors another engine group
	 * then we need to unload ucode as we will use ucode
	 * from mirrored engine group
	 */
	if (eng_grp->mirror.is_ena)
		ucode_unload(dev, &eng_grp->ucode[0]);

	eng_grp->is_enabled = true;

	if (!is_print)
		return 0;

	if (mirrored_eng_grp)
		dev_info(dev,
			 "Engine_group%d: reuse microcode %s from group %d\n",
			 eng_grp->idx, mirrored_eng_grp->ucode[0].ver_str,
			 mirrored_eng_grp->idx);
	else
		dev_info(dev, "Engine_group%d: microcode loaded %s\n",
			 eng_grp->idx, eng_grp->ucode[0].ver_str);
	if (is_2nd_ucode_used(eng_grp))
		dev_info(dev, "Engine_group%d: microcode loaded %s\n",
			 eng_grp->idx, eng_grp->ucode[1].ver_str);

	return 0;

release_engs:
	release_engines(dev, eng_grp);
unload_ucode:
	ucode_unload(dev, &eng_grp->ucode[0]);
	ucode_unload(dev, &eng_grp->ucode[1]);
	return ret;
}

static void delete_engine_grps(struct pci_dev *pdev,
			       struct otx2_cpt_eng_grps *eng_grps)
{
	int i;

	/* First delete all mirroring engine groups */
	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++)
		if (eng_grps->grp[i].mirror.is_ena)
			delete_engine_group(&pdev->dev, &eng_grps->grp[i]);

	/* Delete remaining engine groups */
	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++)
		delete_engine_group(&pdev->dev, &eng_grps->grp[i]);
}

int otx2_cpt_get_eng_grp(struct otx2_cpt_eng_grps *eng_grps, int eng_type)
{
	int eng_grp_num = OTX2_CPT_INVALID_CRYPTO_ENG_GRP;
	struct otx2_cpt_eng_grp_info *grp;
	int i;

	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
		grp = &eng_grps->grp[i];
		if (!grp->is_enabled)
			continue;

		if (eng_type == OTX2_CPT_SE_TYPES) {
			if (eng_grp_has_eng_type(grp, eng_type) &&
			    !eng_grp_has_eng_type(grp, OTX2_CPT_IE_TYPES)) {
				eng_grp_num = i;
				break;
			}
		} else {
			if (eng_grp_has_eng_type(grp, eng_type)) {
				eng_grp_num = i;
				break;
			}
		}
	}
	return eng_grp_num;
}

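/*
 * Create the three default engine groups: group 0 with all SE engines
 * for kernel symmetric crypto, group 1 with SE + IE engines for IPsec
 * (its SE engines are shared with group 0 via mirroring) and group 2
 * with all AE engines for asymmetric crypto.
 */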
int otx2_cpt_create_eng_grps(struct pci_dev *pdev,
			     struct otx2_cpt_eng_grps *eng_grps)
{
	struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = { };
	struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { {0} };
	struct fw_info_t fw_info;
	int ret;

	/*
	 * Don't create the engine groups if they were already
	 * created (when the user enabled VFs for the first time)
	 */
	if (eng_grps->is_grps_created)
		return 0;

	ret = cpt_ucode_load_fw(pdev, &fw_info);
	if (ret)
		return ret;

	/*
	 * Create engine group with SE engines for kernel
	 * crypto functionality (symmetric crypto)
	 */
	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
	if (uc_info[0] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for SE\n");
		ret = -EINVAL;
		goto release_fw;
	}
	engs[0].type = OTX2_CPT_SE_TYPES;
	engs[0].count = eng_grps->avail.max_se_cnt;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
				  (void **) uc_info, 1);
	if (ret)
		goto release_fw;

	/*
	 * Create engine group with SE+IE engines for IPSec.
	 * All SE engines will be shared with engine group 0.
	 */
	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
	uc_info[1] = get_ucode(&fw_info, OTX2_CPT_IE_TYPES);

	if (uc_info[1] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for IE\n");
		ret = -EINVAL;
		goto delete_eng_grp;
	}
	engs[0].type = OTX2_CPT_SE_TYPES;
	engs[0].count = eng_grps->avail.max_se_cnt;
	engs[1].type = OTX2_CPT_IE_TYPES;
	engs[1].count = eng_grps->avail.max_ie_cnt;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 2,
				  (void **) uc_info, 1);
	if (ret)
		goto delete_eng_grp;

	/*
	 * Create engine group with AE engines for asymmetric
	 * crypto functionality.
	 */
	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_AE_TYPES);
	if (uc_info[0] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for AE\n");
		ret = -EINVAL;
		goto delete_eng_grp;
	}
	engs[0].type = OTX2_CPT_AE_TYPES;
	engs[0].count = eng_grps->avail.max_ae_cnt;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
				  (void **) uc_info, 1);
	if (ret)
		goto delete_eng_grp;

	eng_grps->is_grps_created = true;

	cpt_ucode_release_fw(&fw_info);
	return 0;

delete_eng_grp:
	delete_engine_grps(pdev, eng_grps);
release_fw:
	cpt_ucode_release_fw(&fw_info);
	return ret;
}

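/*
 * Detach every core from its group, wait for all cores to become idle,
 * then disable them, dropping all engine reference counts to zero.
 */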
static int cptx_disable_all_cores(struct otx2_cptpf_dev *cptpf, int total_cores,
				  int blkaddr)
{
	int timeout = 10, ret;
	int i, busy;
	u64 reg;

	/* Disengage the cores from groups */
	for (i = 0; i < total_cores; i++) {
		ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
						CPT_AF_EXEX_CTL2(i), 0x0,
						blkaddr);
		if (ret)
			return ret;

		cptpf->eng_grps.eng_ref_cnt[i] = 0;
	}
	ret = otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
	if (ret)
		return ret;

	/* Wait for cores to become idle */
	do {
		busy = 0;
		usleep_range(10000, 20000);
		if (timeout-- < 0)
			return -EBUSY;

		for (i = 0; i < total_cores; i++) {
			ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox,
						   cptpf->pdev,
						   CPT_AF_EXEX_STS(i), &reg,
						   blkaddr);
			if (ret)
				return ret;

			if (reg & 0x1) {
				busy = 1;
				break;
			}
		}
	} while (busy);

	/* Disable the cores */
	for (i = 0; i < total_cores; i++) {
		ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
						CPT_AF_EXEX_CTL(i), 0x0,
						blkaddr);
		if (ret)
			return ret;
	}
	return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
}

int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf)
{
	int total_cores, ret;

	total_cores = cptpf->eng_grps.avail.max_se_cnt +
		      cptpf->eng_grps.avail.max_ie_cnt +
		      cptpf->eng_grps.avail.max_ae_cnt;

	if (cptpf->has_cpt1) {
		ret = cptx_disable_all_cores(cptpf, total_cores, BLKADDR_CPT1);
		if (ret)
			return ret;
	}
	return cptx_disable_all_cores(cptpf, total_cores, BLKADDR_CPT0);
}

void otx2_cpt_cleanup_eng_grps(struct pci_dev *pdev,
			       struct otx2_cpt_eng_grps *eng_grps)
{
	struct otx2_cpt_eng_grp_info *grp;
	int i, j;

	delete_engine_grps(pdev, eng_grps);
	/* Release memory */
	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
		grp = &eng_grps->grp[i];
		for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
			kfree(grp->engs[j].bmap);
			grp->engs[j].bmap = NULL;
		}
	}
}

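/*
 * One-time setup of the engine-group bookkeeping: record the available
 * engine counts, check the total against OTX2_CPT_MAX_ENGINES and
 * allocate a per-type engine bitmap for every possible group.
 */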
int otx2_cpt_init_eng_grps(struct pci_dev *pdev,
			   struct otx2_cpt_eng_grps *eng_grps)
{
	struct otx2_cpt_eng_grp_info *grp;
	int i, j, ret;

	eng_grps->obj = pci_get_drvdata(pdev);
	eng_grps->avail.se_cnt = eng_grps->avail.max_se_cnt;
	eng_grps->avail.ie_cnt = eng_grps->avail.max_ie_cnt;
	eng_grps->avail.ae_cnt = eng_grps->avail.max_ae_cnt;

	eng_grps->engs_num = eng_grps->avail.max_se_cnt +
			     eng_grps->avail.max_ie_cnt +
			     eng_grps->avail.max_ae_cnt;
	if (eng_grps->engs_num > OTX2_CPT_MAX_ENGINES) {
		dev_err(&pdev->dev,
			"Number of engines %d exceeds max supported %d\n",
			eng_grps->engs_num, OTX2_CPT_MAX_ENGINES);
		ret = -EINVAL;
		goto cleanup_eng_grps;
	}

	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
		grp = &eng_grps->grp[i];
		grp->g = eng_grps;
		grp->idx = i;

		for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
			grp->engs[j].bmap =
				kcalloc(BITS_TO_LONGS(eng_grps->engs_num),
					sizeof(long), GFP_KERNEL);
			if (!grp->engs[j].bmap) {
				ret = -ENOMEM;
				goto cleanup_eng_grps;
			}
		}
	}
	return 0;

cleanup_eng_grps:
	otx2_cpt_cleanup_eng_grps(pdev, eng_grps);
	return ret;
}

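/*
 * Build one throwaway single-type engine group per engine type (two
 * engines each) so that a LOAD_FVC instruction can be issued to every
 * engine type during capability discovery. The groups are deleted
 * again by the caller once discovery completes.
 */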
static int create_eng_caps_discovery_grps(struct pci_dev *pdev,
					  struct otx2_cpt_eng_grps *eng_grps)
{
	struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = { };
	struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { {0} };
	struct fw_info_t fw_info;
	int ret;

	ret = cpt_ucode_load_fw(pdev, &fw_info);
	if (ret)
		return ret;

	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_AE_TYPES);
	if (uc_info[0] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for AE\n");
		ret = -EINVAL;
		goto release_fw;
	}
	engs[0].type = OTX2_CPT_AE_TYPES;
	engs[0].count = 2;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
				  (void **) uc_info, 0);
	if (ret)
		goto release_fw;

	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
	if (uc_info[0] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for SE\n");
		ret = -EINVAL;
		goto delete_eng_grp;
	}
	engs[0].type = OTX2_CPT_SE_TYPES;
	engs[0].count = 2;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
				  (void **) uc_info, 0);
	if (ret)
		goto delete_eng_grp;

	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_IE_TYPES);
	if (uc_info[0] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for IE\n");
		ret = -EINVAL;
		goto delete_eng_grp;
	}
	engs[0].type = OTX2_CPT_IE_TYPES;
	engs[0].count = 2;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
				  (void **) uc_info, 0);
	if (ret)
		goto delete_eng_grp;

	cpt_ucode_release_fw(&fw_info);
	return 0;

delete_eng_grp:
	delete_engine_grps(pdev, eng_grps);
release_fw:
	cpt_ucode_release_fw(&fw_info);
	return ret;
}

/*
 * Get CPT HW capabilities using LOAD_FVC operation.
 */
int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
{
	struct otx2_cptlfs_info *lfs = &cptpf->lfs;
	struct otx2_cpt_iq_command iq_cmd;
	union otx2_cpt_opcode opcode;
	union otx2_cpt_res_s *result;
	union otx2_cpt_inst_s inst;
	dma_addr_t rptr_baddr;
	struct pci_dev *pdev;
	u32 len, compl_rlen;
	int ret, etype;
	void *rptr;

	/*
	 * Don't rediscover the capabilities if it was already done
	 * (when the user enabled VFs for the first time)
	 */
	if (cptpf->is_eng_caps_discovered)
		return 0;

	pdev = cptpf->pdev;
	/*
	 * Create engine groups for each type to submit LOAD_FVC op and
	 * get engine's capabilities.
	 */
	ret = create_eng_caps_discovery_grps(pdev, &cptpf->eng_grps);
	if (ret)
		goto delete_grps;

	lfs->pdev = pdev;
	lfs->reg_base = cptpf->reg_base;
	lfs->mbox = &cptpf->afpf_mbox;
	lfs->blkaddr = BLKADDR_CPT0;
	ret = otx2_cptlf_init(&cptpf->lfs, OTX2_CPT_ALL_ENG_GRPS_MASK,
			      OTX2_CPT_QUEUE_HI_PRIO, 1);
	if (ret)
		goto delete_grps;

	compl_rlen = ALIGN(sizeof(union otx2_cpt_res_s), OTX2_CPT_DMA_MINALIGN);
	len = compl_rlen + LOADFVC_RLEN;

	result = kzalloc(len, GFP_KERNEL);
	if (!result) {
		ret = -ENOMEM;
		goto lf_cleanup;
	}
	rptr_baddr = dma_map_single(&pdev->dev, (void *)result, len,
				    DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&pdev->dev, rptr_baddr)) {
		dev_err(&pdev->dev, "DMA mapping failed\n");
		ret = -EFAULT;
		goto free_result;
	}
	rptr = (u8 *)result + compl_rlen;

	/* Fill in the command */
	opcode.s.major = LOADFVC_MAJOR_OP;
	opcode.s.minor = LOADFVC_MINOR_OP;

	iq_cmd.cmd.u = 0;
	iq_cmd.cmd.s.opcode = cpu_to_be16(opcode.flags);

	/* 64-bit swap for microcode data reads, not needed for addresses */
	cpu_to_be64s(&iq_cmd.cmd.u);
	iq_cmd.dptr = 0;
	iq_cmd.rptr = rptr_baddr + compl_rlen;
	iq_cmd.cptr.u = 0;

	for (etype = 1; etype < OTX2_CPT_MAX_ENG_TYPES; etype++) {
		result->s.compcode = OTX2_CPT_COMPLETION_CODE_INIT;
		iq_cmd.cptr.s.grp = otx2_cpt_get_eng_grp(&cptpf->eng_grps,
							 etype);
		otx2_cpt_fill_inst(&inst, &iq_cmd, rptr_baddr);
		otx2_cpt_send_cmd(&inst, 1, &cptpf->lfs.lf[0]);

		while (result->s.compcode == OTX2_CPT_COMPLETION_CODE_INIT)
			cpu_relax();

		cptpf->eng_caps[etype].u = be64_to_cpup(rptr);
	}
	dma_unmap_single(&pdev->dev, rptr_baddr, len, DMA_BIDIRECTIONAL);
	cptpf->is_eng_caps_discovered = true;

free_result:
	kfree(result);
lf_cleanup:
	otx2_cptlf_shutdown(&cptpf->lfs);
delete_grps:
	delete_engine_grps(pdev, &cptpf->eng_grps);

	return ret;
}