// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */

#include <linux/ctype.h>
#include <linux/firmware.h>
#include "otx2_cptpf_ucode.h"
#include "otx2_cpt_common.h"
#include "otx2_cptpf.h"
#include "otx2_cptlf.h"
#include "otx2_cpt_reqmgr.h"
#include "rvu_reg.h"

#define CSR_DELAY 30

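/*
 * The LOAD_FVC opcode below is used by otx2_cpt_discover_eng_capabilities()
 * to query engine capabilities; as implied by that code, the response is a
 * single 8-byte big-endian capabilities word, hence LOADFVC_RLEN.
 */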
#define LOADFVC_RLEN 8
#define LOADFVC_MAJOR_OP 0x01
#define LOADFVC_MINOR_OP 0x08

#define CTX_FLUSH_TIMER_CNT 0xFFFFFF

struct fw_info_t {
	struct list_head ucodes;
};

static struct otx2_cpt_bitmap get_cores_bmap(struct device *dev,
					struct otx2_cpt_eng_grp_info *eng_grp)
{
	struct otx2_cpt_bitmap bmap = { {0} };
	bool found = false;
	int i;

	if (eng_grp->g->engs_num > OTX2_CPT_MAX_ENGINES) {
		dev_err(dev, "unsupported number of engines %d on octeontx2\n",
			eng_grp->g->engs_num);
		return bmap;
	}

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (eng_grp->engs[i].type) {
			bitmap_or(bmap.bits, bmap.bits,
				  eng_grp->engs[i].bmap,
				  eng_grp->g->engs_num);
			bmap.size = eng_grp->g->engs_num;
			found = true;
		}
	}

	if (!found)
		dev_err(dev, "No engines reserved for engine group %d\n",
			eng_grp->idx);
	return bmap;
}

static int is_eng_type(int val, int eng_type)
{
	return val & (1 << eng_type);
}

static int is_2nd_ucode_used(struct otx2_cpt_eng_grp_info *eng_grp)
{
	if (eng_grp->ucode[1].type)
		return true;
	else
		return false;
}

static void set_ucode_filename(struct otx2_cpt_ucode *ucode,
			       const char *filename)
{
	strlcpy(ucode->filename, filename, OTX2_CPT_NAME_LENGTH);
}

static char *get_eng_type_str(int eng_type)
{
	char *str = "unknown";

	switch (eng_type) {
	case OTX2_CPT_SE_TYPES:
		str = "SE";
		break;

	case OTX2_CPT_IE_TYPES:
		str = "IE";
		break;

	case OTX2_CPT_AE_TYPES:
		str = "AE";
		break;
	}
	return str;
}

static char *get_ucode_type_str(int ucode_type)
{
	char *str = "unknown";

	switch (ucode_type) {
	case (1 << OTX2_CPT_SE_TYPES):
		str = "SE";
		break;

	case (1 << OTX2_CPT_IE_TYPES):
		str = "IE";
		break;

	case (1 << OTX2_CPT_AE_TYPES):
		str = "AE";
		break;

	case (1 << OTX2_CPT_SE_TYPES | 1 << OTX2_CPT_IE_TYPES):
		str = "SE+IPSEC";
		break;
	}
	return str;
}

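/*
 * Illustrative example (version string layout assumed from the checks
 * below): on a revision-1 device, a header whose lower-cased ver_str
 * contains "ocpt-01" and "se-", with ver_num.nn equal to one of the SE
 * ucode types, yields *ucode_type = 1 << OTX2_CPT_SE_TYPES. A string
 * matching both the "se-" and "ie-" patterns would produce the combined
 * mask reported as "SE+IPSEC" by get_ucode_type_str().
 */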
static int get_ucode_type(struct device *dev,
			  struct otx2_cpt_ucode_hdr *ucode_hdr,
			  int *ucode_type)
{
	struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
	char ver_str_prefix[OTX2_CPT_UCODE_VER_STR_SZ];
	char tmp_ver_str[OTX2_CPT_UCODE_VER_STR_SZ];
	struct pci_dev *pdev = cptpf->pdev;
	int i, val = 0;
	u8 nn;

	strlcpy(tmp_ver_str, ucode_hdr->ver_str, OTX2_CPT_UCODE_VER_STR_SZ);
	for (i = 0; i < strlen(tmp_ver_str); i++)
		tmp_ver_str[i] = tolower(tmp_ver_str[i]);

	sprintf(ver_str_prefix, "ocpt-%02d", pdev->revision);
	if (!strnstr(tmp_ver_str, ver_str_prefix, OTX2_CPT_UCODE_VER_STR_SZ))
		return -EINVAL;

	nn = ucode_hdr->ver_num.nn;
	if (strnstr(tmp_ver_str, "se-", OTX2_CPT_UCODE_VER_STR_SZ) &&
	    (nn == OTX2_CPT_SE_UC_TYPE1 || nn == OTX2_CPT_SE_UC_TYPE2 ||
	     nn == OTX2_CPT_SE_UC_TYPE3))
		val |= 1 << OTX2_CPT_SE_TYPES;
	if (strnstr(tmp_ver_str, "ie-", OTX2_CPT_UCODE_VER_STR_SZ) &&
	    (nn == OTX2_CPT_IE_UC_TYPE1 || nn == OTX2_CPT_IE_UC_TYPE2 ||
	     nn == OTX2_CPT_IE_UC_TYPE3))
		val |= 1 << OTX2_CPT_IE_TYPES;
	if (strnstr(tmp_ver_str, "ae", OTX2_CPT_UCODE_VER_STR_SZ) &&
	    nn == OTX2_CPT_AE_UC_TYPE)
		val |= 1 << OTX2_CPT_AE_TYPES;

	*ucode_type = val;

	if (!val)
		return -EINVAL;

	return 0;
}

static int __write_ucode_base(struct otx2_cptpf_dev *cptpf, int eng,
			      dma_addr_t dma_addr, int blkaddr)
{
	return otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
				     CPT_AF_EXEX_UCODE_BASE(eng),
				     (u64)dma_addr, blkaddr);
}

static int cptx_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp,
			       struct otx2_cptpf_dev *cptpf, int blkaddr)
{
	struct otx2_cpt_engs_rsvd *engs;
	dma_addr_t dma_addr;
	int i, bit, ret;

	/* Set PF number for microcode fetches */
	ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
				    CPT_AF_PF_FUNC,
				    cptpf->pf_id << RVU_PFVF_PF_SHIFT, blkaddr);
	if (ret)
		return ret;

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		engs = &eng_grp->engs[i];
		if (!engs->type)
			continue;

		dma_addr = engs->ucode->dma;

		/*
		 * Set UCODE_BASE only for the cores which are not used;
		 * other cores should already have a valid UCODE_BASE set.
		 */
		for_each_set_bit(bit, engs->bmap, eng_grp->g->engs_num)
			if (!eng_grp->g->eng_ref_cnt[bit]) {
				ret = __write_ucode_base(cptpf, bit, dma_addr,
							 blkaddr);
				if (ret)
					return ret;
			}
	}
	return 0;
}

static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj)
{
	struct otx2_cptpf_dev *cptpf = obj;
	int ret;

	if (cptpf->has_cpt1) {
		ret = cptx_set_ucode_base(eng_grp, cptpf, BLKADDR_CPT1);
		if (ret)
			return ret;
	}
	return cptx_set_ucode_base(eng_grp, cptpf, BLKADDR_CPT0);
}

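/*
 * Register protocol sketch, as implied by the helpers below: bit
 * 'eng_grp->idx' of CPT_AF_EXEX_CTL2(i) attaches engine i to the group,
 * bit 0 of CPT_AF_EXEX_STS(i) reads back as busy, and writing 0x1/0x0 to
 * CPT_AF_EXEX_CTL(i) enables/disables the engine.
 */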
static int cptx_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
					 struct otx2_cptpf_dev *cptpf,
					 struct otx2_cpt_bitmap bmap,
					 int blkaddr)
{
	int i, timeout = 10;
	int busy, ret;
	u64 reg = 0;

	/* Detach the cores from group */
	for_each_set_bit(i, bmap.bits, bmap.size) {
		ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
					   CPT_AF_EXEX_CTL2(i), &reg, blkaddr);
		if (ret)
			return ret;

		if (reg & (1ull << eng_grp->idx)) {
			eng_grp->g->eng_ref_cnt[i]--;
			reg &= ~(1ull << eng_grp->idx);

			ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
						    cptpf->pdev,
						    CPT_AF_EXEX_CTL2(i), reg,
						    blkaddr);
			if (ret)
				return ret;
		}
	}

	/* Wait for cores to become idle */
	do {
		busy = 0;
		usleep_range(10000, 20000);
		if (timeout-- < 0)
			return -EBUSY;

		for_each_set_bit(i, bmap.bits, bmap.size) {
			ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox,
						   cptpf->pdev,
						   CPT_AF_EXEX_STS(i), &reg,
						   blkaddr);
			if (ret)
				return ret;

			if (reg & 0x1) {
				busy = 1;
				break;
			}
		}
	} while (busy);

	/* Disable the cores only if they are not used anymore */
	for_each_set_bit(i, bmap.bits, bmap.size) {
		if (!eng_grp->g->eng_ref_cnt[i]) {
			ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
						    cptpf->pdev,
						    CPT_AF_EXEX_CTL(i), 0x0,
						    blkaddr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
					void *obj)
{
	struct otx2_cptpf_dev *cptpf = obj;
	struct otx2_cpt_bitmap bmap;
	int ret;

	bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
	if (!bmap.size)
		return -EINVAL;

	if (cptpf->has_cpt1) {
		ret = cptx_detach_and_disable_cores(eng_grp, cptpf, bmap,
						    BLKADDR_CPT1);
		if (ret)
			return ret;
	}
	return cptx_detach_and_disable_cores(eng_grp, cptpf, bmap,
					     BLKADDR_CPT0);
}

static int cptx_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
					struct otx2_cptpf_dev *cptpf,
					struct otx2_cpt_bitmap bmap,
					int blkaddr)
{
	u64 reg = 0;
	int i, ret;

	/* Attach the cores to the group */
	for_each_set_bit(i, bmap.bits, bmap.size) {
		ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
					   CPT_AF_EXEX_CTL2(i), &reg, blkaddr);
		if (ret)
			return ret;

		if (!(reg & (1ull << eng_grp->idx))) {
			eng_grp->g->eng_ref_cnt[i]++;
			reg |= 1ull << eng_grp->idx;

			ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
						    cptpf->pdev,
						    CPT_AF_EXEX_CTL2(i), reg,
						    blkaddr);
			if (ret)
				return ret;
		}
	}

	/* Enable the cores */
	for_each_set_bit(i, bmap.bits, bmap.size) {
		ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
						CPT_AF_EXEX_CTL(i), 0x1,
						blkaddr);
		if (ret)
			return ret;
	}
	return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
}

static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
				       void *obj)
{
	struct otx2_cptpf_dev *cptpf = obj;
	struct otx2_cpt_bitmap bmap;
	int ret;

	bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
	if (!bmap.size)
		return -EINVAL;

	if (cptpf->has_cpt1) {
		ret = cptx_attach_and_enable_cores(eng_grp, cptpf, bmap,
						   BLKADDR_CPT1);
		if (ret)
			return ret;
	}
	return cptx_attach_and_enable_cores(eng_grp, cptpf, bmap, BLKADDR_CPT0);
}

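/*
 * Note (an assumption based on the "* 2" below): the big-endian
 * code_length field in the ucode header appears to count 2-byte units,
 * so the byte size of the image is twice that value.
 */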
static int load_fw(struct device *dev, struct fw_info_t *fw_info,
		   char *filename)
{
	struct otx2_cpt_ucode_hdr *ucode_hdr;
	struct otx2_cpt_uc_info_t *uc_info;
	int ucode_type, ucode_size;
	int ret;

	uc_info = kzalloc(sizeof(*uc_info), GFP_KERNEL);
	if (!uc_info)
		return -ENOMEM;

	ret = request_firmware(&uc_info->fw, filename, dev);
	if (ret)
		goto free_uc_info;

	ucode_hdr = (struct otx2_cpt_ucode_hdr *)uc_info->fw->data;
	ret = get_ucode_type(dev, ucode_hdr, &ucode_type);
	if (ret)
		goto release_fw;

	ucode_size = ntohl(ucode_hdr->code_length) * 2;
	if (!ucode_size) {
		dev_err(dev, "Ucode %s invalid size\n", filename);
		ret = -EINVAL;
		goto release_fw;
	}

	set_ucode_filename(&uc_info->ucode, filename);
	memcpy(uc_info->ucode.ver_str, ucode_hdr->ver_str,
	       OTX2_CPT_UCODE_VER_STR_SZ);
	uc_info->ucode.ver_num = ucode_hdr->ver_num;
	uc_info->ucode.type = ucode_type;
	uc_info->ucode.size = ucode_size;
	list_add_tail(&uc_info->list, &fw_info->ucodes);

	return 0;

release_fw:
	release_firmware(uc_info->fw);
free_uc_info:
	kfree(uc_info);
	return ret;
}

static void cpt_ucode_release_fw(struct fw_info_t *fw_info)
{
	struct otx2_cpt_uc_info_t *curr, *temp;

	if (!fw_info)
		return;

	list_for_each_entry_safe(curr, temp, &fw_info->ucodes, list) {
		list_del(&curr->list);
		release_firmware(curr->fw);
		kfree(curr);
	}
}

static struct otx2_cpt_uc_info_t *get_ucode(struct fw_info_t *fw_info,
					    int ucode_type)
{
	struct otx2_cpt_uc_info_t *curr;

	list_for_each_entry(curr, &fw_info->ucodes, list) {
		if (!is_eng_type(curr->ucode.type, ucode_type))
			continue;

		return curr;
	}
	return NULL;
}

static void print_uc_info(struct fw_info_t *fw_info)
{
	struct otx2_cpt_uc_info_t *curr;

	list_for_each_entry(curr, &fw_info->ucodes, list) {
		pr_debug("Ucode filename %s\n", curr->ucode.filename);
		pr_debug("Ucode version string %s\n", curr->ucode.ver_str);
		pr_debug("Ucode version %d.%d.%d.%d\n",
			 curr->ucode.ver_num.nn, curr->ucode.ver_num.xx,
			 curr->ucode.ver_num.yy, curr->ucode.ver_num.zz);
		pr_debug("Ucode type (%d) %s\n", curr->ucode.type,
			 get_ucode_type_str(curr->ucode.type));
		pr_debug("Ucode size %d\n", curr->ucode.size);
		pr_debug("Ucode ptr %p\n", curr->fw->data);
	}
}

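/*
 * Firmware naming sketch: for PCI revision 1 and the SE engine type the
 * loop below requests "mrvl/cpt01/se.out" (the engine-type string is
 * lower-cased before being formatted into the path).
 */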
static int cpt_ucode_load_fw(struct pci_dev *pdev, struct fw_info_t *fw_info)
{
	char filename[OTX2_CPT_NAME_LENGTH];
	char eng_type[8] = {0};
	int ret, e, i;

	INIT_LIST_HEAD(&fw_info->ucodes);

	for (e = 1; e < OTX2_CPT_MAX_ENG_TYPES; e++) {
		strcpy(eng_type, get_eng_type_str(e));
		for (i = 0; i < strlen(eng_type); i++)
			eng_type[i] = tolower(eng_type[i]);

		snprintf(filename, sizeof(filename), "mrvl/cpt%02d/%s.out",
			 pdev->revision, eng_type);
		/* Request firmware for each engine type */
		ret = load_fw(&pdev->dev, fw_info, filename);
		if (ret)
			goto release_fw;
	}
	print_uc_info(fw_info);
	return 0;

release_fw:
	cpt_ucode_release_fw(fw_info);
	return ret;
}

static struct otx2_cpt_engs_rsvd *find_engines_by_type(
					struct otx2_cpt_eng_grp_info *eng_grp,
					int eng_type)
{
	int i;

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (!eng_grp->engs[i].type)
			continue;

		if (eng_grp->engs[i].type == eng_type)
			return &eng_grp->engs[i];
	}
	return NULL;
}

static int eng_grp_has_eng_type(struct otx2_cpt_eng_grp_info *eng_grp,
				int eng_type)
{
	struct otx2_cpt_engs_rsvd *engs;

	engs = find_engines_by_type(eng_grp, eng_type);

	return (engs != NULL ? 1 : 0);
}

static int update_engines_avail_count(struct device *dev,
				      struct otx2_cpt_engs_available *avail,
				      struct otx2_cpt_engs_rsvd *engs, int val)
{
	switch (engs->type) {
	case OTX2_CPT_SE_TYPES:
		avail->se_cnt += val;
		break;

	case OTX2_CPT_IE_TYPES:
		avail->ie_cnt += val;
		break;

	case OTX2_CPT_AE_TYPES:
		avail->ae_cnt += val;
		break;

	default:
		dev_err(dev, "Invalid engine type %d\n", engs->type);
		return -EINVAL;
	}
	return 0;
}

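/*
 * Engine numbering implied by the offsets below: the global engine space
 * is laid out as [SE | IE | AE], i.e. SE engines occupy indices
 * 0..max_se_cnt-1, IE engines start at max_se_cnt, and AE engines start
 * at max_se_cnt + max_ie_cnt.
 */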
static int update_engines_offset(struct device *dev,
				 struct otx2_cpt_engs_available *avail,
				 struct otx2_cpt_engs_rsvd *engs)
{
	switch (engs->type) {
	case OTX2_CPT_SE_TYPES:
		engs->offset = 0;
		break;

	case OTX2_CPT_IE_TYPES:
		engs->offset = avail->max_se_cnt;
		break;

	case OTX2_CPT_AE_TYPES:
		engs->offset = avail->max_se_cnt + avail->max_ie_cnt;
		break;

	default:
		dev_err(dev, "Invalid engine type %d\n", engs->type);
		return -EINVAL;
	}
	return 0;
}

static int release_engines(struct device *dev,
			   struct otx2_cpt_eng_grp_info *grp)
{
	int i, ret = 0;

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (!grp->engs[i].type)
			continue;

		if (grp->engs[i].count > 0) {
			ret = update_engines_avail_count(dev, &grp->g->avail,
							 &grp->engs[i],
							 grp->engs[i].count);
			if (ret)
				return ret;
		}

		grp->engs[i].type = 0;
		grp->engs[i].count = 0;
		grp->engs[i].offset = 0;
		grp->engs[i].ucode = NULL;
		bitmap_zero(grp->engs[i].bmap, grp->g->engs_num);
	}
	return 0;
}

static int do_reserve_engines(struct device *dev,
			      struct otx2_cpt_eng_grp_info *grp,
			      struct otx2_cpt_engines *req_engs)
{
	struct otx2_cpt_engs_rsvd *engs = NULL;
	int i, ret;

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (!grp->engs[i].type) {
			engs = &grp->engs[i];
			break;
		}
	}

	if (!engs)
		return -ENOMEM;

	engs->type = req_engs->type;
	engs->count = req_engs->count;

	ret = update_engines_offset(dev, &grp->g->avail, engs);
	if (ret)
		return ret;

	if (engs->count > 0) {
		ret = update_engines_avail_count(dev, &grp->g->avail, engs,
						 -engs->count);
		if (ret)
			return ret;
	}

	return 0;
}

static int check_engines_availability(struct device *dev,
				      struct otx2_cpt_eng_grp_info *grp,
				      struct otx2_cpt_engines *req_eng)
{
	int avail_cnt = 0;

	switch (req_eng->type) {
	case OTX2_CPT_SE_TYPES:
		avail_cnt = grp->g->avail.se_cnt;
		break;

	case OTX2_CPT_IE_TYPES:
		avail_cnt = grp->g->avail.ie_cnt;
		break;

	case OTX2_CPT_AE_TYPES:
		avail_cnt = grp->g->avail.ae_cnt;
		break;

	default:
		dev_err(dev, "Invalid engine type %d\n", req_eng->type);
		return -EINVAL;
	}

	if (avail_cnt < req_eng->count) {
		dev_err(dev,
			"Error: available %s engines %d less than requested %d\n",
			get_eng_type_str(req_eng->type),
			avail_cnt, req_eng->count);
		return -EBUSY;
	}
	return 0;
}

static int reserve_engines(struct device *dev,
			   struct otx2_cpt_eng_grp_info *grp,
			   struct otx2_cpt_engines *req_engs, int ucodes_cnt)
{
	int i, ret = 0;

	/* Validate that the requested number of engines is available */
	for (i = 0; i < ucodes_cnt; i++) {
		ret = check_engines_availability(dev, grp, &req_engs[i]);
		if (ret)
			return ret;
	}

	/* Reserve requested engines for this engine group */
	for (i = 0; i < ucodes_cnt; i++) {
		ret = do_reserve_engines(dev, grp, &req_engs[i]);
		if (ret)
			return ret;
	}
	return 0;
}

static void ucode_unload(struct device *dev, struct otx2_cpt_ucode *ucode)
{
	if (ucode->va) {
		dma_free_coherent(dev, OTX2_CPT_UCODE_SZ, ucode->va,
				  ucode->dma);
		ucode->va = NULL;
		ucode->dma = 0;
		ucode->size = 0;
	}

	memset(&ucode->ver_str, 0, OTX2_CPT_UCODE_VER_STR_SZ);
	memset(&ucode->ver_num, 0, sizeof(struct otx2_cpt_ucode_ver_num));
	set_ucode_filename(ucode, "");
	ucode->type = 0;
}

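/*
 * Worked example of the double swap in copy_ucode_to_dma_mem() on a
 * little-endian host: bytes b0..b7 of a 64-bit word become b7..b0 after
 * the 64-bit swap, then b6 b7 b4 b5 b2 b3 b0 b1 after the 16-bit swap,
 * i.e. the 16-bit words within each 64-bit word end up reversed while
 * the byte order inside each 16-bit word is preserved. On a big-endian
 * host both swaps are no-ops.
 */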
static int copy_ucode_to_dma_mem(struct device *dev,
				 struct otx2_cpt_ucode *ucode,
				 const u8 *ucode_data)
{
	u32 i;

	/* Allocate DMAable space */
	ucode->va = dma_alloc_coherent(dev, OTX2_CPT_UCODE_SZ, &ucode->dma,
				       GFP_KERNEL);
	if (!ucode->va)
		return -ENOMEM;

	memcpy(ucode->va, ucode_data + sizeof(struct otx2_cpt_ucode_hdr),
	       ucode->size);

	/* Byte swap 64-bit */
	for (i = 0; i < (ucode->size / 8); i++)
		cpu_to_be64s(&((u64 *)ucode->va)[i]);
	/* Ucode needs 16-bit swap */
	for (i = 0; i < (ucode->size / 2); i++)
		cpu_to_be16s(&((u16 *)ucode->va)[i]);
	return 0;
}

static int enable_eng_grp(struct otx2_cpt_eng_grp_info *eng_grp,
			  void *obj)
{
	int ret;

	/* Point microcode to each core of the group */
	ret = cpt_set_ucode_base(eng_grp, obj);
	if (ret)
		return ret;

	/* Attach the cores to the group and enable them */
	ret = cpt_attach_and_enable_cores(eng_grp, obj);

	return ret;
}

static int disable_eng_grp(struct device *dev,
			   struct otx2_cpt_eng_grp_info *eng_grp,
			   void *obj)
{
	int i, ret;

	/* Disable all engines used by this group */
	ret = cpt_detach_and_disable_cores(eng_grp, obj);
	if (ret)
		return ret;

	/* Unload ucode used by this engine group */
	ucode_unload(dev, &eng_grp->ucode[0]);
	ucode_unload(dev, &eng_grp->ucode[1]);

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (!eng_grp->engs[i].type)
			continue;

		eng_grp->engs[i].ucode = &eng_grp->ucode[0];
	}

	/* Clear UCODE_BASE register for each engine used by this group */
	ret = cpt_set_ucode_base(eng_grp, obj);

	return ret;
}

static void setup_eng_grp_mirroring(struct otx2_cpt_eng_grp_info *dst_grp,
				    struct otx2_cpt_eng_grp_info *src_grp)
{
	/* Setup fields for engine group which is mirrored */
	src_grp->mirror.is_ena = false;
	src_grp->mirror.idx = 0;
	src_grp->mirror.ref_count++;

	/* Setup fields for mirroring engine group */
	dst_grp->mirror.is_ena = true;
	dst_grp->mirror.idx = src_grp->idx;
	dst_grp->mirror.ref_count = 0;
}

static void remove_eng_grp_mirroring(struct otx2_cpt_eng_grp_info *dst_grp)
{
	struct otx2_cpt_eng_grp_info *src_grp;

	if (!dst_grp->mirror.is_ena)
		return;

	src_grp = &dst_grp->g->grp[dst_grp->mirror.idx];

	src_grp->mirror.ref_count--;
	dst_grp->mirror.is_ena = false;
	dst_grp->mirror.idx = 0;
	dst_grp->mirror.ref_count = 0;
}

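/*
 * Numeric example for the adjustment below: if the mirrored group holds
 * 10 SE engines and this group requests 15, engs[i].count drops to 5,
 * i.e. only 5 SE engines must be reserved exclusively; a result <= 0
 * means all requested engines of that type are shared with the mirrored
 * group.
 */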
static void update_requested_engs(struct otx2_cpt_eng_grp_info *mirror_eng_grp,
				  struct otx2_cpt_engines *engs, int engs_cnt)
{
	struct otx2_cpt_engs_rsvd *mirrored_engs;
	int i;

	for (i = 0; i < engs_cnt; i++) {
		mirrored_engs = find_engines_by_type(mirror_eng_grp,
						     engs[i].type);
		if (!mirrored_engs)
			continue;

		/*
		 * If mirrored group has this type of engines attached then
		 * there are 3 scenarios possible:
		 * 1) mirrored_engs.count == engs[i].count then all engines
		 * from mirrored engine group will be shared with this engine
		 * group
		 * 2) mirrored_engs.count > engs[i].count then only a subset of
		 * engines from mirrored engine group will be shared with this
		 * engine group
		 * 3) mirrored_engs.count < engs[i].count then all engines
		 * from mirrored engine group will be shared with this group
		 * and additional engines will be reserved for exclusive use
		 * by this engine group
		 */
		engs[i].count -= mirrored_engs->count;
	}
}

static struct otx2_cpt_eng_grp_info *find_mirrored_eng_grp(
					struct otx2_cpt_eng_grp_info *grp)
{
	struct otx2_cpt_eng_grps *eng_grps = grp->g;
	int i;

	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
		if (!eng_grps->grp[i].is_enabled)
			continue;
		if (eng_grps->grp[i].ucode[0].type &&
		    eng_grps->grp[i].ucode[1].type)
			continue;
		if (grp->idx == i)
			continue;
		if (!strncasecmp(eng_grps->grp[i].ucode[0].ver_str,
				 grp->ucode[0].ver_str,
				 OTX2_CPT_UCODE_VER_STR_SZ))
			return &eng_grps->grp[i];
	}

	return NULL;
}

static struct otx2_cpt_eng_grp_info *find_unused_eng_grp(
					struct otx2_cpt_eng_grps *eng_grps)
{
	int i;

	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
		if (!eng_grps->grp[i].is_enabled)
			return &eng_grps->grp[i];
	}
	return NULL;
}

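/*
 * Mirroring bitmap sketch for the second loop below: a negative
 * engs->count of -N means this group needs N fewer engines than the
 * mirrored group provides, so N bits starting at the mirrored bitmap's
 * first set bit are cleared from the copy before it is OR-ed into this
 * group's engine mask.
 */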
static int eng_grp_update_masks(struct device *dev,
				struct otx2_cpt_eng_grp_info *eng_grp)
{
	struct otx2_cpt_engs_rsvd *engs, *mirrored_engs;
	struct otx2_cpt_bitmap tmp_bmap = { {0} };
	int i, j, cnt, max_cnt;
	int bit;

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		engs = &eng_grp->engs[i];
		if (!engs->type)
			continue;
		if (engs->count <= 0)
			continue;

		switch (engs->type) {
		case OTX2_CPT_SE_TYPES:
			max_cnt = eng_grp->g->avail.max_se_cnt;
			break;

		case OTX2_CPT_IE_TYPES:
			max_cnt = eng_grp->g->avail.max_ie_cnt;
			break;

		case OTX2_CPT_AE_TYPES:
			max_cnt = eng_grp->g->avail.max_ae_cnt;
			break;

		default:
			dev_err(dev, "Invalid engine type %d\n", engs->type);
			return -EINVAL;
		}

		cnt = engs->count;
		WARN_ON(engs->offset + max_cnt > OTX2_CPT_MAX_ENGINES);
		bitmap_zero(tmp_bmap.bits, eng_grp->g->engs_num);
		for (j = engs->offset; j < engs->offset + max_cnt; j++) {
			if (!eng_grp->g->eng_ref_cnt[j]) {
				bitmap_set(tmp_bmap.bits, j, 1);
				cnt--;
				if (!cnt)
					break;
			}
		}

		if (cnt)
			return -ENOSPC;

		bitmap_copy(engs->bmap, tmp_bmap.bits, eng_grp->g->engs_num);
	}

	if (!eng_grp->mirror.is_ena)
		return 0;

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		engs = &eng_grp->engs[i];
		if (!engs->type)
			continue;

		mirrored_engs = find_engines_by_type(
					&eng_grp->g->grp[eng_grp->mirror.idx],
					engs->type);
		WARN_ON(!mirrored_engs && engs->count <= 0);
		if (!mirrored_engs)
			continue;

		bitmap_copy(tmp_bmap.bits, mirrored_engs->bmap,
			    eng_grp->g->engs_num);
		if (engs->count < 0) {
			bit = find_first_bit(mirrored_engs->bmap,
					     eng_grp->g->engs_num);
			bitmap_clear(tmp_bmap.bits, bit, -engs->count);
		}
		bitmap_or(engs->bmap, engs->bmap, tmp_bmap.bits,
			  eng_grp->g->engs_num);
	}
	return 0;
}

static int delete_engine_group(struct device *dev,
			       struct otx2_cpt_eng_grp_info *eng_grp)
{
	int ret;

	if (!eng_grp->is_enabled)
		return 0;

	if (eng_grp->mirror.ref_count)
		return -EINVAL;

	/* Remove engine group mirroring if enabled */
	remove_eng_grp_mirroring(eng_grp);

	/* Disable engine group */
	ret = disable_eng_grp(dev, eng_grp, eng_grp->g->obj);
	if (ret)
		return ret;

	/* Release all engines held by this engine group */
	ret = release_engines(dev, eng_grp);
	if (ret)
		return ret;

	eng_grp->is_enabled = false;

	return 0;
}

static void update_ucode_ptrs(struct otx2_cpt_eng_grp_info *eng_grp)
{
	struct otx2_cpt_ucode *ucode;

	if (eng_grp->mirror.is_ena)
		ucode = &eng_grp->g->grp[eng_grp->mirror.idx].ucode[0];
	else
		ucode = &eng_grp->ucode[0];
	WARN_ON(!eng_grp->engs[0].type);
	eng_grp->engs[0].ucode = ucode;

	if (eng_grp->engs[1].type) {
		if (is_2nd_ucode_used(eng_grp))
			eng_grp->engs[1].ucode = &eng_grp->ucode[1];
		else
			eng_grp->engs[1].ucode = ucode;
	}
}

static int create_engine_group(struct device *dev,
			       struct otx2_cpt_eng_grps *eng_grps,
			       struct otx2_cpt_engines *engs, int ucodes_cnt,
			       void *ucode_data[], int is_print)
{
	struct otx2_cpt_eng_grp_info *mirrored_eng_grp;
	struct otx2_cpt_eng_grp_info *eng_grp;
	struct otx2_cpt_uc_info_t *uc_info;
	int i, ret = 0;

	/* Find engine group which is not used */
	eng_grp = find_unused_eng_grp(eng_grps);
	if (!eng_grp) {
		dev_err(dev, "Error: all engine groups are in use\n");
		return -ENOSPC;
	}
	/* Load ucode */
	for (i = 0; i < ucodes_cnt; i++) {
		uc_info = (struct otx2_cpt_uc_info_t *) ucode_data[i];
		eng_grp->ucode[i] = uc_info->ucode;
		ret = copy_ucode_to_dma_mem(dev, &eng_grp->ucode[i],
					    uc_info->fw->data);
		if (ret)
			goto unload_ucode;
	}

	/* Check if this group mirrors another existing engine group */
	mirrored_eng_grp = find_mirrored_eng_grp(eng_grp);
	if (mirrored_eng_grp) {
		/* Setup mirroring */
		setup_eng_grp_mirroring(eng_grp, mirrored_eng_grp);

		/*
		 * Update count of requested engines because some
		 * of them might be shared with mirrored group
		 */
		update_requested_engs(mirrored_eng_grp, engs, ucodes_cnt);
	}
	ret = reserve_engines(dev, eng_grp, engs, ucodes_cnt);
	if (ret)
		goto unload_ucode;

	/* Update ucode pointers used by engines */
	update_ucode_ptrs(eng_grp);

	/* Update engine masks used by this group */
	ret = eng_grp_update_masks(dev, eng_grp);
	if (ret)
		goto release_engs;

	/* Enable engine group */
	ret = enable_eng_grp(eng_grp, eng_grps->obj);
	if (ret)
		goto release_engs;

	/*
	 * If this engine group mirrors another engine group
	 * then we need to unload ucode as we will use ucode
	 * from mirrored engine group
	 */
	if (eng_grp->mirror.is_ena)
		ucode_unload(dev, &eng_grp->ucode[0]);

	eng_grp->is_enabled = true;

	if (!is_print)
		return 0;

	if (mirrored_eng_grp)
		dev_info(dev,
			 "Engine_group%d: reuse microcode %s from group %d\n",
			 eng_grp->idx, mirrored_eng_grp->ucode[0].ver_str,
			 mirrored_eng_grp->idx);
	else
		dev_info(dev, "Engine_group%d: microcode loaded %s\n",
			 eng_grp->idx, eng_grp->ucode[0].ver_str);
	if (is_2nd_ucode_used(eng_grp))
		dev_info(dev, "Engine_group%d: microcode loaded %s\n",
			 eng_grp->idx, eng_grp->ucode[1].ver_str);

	return 0;

release_engs:
	release_engines(dev, eng_grp);
unload_ucode:
	ucode_unload(dev, &eng_grp->ucode[0]);
	ucode_unload(dev, &eng_grp->ucode[1]);
	return ret;
}

static void delete_engine_grps(struct pci_dev *pdev,
			       struct otx2_cpt_eng_grps *eng_grps)
{
	int i;

	/* First delete all mirroring engine groups */
	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++)
		if (eng_grps->grp[i].mirror.is_ena)
			delete_engine_group(&pdev->dev, &eng_grps->grp[i]);

	/* Delete remaining engine groups */
	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++)
		delete_engine_group(&pdev->dev, &eng_grps->grp[i]);
}

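/*
 * Usage sketch (illustrative only):
 *
 *	int grp = otx2_cpt_get_eng_grp(&cptpf->eng_grps, OTX2_CPT_SE_TYPES);
 *	if (grp == OTX2_CPT_INVALID_CRYPTO_ENG_GRP)
 *		return -ENOENT;
 *
 * For SE requests a group that also holds IE engines (the IPsec group)
 * is skipped, so symmetric crypto lands on the SE-only group.
 */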
int otx2_cpt_get_eng_grp(struct otx2_cpt_eng_grps *eng_grps, int eng_type)
{
	int eng_grp_num = OTX2_CPT_INVALID_CRYPTO_ENG_GRP;
	struct otx2_cpt_eng_grp_info *grp;
	int i;

	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
		grp = &eng_grps->grp[i];
		if (!grp->is_enabled)
			continue;

		if (eng_type == OTX2_CPT_SE_TYPES) {
			if (eng_grp_has_eng_type(grp, eng_type) &&
			    !eng_grp_has_eng_type(grp, OTX2_CPT_IE_TYPES)) {
				eng_grp_num = i;
				break;
			}
		} else {
			if (eng_grp_has_eng_type(grp, eng_type)) {
				eng_grp_num = i;
				break;
			}
		}
	}
	return eng_grp_num;
}

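/*
 * Group layout created below: group 0 holds all SE engines for kernel
 * symmetric crypto, group 1 holds SE+IE engines for IPsec (its SE
 * engines are mirrored from group 0), and group 2 holds all AE engines
 * for asymmetric crypto.
 */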
int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev *cptpf,
			     struct otx2_cpt_eng_grps *eng_grps)
{
	struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = { };
	struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { {0} };
	struct pci_dev *pdev = cptpf->pdev;
	struct fw_info_t fw_info;
	int ret;

	/*
	 * Don't create engine groups if they were already created
	 * (when the user enabled VFs for the first time).
	 */
	if (eng_grps->is_grps_created)
		return 0;

	ret = cpt_ucode_load_fw(pdev, &fw_info);
	if (ret)
		return ret;

	/*
	 * Create engine group with SE engines for kernel
	 * crypto functionality (symmetric crypto)
	 */
	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
	if (uc_info[0] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for SE\n");
		ret = -EINVAL;
		goto release_fw;
	}
	engs[0].type = OTX2_CPT_SE_TYPES;
	engs[0].count = eng_grps->avail.max_se_cnt;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
				  (void **) uc_info, 1);
	if (ret)
		goto release_fw;

	/*
	 * Create engine group with SE+IE engines for IPSec.
	 * All SE engines will be shared with engine group 0.
	 */
	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
	uc_info[1] = get_ucode(&fw_info, OTX2_CPT_IE_TYPES);

	if (uc_info[1] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for IE\n");
		ret = -EINVAL;
		goto delete_eng_grp;
	}
	engs[0].type = OTX2_CPT_SE_TYPES;
	engs[0].count = eng_grps->avail.max_se_cnt;
	engs[1].type = OTX2_CPT_IE_TYPES;
	engs[1].count = eng_grps->avail.max_ie_cnt;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 2,
				  (void **) uc_info, 1);
	if (ret)
		goto delete_eng_grp;

	/*
	 * Create engine group with AE engines for asymmetric
	 * crypto functionality.
	 */
	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_AE_TYPES);
	if (uc_info[0] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for AE\n");
		ret = -EINVAL;
		goto delete_eng_grp;
	}
	engs[0].type = OTX2_CPT_AE_TYPES;
	engs[0].count = eng_grps->avail.max_ae_cnt;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
				  (void **) uc_info, 1);
	if (ret)
		goto delete_eng_grp;

	eng_grps->is_grps_created = true;

	cpt_ucode_release_fw(&fw_info);

	if (is_dev_otx2(pdev))
		return 0;
	/*
	 * Configure engine group mask to allow context prefetching
	 * for the groups.
	 */
	otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL,
			      OTX2_CPT_ALL_ENG_GRPS_MASK << 3 | BIT_ULL(16),
			      BLKADDR_CPT0);
	/*
	 * Set interval to periodically flush dirty data for the next
	 * CTX cache entry. Set the interval count to maximum supported
	 * value.
	 */
	otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTX_FLUSH_TIMER,
			      CTX_FLUSH_TIMER_CNT, BLKADDR_CPT0);
	return 0;

delete_eng_grp:
	delete_engine_grps(pdev, eng_grps);
release_fw:
	cpt_ucode_release_fw(&fw_info);
	return ret;
}

static int cptx_disable_all_cores(struct otx2_cptpf_dev *cptpf, int total_cores,
				  int blkaddr)
{
	int timeout = 10, ret;
	int i, busy;
	u64 reg;

	/* Disengage the cores from groups */
	for (i = 0; i < total_cores; i++) {
		ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
						CPT_AF_EXEX_CTL2(i), 0x0,
						blkaddr);
		if (ret)
			return ret;

		cptpf->eng_grps.eng_ref_cnt[i] = 0;
	}
	ret = otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
	if (ret)
		return ret;

	/* Wait for cores to become idle */
	do {
		busy = 0;
		usleep_range(10000, 20000);
		if (timeout-- < 0)
			return -EBUSY;

		for (i = 0; i < total_cores; i++) {
			ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox,
						   cptpf->pdev,
						   CPT_AF_EXEX_STS(i), &reg,
						   blkaddr);
			if (ret)
				return ret;

			if (reg & 0x1) {
				busy = 1;
				break;
			}
		}
	} while (busy);

	/* Disable the cores */
	for (i = 0; i < total_cores; i++) {
		ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
						CPT_AF_EXEX_CTL(i), 0x0,
						blkaddr);
		if (ret)
			return ret;
	}
	return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
}

int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf)
{
	int total_cores, ret;

	total_cores = cptpf->eng_grps.avail.max_se_cnt +
		      cptpf->eng_grps.avail.max_ie_cnt +
		      cptpf->eng_grps.avail.max_ae_cnt;

	if (cptpf->has_cpt1) {
		ret = cptx_disable_all_cores(cptpf, total_cores, BLKADDR_CPT1);
		if (ret)
			return ret;
	}
	return cptx_disable_all_cores(cptpf, total_cores, BLKADDR_CPT0);
}

void otx2_cpt_cleanup_eng_grps(struct pci_dev *pdev,
			       struct otx2_cpt_eng_grps *eng_grps)
{
	struct otx2_cpt_eng_grp_info *grp;
	int i, j;

	delete_engine_grps(pdev, eng_grps);
	/* Release memory */
	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
		grp = &eng_grps->grp[i];
		for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
			kfree(grp->engs[j].bmap);
			grp->engs[j].bmap = NULL;
		}
	}
}

int otx2_cpt_init_eng_grps(struct pci_dev *pdev,
			   struct otx2_cpt_eng_grps *eng_grps)
{
	struct otx2_cpt_eng_grp_info *grp;
	int i, j, ret;

	eng_grps->obj = pci_get_drvdata(pdev);
	eng_grps->avail.se_cnt = eng_grps->avail.max_se_cnt;
	eng_grps->avail.ie_cnt = eng_grps->avail.max_ie_cnt;
	eng_grps->avail.ae_cnt = eng_grps->avail.max_ae_cnt;

	eng_grps->engs_num = eng_grps->avail.max_se_cnt +
			     eng_grps->avail.max_ie_cnt +
			     eng_grps->avail.max_ae_cnt;
	if (eng_grps->engs_num > OTX2_CPT_MAX_ENGINES) {
		dev_err(&pdev->dev,
			"Number of engines %d exceeds max supported %d\n",
			eng_grps->engs_num, OTX2_CPT_MAX_ENGINES);
		ret = -EINVAL;
		goto cleanup_eng_grps;
	}

	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
		grp = &eng_grps->grp[i];
		grp->g = eng_grps;
		grp->idx = i;

		for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
			grp->engs[j].bmap =
				kcalloc(BITS_TO_LONGS(eng_grps->engs_num),
					sizeof(long), GFP_KERNEL);
			if (!grp->engs[j].bmap) {
				ret = -ENOMEM;
				goto cleanup_eng_grps;
			}
		}
	}
	return 0;

cleanup_eng_grps:
	otx2_cpt_cleanup_eng_grps(pdev, eng_grps);
	return ret;
}

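/*
 * For capability discovery one minimal group (2 engines) is created per
 * engine type so that a LOAD_FVC instruction can be steered to each type
 * in turn; the groups are deleted again once discovery completes.
 */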
static int create_eng_caps_discovery_grps(struct pci_dev *pdev,
					  struct otx2_cpt_eng_grps *eng_grps)
{
	struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = { };
	struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { {0} };
	struct fw_info_t fw_info;
	int ret;

	ret = cpt_ucode_load_fw(pdev, &fw_info);
	if (ret)
		return ret;

	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_AE_TYPES);
	if (uc_info[0] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for AE\n");
		ret = -EINVAL;
		goto release_fw;
	}
	engs[0].type = OTX2_CPT_AE_TYPES;
	engs[0].count = 2;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
				  (void **) uc_info, 0);
	if (ret)
		goto release_fw;

	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
	if (uc_info[0] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for SE\n");
		ret = -EINVAL;
		goto delete_eng_grp;
	}
	engs[0].type = OTX2_CPT_SE_TYPES;
	engs[0].count = 2;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
				  (void **) uc_info, 0);
	if (ret)
		goto delete_eng_grp;

	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_IE_TYPES);
	if (uc_info[0] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for IE\n");
		ret = -EINVAL;
		goto delete_eng_grp;
	}
	engs[0].type = OTX2_CPT_IE_TYPES;
	engs[0].count = 2;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
				  (void **) uc_info, 0);
	if (ret)
		goto delete_eng_grp;

	cpt_ucode_release_fw(&fw_info);
	return 0;

delete_eng_grp:
	delete_engine_grps(pdev, eng_grps);
release_fw:
	cpt_ucode_release_fw(&fw_info);
	return ret;
}

/*
 * Get CPT HW capabilities using LOAD_FVC operation.
 */
int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
{
	struct otx2_cptlfs_info *lfs = &cptpf->lfs;
	struct otx2_cpt_iq_command iq_cmd;
	union otx2_cpt_opcode opcode;
	union otx2_cpt_res_s *result;
	union otx2_cpt_inst_s inst;
	dma_addr_t rptr_baddr;
	struct pci_dev *pdev;
	u32 len, compl_rlen;
	int ret, etype;
	void *rptr;

	/*
	 * Don't rediscover capabilities if this was already done
	 * (when the user enabled VFs for the first time).
	 */
	if (cptpf->is_eng_caps_discovered)
		return 0;

	pdev = cptpf->pdev;
	/*
	 * Create engine groups for each type to submit LOAD_FVC op and
	 * get engine's capabilities.
	 */
	ret = create_eng_caps_discovery_grps(pdev, &cptpf->eng_grps);
	if (ret)
		goto delete_grps;

	lfs->pdev = pdev;
	lfs->reg_base = cptpf->reg_base;
	lfs->mbox = &cptpf->afpf_mbox;
	lfs->blkaddr = BLKADDR_CPT0;
	ret = otx2_cptlf_init(&cptpf->lfs, OTX2_CPT_ALL_ENG_GRPS_MASK,
			      OTX2_CPT_QUEUE_HI_PRIO, 1);
	if (ret)
		goto delete_grps;

	compl_rlen = ALIGN(sizeof(union otx2_cpt_res_s), OTX2_CPT_DMA_MINALIGN);
	len = compl_rlen + LOADFVC_RLEN;

	result = kzalloc(len, GFP_KERNEL);
	if (!result) {
		ret = -ENOMEM;
		goto lf_cleanup;
	}
	rptr_baddr = dma_map_single(&pdev->dev, (void *)result, len,
				    DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&pdev->dev, rptr_baddr)) {
		dev_err(&pdev->dev, "DMA mapping failed\n");
		ret = -EFAULT;
		goto free_result;
	}
	rptr = (u8 *)result + compl_rlen;

	/* Fill in the command */
	opcode.s.major = LOADFVC_MAJOR_OP;
	opcode.s.minor = LOADFVC_MINOR_OP;

	iq_cmd.cmd.u = 0;
	iq_cmd.cmd.s.opcode = cpu_to_be16(opcode.flags);

	/* 64-bit swap for microcode data reads, not needed for addresses */
	cpu_to_be64s(&iq_cmd.cmd.u);
	iq_cmd.dptr = 0;
	iq_cmd.rptr = rptr_baddr + compl_rlen;
	iq_cmd.cptr.u = 0;

	for (etype = 1; etype < OTX2_CPT_MAX_ENG_TYPES; etype++) {
		result->s.compcode = OTX2_CPT_COMPLETION_CODE_INIT;
		iq_cmd.cptr.s.grp = otx2_cpt_get_eng_grp(&cptpf->eng_grps,
							 etype);
		otx2_cpt_fill_inst(&inst, &iq_cmd, rptr_baddr);
		lfs->ops->send_cmd(&inst, 1, &cptpf->lfs.lf[0]);

		while (lfs->ops->cpt_get_compcode(result) ==
						OTX2_CPT_COMPLETION_CODE_INIT)
			cpu_relax();

		cptpf->eng_caps[etype].u = be64_to_cpup(rptr);
	}
	dma_unmap_single(&pdev->dev, rptr_baddr, len, DMA_BIDIRECTIONAL);
	cptpf->is_eng_caps_discovered = true;

free_result:
	kfree(result);
lf_cleanup:
	otx2_cptlf_shutdown(&cptpf->lfs);
delete_grps:
	delete_engine_grps(pdev, &cptpf->eng_grps);

	return ret;
}