1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2020 Marvell. */
3 
4 #include <linux/ctype.h>
5 #include <linux/firmware.h>
6 #include "otx2_cptpf_ucode.h"
7 #include "otx2_cpt_common.h"
8 #include "otx2_cptpf.h"
9 #include "otx2_cptlf.h"
10 #include "otx2_cpt_reqmgr.h"
11 #include "rvu_reg.h"
12 
13 #define CSR_DELAY 30
14 
15 #define LOADFVC_RLEN 8
16 #define LOADFVC_MAJOR_OP 0x01
17 #define LOADFVC_MINOR_OP 0x08
18 
19 #define CTX_FLUSH_TIMER_CNT 0xFFFFFF
20 
21 struct fw_info_t {
22 	struct list_head ucodes;
23 };
24 
25 static struct otx2_cpt_bitmap get_cores_bmap(struct device *dev,
26 					struct otx2_cpt_eng_grp_info *eng_grp)
27 {
28 	struct otx2_cpt_bitmap bmap = { {0} };
29 	bool found = false;
30 	int i;
31 
32 	if (eng_grp->g->engs_num < 0 ||
33 	    eng_grp->g->engs_num > OTX2_CPT_MAX_ENGINES) {
34 		dev_err(dev, "unsupported number of engines %d on octeontx2\n",
35 			eng_grp->g->engs_num);
36 		return bmap;
37 	}
38 
39 	for (i = 0; i  < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
40 		if (eng_grp->engs[i].type) {
41 			bitmap_or(bmap.bits, bmap.bits,
42 				  eng_grp->engs[i].bmap,
43 				  eng_grp->g->engs_num);
44 			bmap.size = eng_grp->g->engs_num;
45 			found = true;
46 		}
47 	}
48 
49 	if (!found)
50 		dev_err(dev, "No engines reserved for engine group %d\n",
51 			eng_grp->idx);
52 	return bmap;
53 }
54 
/* Nonzero when bit @eng_type is set in the engine-type mask @val. */
static int is_eng_type(int val, int eng_type)
{
	int mask = 1 << eng_type;

	return val & mask;
}
59 
60 static int is_2nd_ucode_used(struct otx2_cpt_eng_grp_info *eng_grp)
61 {
62 	if (eng_grp->ucode[1].type)
63 		return true;
64 	else
65 		return false;
66 }
67 
68 static void set_ucode_filename(struct otx2_cpt_ucode *ucode,
69 			       const char *filename)
70 {
71 	strlcpy(ucode->filename, filename, OTX2_CPT_NAME_LENGTH);
72 }
73 
74 static char *get_eng_type_str(int eng_type)
75 {
76 	char *str = "unknown";
77 
78 	switch (eng_type) {
79 	case OTX2_CPT_SE_TYPES:
80 		str = "SE";
81 		break;
82 
83 	case OTX2_CPT_IE_TYPES:
84 		str = "IE";
85 		break;
86 
87 	case OTX2_CPT_AE_TYPES:
88 		str = "AE";
89 		break;
90 	}
91 	return str;
92 }
93 
94 static char *get_ucode_type_str(int ucode_type)
95 {
96 	char *str = "unknown";
97 
98 	switch (ucode_type) {
99 	case (1 << OTX2_CPT_SE_TYPES):
100 		str = "SE";
101 		break;
102 
103 	case (1 << OTX2_CPT_IE_TYPES):
104 		str = "IE";
105 		break;
106 
107 	case (1 << OTX2_CPT_AE_TYPES):
108 		str = "AE";
109 		break;
110 
111 	case (1 << OTX2_CPT_SE_TYPES | 1 << OTX2_CPT_IE_TYPES):
112 		str = "SE+IPSEC";
113 		break;
114 	}
115 	return str;
116 }
117 
118 static int get_ucode_type(struct device *dev,
119 			  struct otx2_cpt_ucode_hdr *ucode_hdr,
120 			  int *ucode_type)
121 {
122 	struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
123 	char ver_str_prefix[OTX2_CPT_UCODE_VER_STR_SZ];
124 	char tmp_ver_str[OTX2_CPT_UCODE_VER_STR_SZ];
125 	struct pci_dev *pdev = cptpf->pdev;
126 	int i, val = 0;
127 	u8 nn;
128 
129 	strlcpy(tmp_ver_str, ucode_hdr->ver_str, OTX2_CPT_UCODE_VER_STR_SZ);
130 	for (i = 0; i < strlen(tmp_ver_str); i++)
131 		tmp_ver_str[i] = tolower(tmp_ver_str[i]);
132 
133 	sprintf(ver_str_prefix, "ocpt-%02d", pdev->revision);
134 	if (!strnstr(tmp_ver_str, ver_str_prefix, OTX2_CPT_UCODE_VER_STR_SZ))
135 		return -EINVAL;
136 
137 	nn = ucode_hdr->ver_num.nn;
138 	if (strnstr(tmp_ver_str, "se-", OTX2_CPT_UCODE_VER_STR_SZ) &&
139 	    (nn == OTX2_CPT_SE_UC_TYPE1 || nn == OTX2_CPT_SE_UC_TYPE2 ||
140 	     nn == OTX2_CPT_SE_UC_TYPE3))
141 		val |= 1 << OTX2_CPT_SE_TYPES;
142 	if (strnstr(tmp_ver_str, "ie-", OTX2_CPT_UCODE_VER_STR_SZ) &&
143 	    (nn == OTX2_CPT_IE_UC_TYPE1 || nn == OTX2_CPT_IE_UC_TYPE2 ||
144 	     nn == OTX2_CPT_IE_UC_TYPE3))
145 		val |= 1 << OTX2_CPT_IE_TYPES;
146 	if (strnstr(tmp_ver_str, "ae", OTX2_CPT_UCODE_VER_STR_SZ) &&
147 	    nn == OTX2_CPT_AE_UC_TYPE)
148 		val |= 1 << OTX2_CPT_AE_TYPES;
149 
150 	*ucode_type = val;
151 
152 	if (!val)
153 		return -EINVAL;
154 
155 	return 0;
156 }
157 
158 static int __write_ucode_base(struct otx2_cptpf_dev *cptpf, int eng,
159 			      dma_addr_t dma_addr, int blkaddr)
160 {
161 	return otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
162 				     CPT_AF_EXEX_UCODE_BASE(eng),
163 				     (u64)dma_addr, blkaddr);
164 }
165 
/*
 * Program microcode fetch state for one CPT block (@blkaddr): first set
 * the PF that engines fetch microcode as, then write each unreferenced
 * engine's UCODE_BASE with the DMA address of the group's microcode
 * image.  Returns 0 on success or a mailbox error code.
 */
static int cptx_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp,
			       struct otx2_cptpf_dev *cptpf, int blkaddr)
{
	struct otx2_cpt_engs_rsvd *engs;
	dma_addr_t dma_addr;
	int i, bit, ret;

	/* Set PF number for microcode fetches */
	ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
				    CPT_AF_PF_FUNC,
				    cptpf->pf_id << RVU_PFVF_PF_SHIFT, blkaddr);
	if (ret)
		return ret;

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		engs = &eng_grp->engs[i];
		if (!engs->type)
			continue;

		/* DMA address of the image this engine slot should run */
		dma_addr = engs->ucode->dma;

		/*
		 * Set UCODE_BASE only for the cores which are not used,
		 * other cores should have already valid UCODE_BASE set
		 */
		for_each_set_bit(bit, engs->bmap, eng_grp->g->engs_num)
			if (!eng_grp->g->eng_ref_cnt[bit]) {
				ret = __write_ucode_base(cptpf, bit, dma_addr,
							 blkaddr);
				if (ret)
					return ret;
			}
	}
	return 0;
}
201 
202 static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj)
203 {
204 	struct otx2_cptpf_dev *cptpf = obj;
205 	int ret;
206 
207 	if (cptpf->has_cpt1) {
208 		ret = cptx_set_ucode_base(eng_grp, cptpf, BLKADDR_CPT1);
209 		if (ret)
210 			return ret;
211 	}
212 	return cptx_set_ucode_base(eng_grp, cptpf, BLKADDR_CPT0);
213 }
214 
/*
 * Detach the cores in @bmap from engine group @eng_grp on CPT block
 * @blkaddr, poll (10-20ms per try, ~10 tries) until those cores report
 * idle, then disable any core no longer referenced by any group.
 * Returns 0 on success, -EBUSY if the cores never went idle, or a
 * mailbox error code.
 */
static int cptx_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
					 struct otx2_cptpf_dev *cptpf,
					 struct otx2_cpt_bitmap bmap,
					 int blkaddr)
{
	int i, timeout = 10;
	int busy, ret;
	u64 reg = 0;

	/* Detach the cores from group */
	for_each_set_bit(i, bmap.bits, bmap.size) {
		ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
					   CPT_AF_EXEX_CTL2(i), &reg, blkaddr);
		if (ret)
			return ret;

		/* Clear this group's bit and drop the core's refcount */
		if (reg & (1ull << eng_grp->idx)) {
			eng_grp->g->eng_ref_cnt[i]--;
			reg &= ~(1ull << eng_grp->idx);

			ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
						    cptpf->pdev,
						    CPT_AF_EXEX_CTL2(i), reg,
						    blkaddr);
			if (ret)
				return ret;
		}
	}

	/* Wait for cores to become idle */
	do {
		busy = 0;
		usleep_range(10000, 20000);
		if (timeout-- < 0)
			return -EBUSY;

		for_each_set_bit(i, bmap.bits, bmap.size) {
			ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox,
						   cptpf->pdev,
						   CPT_AF_EXEX_STS(i), &reg,
						   blkaddr);
			if (ret)
				return ret;

			/* STS bit 0 set means the core is still busy */
			if (reg & 0x1) {
				busy = 1;
				break;
			}
		}
	} while (busy);

	/* Disable the cores only if they are not used anymore */
	for_each_set_bit(i, bmap.bits, bmap.size) {
		if (!eng_grp->g->eng_ref_cnt[i]) {
			ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
						    cptpf->pdev,
						    CPT_AF_EXEX_CTL(i), 0x0,
						    blkaddr);
			if (ret)
				return ret;
		}
	}

	return 0;
}
280 
281 static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
282 					void *obj)
283 {
284 	struct otx2_cptpf_dev *cptpf = obj;
285 	struct otx2_cpt_bitmap bmap;
286 	int ret;
287 
288 	bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
289 	if (!bmap.size)
290 		return -EINVAL;
291 
292 	if (cptpf->has_cpt1) {
293 		ret = cptx_detach_and_disable_cores(eng_grp, cptpf, bmap,
294 						    BLKADDR_CPT1);
295 		if (ret)
296 			return ret;
297 	}
298 	return cptx_detach_and_disable_cores(eng_grp, cptpf, bmap,
299 					     BLKADDR_CPT0);
300 }
301 
/*
 * Attach the cores in @bmap to engine group @eng_grp on CPT block
 * @blkaddr (bumping per-core refcounts for newly attached cores), then
 * enable them.  The enable writes are queued and flushed as a single
 * mailbox request at the end.
 */
static int cptx_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
					struct otx2_cptpf_dev *cptpf,
					struct otx2_cpt_bitmap bmap,
					int blkaddr)
{
	u64 reg = 0;
	int i, ret;

	/* Attach the cores to the group */
	for_each_set_bit(i, bmap.bits, bmap.size) {
		ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
					   CPT_AF_EXEX_CTL2(i), &reg, blkaddr);
		if (ret)
			return ret;

		/* Set this group's bit and take a reference on the core */
		if (!(reg & (1ull << eng_grp->idx))) {
			eng_grp->g->eng_ref_cnt[i]++;
			reg |= 1ull << eng_grp->idx;

			ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
						    cptpf->pdev,
						    CPT_AF_EXEX_CTL2(i), reg,
						    blkaddr);
			if (ret)
				return ret;
		}
	}

	/* Enable the cores */
	for_each_set_bit(i, bmap.bits, bmap.size) {
		ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
						CPT_AF_EXEX_CTL(i), 0x1,
						blkaddr);
		if (ret)
			return ret;
	}
	return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
}
340 
341 static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
342 				       void *obj)
343 {
344 	struct otx2_cptpf_dev *cptpf = obj;
345 	struct otx2_cpt_bitmap bmap;
346 	int ret;
347 
348 	bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
349 	if (!bmap.size)
350 		return -EINVAL;
351 
352 	if (cptpf->has_cpt1) {
353 		ret = cptx_attach_and_enable_cores(eng_grp, cptpf, bmap,
354 						   BLKADDR_CPT1);
355 		if (ret)
356 			return ret;
357 	}
358 	return cptx_attach_and_enable_cores(eng_grp, cptpf, bmap, BLKADDR_CPT0);
359 }
360 
361 static int load_fw(struct device *dev, struct fw_info_t *fw_info,
362 		   char *filename)
363 {
364 	struct otx2_cpt_ucode_hdr *ucode_hdr;
365 	struct otx2_cpt_uc_info_t *uc_info;
366 	int ucode_type, ucode_size;
367 	int ret;
368 
369 	uc_info = kzalloc(sizeof(*uc_info), GFP_KERNEL);
370 	if (!uc_info)
371 		return -ENOMEM;
372 
373 	ret = request_firmware(&uc_info->fw, filename, dev);
374 	if (ret)
375 		goto free_uc_info;
376 
377 	ucode_hdr = (struct otx2_cpt_ucode_hdr *)uc_info->fw->data;
378 	ret = get_ucode_type(dev, ucode_hdr, &ucode_type);
379 	if (ret)
380 		goto release_fw;
381 
382 	ucode_size = ntohl(ucode_hdr->code_length) * 2;
383 	if (!ucode_size) {
384 		dev_err(dev, "Ucode %s invalid size\n", filename);
385 		ret = -EINVAL;
386 		goto release_fw;
387 	}
388 
389 	set_ucode_filename(&uc_info->ucode, filename);
390 	memcpy(uc_info->ucode.ver_str, ucode_hdr->ver_str,
391 	       OTX2_CPT_UCODE_VER_STR_SZ);
392 	uc_info->ucode.ver_num = ucode_hdr->ver_num;
393 	uc_info->ucode.type = ucode_type;
394 	uc_info->ucode.size = ucode_size;
395 	list_add_tail(&uc_info->list, &fw_info->ucodes);
396 
397 	return 0;
398 
399 release_fw:
400 	release_firmware(uc_info->fw);
401 free_uc_info:
402 	kfree(uc_info);
403 	return ret;
404 }
405 
406 static void cpt_ucode_release_fw(struct fw_info_t *fw_info)
407 {
408 	struct otx2_cpt_uc_info_t *curr, *temp;
409 
410 	if (!fw_info)
411 		return;
412 
413 	list_for_each_entry_safe(curr, temp, &fw_info->ucodes, list) {
414 		list_del(&curr->list);
415 		release_firmware(curr->fw);
416 		kfree(curr);
417 	}
418 }
419 
420 static struct otx2_cpt_uc_info_t *get_ucode(struct fw_info_t *fw_info,
421 					    int ucode_type)
422 {
423 	struct otx2_cpt_uc_info_t *curr;
424 
425 	list_for_each_entry(curr, &fw_info->ucodes, list) {
426 		if (!is_eng_type(curr->ucode.type, ucode_type))
427 			continue;
428 
429 		return curr;
430 	}
431 	return NULL;
432 }
433 
/* Dump metadata of every loaded microcode image (pr_debug only). */
static void print_uc_info(struct fw_info_t *fw_info)
{
	struct otx2_cpt_uc_info_t *curr;

	list_for_each_entry(curr, &fw_info->ucodes, list) {
		pr_debug("Ucode filename %s\n", curr->ucode.filename);
		pr_debug("Ucode version string %s\n", curr->ucode.ver_str);
		pr_debug("Ucode version %d.%d.%d.%d\n",
			 curr->ucode.ver_num.nn, curr->ucode.ver_num.xx,
			 curr->ucode.ver_num.yy, curr->ucode.ver_num.zz);
		pr_debug("Ucode type (%d) %s\n", curr->ucode.type,
			 get_ucode_type_str(curr->ucode.type));
		pr_debug("Ucode size %d\n", curr->ucode.size);
		pr_debug("Ucode ptr %p\n", curr->fw->data);
	}
}
450 
451 static int cpt_ucode_load_fw(struct pci_dev *pdev, struct fw_info_t *fw_info)
452 {
453 	char filename[OTX2_CPT_NAME_LENGTH];
454 	char eng_type[8] = {0};
455 	int ret, e, i;
456 
457 	INIT_LIST_HEAD(&fw_info->ucodes);
458 
459 	for (e = 1; e < OTX2_CPT_MAX_ENG_TYPES; e++) {
460 		strcpy(eng_type, get_eng_type_str(e));
461 		for (i = 0; i < strlen(eng_type); i++)
462 			eng_type[i] = tolower(eng_type[i]);
463 
464 		snprintf(filename, sizeof(filename), "mrvl/cpt%02d/%s.out",
465 			 pdev->revision, eng_type);
466 		/* Request firmware for each engine type */
467 		ret = load_fw(&pdev->dev, fw_info, filename);
468 		if (ret)
469 			goto release_fw;
470 	}
471 	print_uc_info(fw_info);
472 	return 0;
473 
474 release_fw:
475 	cpt_ucode_release_fw(fw_info);
476 	return ret;
477 }
478 
479 static struct otx2_cpt_engs_rsvd *find_engines_by_type(
480 					struct otx2_cpt_eng_grp_info *eng_grp,
481 					int eng_type)
482 {
483 	int i;
484 
485 	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
486 		if (!eng_grp->engs[i].type)
487 			continue;
488 
489 		if (eng_grp->engs[i].type == eng_type)
490 			return &eng_grp->engs[i];
491 	}
492 	return NULL;
493 }
494 
495 static int eng_grp_has_eng_type(struct otx2_cpt_eng_grp_info *eng_grp,
496 				int eng_type)
497 {
498 	struct otx2_cpt_engs_rsvd *engs;
499 
500 	engs = find_engines_by_type(eng_grp, eng_type);
501 
502 	return (engs != NULL ? 1 : 0);
503 }
504 
505 static int update_engines_avail_count(struct device *dev,
506 				      struct otx2_cpt_engs_available *avail,
507 				      struct otx2_cpt_engs_rsvd *engs, int val)
508 {
509 	switch (engs->type) {
510 	case OTX2_CPT_SE_TYPES:
511 		avail->se_cnt += val;
512 		break;
513 
514 	case OTX2_CPT_IE_TYPES:
515 		avail->ie_cnt += val;
516 		break;
517 
518 	case OTX2_CPT_AE_TYPES:
519 		avail->ae_cnt += val;
520 		break;
521 
522 	default:
523 		dev_err(dev, "Invalid engine type %d\n", engs->type);
524 		return -EINVAL;
525 	}
526 	return 0;
527 }
528 
529 static int update_engines_offset(struct device *dev,
530 				 struct otx2_cpt_engs_available *avail,
531 				 struct otx2_cpt_engs_rsvd *engs)
532 {
533 	switch (engs->type) {
534 	case OTX2_CPT_SE_TYPES:
535 		engs->offset = 0;
536 		break;
537 
538 	case OTX2_CPT_IE_TYPES:
539 		engs->offset = avail->max_se_cnt;
540 		break;
541 
542 	case OTX2_CPT_AE_TYPES:
543 		engs->offset = avail->max_se_cnt + avail->max_ie_cnt;
544 		break;
545 
546 	default:
547 		dev_err(dev, "Invalid engine type %d\n", engs->type);
548 		return -EINVAL;
549 	}
550 	return 0;
551 }
552 
553 static int release_engines(struct device *dev,
554 			   struct otx2_cpt_eng_grp_info *grp)
555 {
556 	int i, ret = 0;
557 
558 	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
559 		if (!grp->engs[i].type)
560 			continue;
561 
562 		if (grp->engs[i].count > 0) {
563 			ret = update_engines_avail_count(dev, &grp->g->avail,
564 							 &grp->engs[i],
565 							 grp->engs[i].count);
566 			if (ret)
567 				return ret;
568 		}
569 
570 		grp->engs[i].type = 0;
571 		grp->engs[i].count = 0;
572 		grp->engs[i].offset = 0;
573 		grp->engs[i].ucode = NULL;
574 		bitmap_zero(grp->engs[i].bmap, grp->g->engs_num);
575 	}
576 	return 0;
577 }
578 
579 static int do_reserve_engines(struct device *dev,
580 			      struct otx2_cpt_eng_grp_info *grp,
581 			      struct otx2_cpt_engines *req_engs)
582 {
583 	struct otx2_cpt_engs_rsvd *engs = NULL;
584 	int i, ret;
585 
586 	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
587 		if (!grp->engs[i].type) {
588 			engs = &grp->engs[i];
589 			break;
590 		}
591 	}
592 
593 	if (!engs)
594 		return -ENOMEM;
595 
596 	engs->type = req_engs->type;
597 	engs->count = req_engs->count;
598 
599 	ret = update_engines_offset(dev, &grp->g->avail, engs);
600 	if (ret)
601 		return ret;
602 
603 	if (engs->count > 0) {
604 		ret = update_engines_avail_count(dev, &grp->g->avail, engs,
605 						 -engs->count);
606 		if (ret)
607 			return ret;
608 	}
609 
610 	return 0;
611 }
612 
613 static int check_engines_availability(struct device *dev,
614 				      struct otx2_cpt_eng_grp_info *grp,
615 				      struct otx2_cpt_engines *req_eng)
616 {
617 	int avail_cnt = 0;
618 
619 	switch (req_eng->type) {
620 	case OTX2_CPT_SE_TYPES:
621 		avail_cnt = grp->g->avail.se_cnt;
622 		break;
623 
624 	case OTX2_CPT_IE_TYPES:
625 		avail_cnt = grp->g->avail.ie_cnt;
626 		break;
627 
628 	case OTX2_CPT_AE_TYPES:
629 		avail_cnt = grp->g->avail.ae_cnt;
630 		break;
631 
632 	default:
633 		dev_err(dev, "Invalid engine type %d\n", req_eng->type);
634 		return -EINVAL;
635 	}
636 
637 	if (avail_cnt < req_eng->count) {
638 		dev_err(dev,
639 			"Error available %s engines %d < than requested %d\n",
640 			get_eng_type_str(req_eng->type),
641 			avail_cnt, req_eng->count);
642 		return -EBUSY;
643 	}
644 	return 0;
645 }
646 
647 static int reserve_engines(struct device *dev,
648 			   struct otx2_cpt_eng_grp_info *grp,
649 			   struct otx2_cpt_engines *req_engs, int ucodes_cnt)
650 {
651 	int i, ret = 0;
652 
653 	/* Validate if a number of requested engines are available */
654 	for (i = 0; i < ucodes_cnt; i++) {
655 		ret = check_engines_availability(dev, grp, &req_engs[i]);
656 		if (ret)
657 			return ret;
658 	}
659 
660 	/* Reserve requested engines for this engine group */
661 	for (i = 0; i < ucodes_cnt; i++) {
662 		ret = do_reserve_engines(dev, grp, &req_engs[i]);
663 		if (ret)
664 			return ret;
665 	}
666 	return 0;
667 }
668 
669 static void ucode_unload(struct device *dev, struct otx2_cpt_ucode *ucode)
670 {
671 	if (ucode->va) {
672 		dma_free_coherent(dev, OTX2_CPT_UCODE_SZ, ucode->va,
673 				  ucode->dma);
674 		ucode->va = NULL;
675 		ucode->dma = 0;
676 		ucode->size = 0;
677 	}
678 
679 	memset(&ucode->ver_str, 0, OTX2_CPT_UCODE_VER_STR_SZ);
680 	memset(&ucode->ver_num, 0, sizeof(struct otx2_cpt_ucode_ver_num));
681 	set_ucode_filename(ucode, "");
682 	ucode->type = 0;
683 }
684 
/*
 * Copy a microcode image (past its header) into freshly allocated
 * DMA-coherent memory and convert it to the byte order the engines
 * expect: a 64-bit big-endian swap followed by a 16-bit swap.
 * Returns 0 or -ENOMEM.
 *
 * NOTE(review): assumes ucode->size <= OTX2_CPT_UCODE_SZ and that
 * ucode_data holds at least header + size bytes; callers are expected
 * to have validated both -- confirm.
 */
static int copy_ucode_to_dma_mem(struct device *dev,
				 struct otx2_cpt_ucode *ucode,
				 const u8 *ucode_data)
{
	u32 i;

	/*  Allocate DMAable space */
	ucode->va = dma_alloc_coherent(dev, OTX2_CPT_UCODE_SZ, &ucode->dma,
				       GFP_KERNEL);
	if (!ucode->va)
		return -ENOMEM;

	memcpy(ucode->va, ucode_data + sizeof(struct otx2_cpt_ucode_hdr),
	       ucode->size);

	/* Byte swap 64-bit */
	for (i = 0; i < (ucode->size / 8); i++)
		cpu_to_be64s(&((u64 *)ucode->va)[i]);
	/*  Ucode needs 16-bit swap */
	for (i = 0; i < (ucode->size / 2); i++)
		cpu_to_be16s(&((u16 *)ucode->va)[i]);
	return 0;
}
708 
/*
 * Bring an engine group online: point each of its cores at the right
 * microcode image, then attach the cores to the group and enable them.
 */
static int enable_eng_grp(struct otx2_cpt_eng_grp_info *eng_grp,
			  void *obj)
{
	int ret;

	ret = cpt_set_ucode_base(eng_grp, obj);
	if (ret)
		return ret;

	return cpt_attach_and_enable_cores(eng_grp, obj);
}
724 
725 static int disable_eng_grp(struct device *dev,
726 			   struct otx2_cpt_eng_grp_info *eng_grp,
727 			   void *obj)
728 {
729 	int i, ret;
730 
731 	/* Disable all engines used by this group */
732 	ret = cpt_detach_and_disable_cores(eng_grp, obj);
733 	if (ret)
734 		return ret;
735 
736 	/* Unload ucode used by this engine group */
737 	ucode_unload(dev, &eng_grp->ucode[0]);
738 	ucode_unload(dev, &eng_grp->ucode[1]);
739 
740 	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
741 		if (!eng_grp->engs[i].type)
742 			continue;
743 
744 		eng_grp->engs[i].ucode = &eng_grp->ucode[0];
745 	}
746 
747 	/* Clear UCODE_BASE register for each engine used by this group */
748 	ret = cpt_set_ucode_base(eng_grp, obj);
749 
750 	return ret;
751 }
752 
/*
 * Link @dst_grp to @src_grp so it can share src_grp's engines: src_grp
 * gains a reference (it may be shared by several groups) and dst_grp
 * records which group it mirrors.
 */
static void setup_eng_grp_mirroring(struct otx2_cpt_eng_grp_info *dst_grp,
				    struct otx2_cpt_eng_grp_info *src_grp)
{
	/* Setup fields for engine group which is mirrored */
	src_grp->mirror.is_ena = false;
	src_grp->mirror.idx = 0;
	src_grp->mirror.ref_count++;

	/* Setup fields for mirroring engine group */
	dst_grp->mirror.is_ena = true;
	dst_grp->mirror.idx = src_grp->idx;
	dst_grp->mirror.ref_count = 0;
}
766 
767 static void remove_eng_grp_mirroring(struct otx2_cpt_eng_grp_info *dst_grp)
768 {
769 	struct otx2_cpt_eng_grp_info *src_grp;
770 
771 	if (!dst_grp->mirror.is_ena)
772 		return;
773 
774 	src_grp = &dst_grp->g->grp[dst_grp->mirror.idx];
775 
776 	src_grp->mirror.ref_count--;
777 	dst_grp->mirror.is_ena = false;
778 	dst_grp->mirror.idx = 0;
779 	dst_grp->mirror.ref_count = 0;
780 }
781 
782 static void update_requested_engs(struct otx2_cpt_eng_grp_info *mirror_eng_grp,
783 				  struct otx2_cpt_engines *engs, int engs_cnt)
784 {
785 	struct otx2_cpt_engs_rsvd *mirrored_engs;
786 	int i;
787 
788 	for (i = 0; i < engs_cnt; i++) {
789 		mirrored_engs = find_engines_by_type(mirror_eng_grp,
790 						     engs[i].type);
791 		if (!mirrored_engs)
792 			continue;
793 
794 		/*
795 		 * If mirrored group has this type of engines attached then
796 		 * there are 3 scenarios possible:
797 		 * 1) mirrored_engs.count == engs[i].count then all engines
798 		 * from mirrored engine group will be shared with this engine
799 		 * group
800 		 * 2) mirrored_engs.count > engs[i].count then only a subset of
801 		 * engines from mirrored engine group will be shared with this
802 		 * engine group
803 		 * 3) mirrored_engs.count < engs[i].count then all engines
804 		 * from mirrored engine group will be shared with this group
805 		 * and additional engines will be reserved for exclusively use
806 		 * by this engine group
807 		 */
808 		engs[i].count -= mirrored_engs->count;
809 	}
810 }
811 
812 static struct otx2_cpt_eng_grp_info *find_mirrored_eng_grp(
813 					struct otx2_cpt_eng_grp_info *grp)
814 {
815 	struct otx2_cpt_eng_grps *eng_grps = grp->g;
816 	int i;
817 
818 	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
819 		if (!eng_grps->grp[i].is_enabled)
820 			continue;
821 		if (eng_grps->grp[i].ucode[0].type &&
822 		    eng_grps->grp[i].ucode[1].type)
823 			continue;
824 		if (grp->idx == i)
825 			continue;
826 		if (!strncasecmp(eng_grps->grp[i].ucode[0].ver_str,
827 				 grp->ucode[0].ver_str,
828 				 OTX2_CPT_UCODE_VER_STR_SZ))
829 			return &eng_grps->grp[i];
830 	}
831 
832 	return NULL;
833 }
834 
835 static struct otx2_cpt_eng_grp_info *find_unused_eng_grp(
836 					struct otx2_cpt_eng_grps *eng_grps)
837 {
838 	int i;
839 
840 	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
841 		if (!eng_grps->grp[i].is_enabled)
842 			return &eng_grps->grp[i];
843 	}
844 	return NULL;
845 }
846 
/*
 * Build the per-engine-type core bitmaps for @eng_grp.  First reserve
 * currently-unreferenced physical cores for every slot with a positive
 * request count; then, if the group mirrors another one, fold in the
 * mirrored group's cores (trimming them when this group requested
 * fewer engines than the mirror holds).  Returns 0, -EINVAL for an
 * unknown engine type, or -ENOSPC when too few free cores exist.
 */
static int eng_grp_update_masks(struct device *dev,
				struct otx2_cpt_eng_grp_info *eng_grp)
{
	struct otx2_cpt_engs_rsvd *engs, *mirrored_engs;
	struct otx2_cpt_bitmap tmp_bmap = { {0} };
	int i, j, cnt, max_cnt;
	int bit;

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		engs = &eng_grp->engs[i];
		if (!engs->type)
			continue;
		/* Zero/negative counts are fully covered by a mirror */
		if (engs->count <= 0)
			continue;

		switch (engs->type) {
		case OTX2_CPT_SE_TYPES:
			max_cnt = eng_grp->g->avail.max_se_cnt;
			break;

		case OTX2_CPT_IE_TYPES:
			max_cnt = eng_grp->g->avail.max_ie_cnt;
			break;

		case OTX2_CPT_AE_TYPES:
			max_cnt = eng_grp->g->avail.max_ae_cnt;
			break;

		default:
			dev_err(dev, "Invalid engine type %d\n", engs->type);
			return -EINVAL;
		}

		/* Pick engs->count unreferenced cores from this type's range */
		cnt = engs->count;
		WARN_ON(engs->offset + max_cnt > OTX2_CPT_MAX_ENGINES);
		bitmap_zero(tmp_bmap.bits, eng_grp->g->engs_num);
		for (j = engs->offset; j < engs->offset + max_cnt; j++) {
			if (!eng_grp->g->eng_ref_cnt[j]) {
				bitmap_set(tmp_bmap.bits, j, 1);
				cnt--;
				if (!cnt)
					break;
			}
		}

		if (cnt)
			return -ENOSPC;

		bitmap_copy(engs->bmap, tmp_bmap.bits, eng_grp->g->engs_num);
	}

	if (!eng_grp->mirror.is_ena)
		return 0;

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		engs = &eng_grp->engs[i];
		if (!engs->type)
			continue;

		mirrored_engs = find_engines_by_type(
					&eng_grp->g->grp[eng_grp->mirror.idx],
					engs->type);
		WARN_ON(!mirrored_engs && engs->count <= 0);
		if (!mirrored_engs)
			continue;

		/*
		 * A negative count means this group needs fewer engines than
		 * the mirror provides: drop -count cores from the front of
		 * the mirrored bitmap before merging it in.
		 */
		bitmap_copy(tmp_bmap.bits, mirrored_engs->bmap,
			    eng_grp->g->engs_num);
		if (engs->count < 0) {
			bit = find_first_bit(mirrored_engs->bmap,
					     eng_grp->g->engs_num);
			bitmap_clear(tmp_bmap.bits, bit, -engs->count);
		}
		bitmap_or(engs->bmap, engs->bmap, tmp_bmap.bits,
			  eng_grp->g->engs_num);
	}
	return 0;
}
925 
926 static int delete_engine_group(struct device *dev,
927 			       struct otx2_cpt_eng_grp_info *eng_grp)
928 {
929 	int ret;
930 
931 	if (!eng_grp->is_enabled)
932 		return 0;
933 
934 	if (eng_grp->mirror.ref_count)
935 		return -EINVAL;
936 
937 	/* Removing engine group mirroring if enabled */
938 	remove_eng_grp_mirroring(eng_grp);
939 
940 	/* Disable engine group */
941 	ret = disable_eng_grp(dev, eng_grp, eng_grp->g->obj);
942 	if (ret)
943 		return ret;
944 
945 	/* Release all engines held by this engine group */
946 	ret = release_engines(dev, eng_grp);
947 	if (ret)
948 		return ret;
949 
950 	eng_grp->is_enabled = false;
951 
952 	return 0;
953 }
954 
955 static void update_ucode_ptrs(struct otx2_cpt_eng_grp_info *eng_grp)
956 {
957 	struct otx2_cpt_ucode *ucode;
958 
959 	if (eng_grp->mirror.is_ena)
960 		ucode = &eng_grp->g->grp[eng_grp->mirror.idx].ucode[0];
961 	else
962 		ucode = &eng_grp->ucode[0];
963 	WARN_ON(!eng_grp->engs[0].type);
964 	eng_grp->engs[0].ucode = ucode;
965 
966 	if (eng_grp->engs[1].type) {
967 		if (is_2nd_ucode_used(eng_grp))
968 			eng_grp->engs[1].ucode = &eng_grp->ucode[1];
969 		else
970 			eng_grp->engs[1].ucode = ucode;
971 	}
972 }
973 
/*
 * Create and enable a new engine group from @ucodes_cnt microcode
 * images (@ucode_data) with the engine counts requested in @engs.
 * If an enabled group already runs the same primary microcode this
 * group mirrors it and shares its engines.  On failure all reserved
 * engines and loaded microcode are released.  @is_print controls the
 * informational log lines.
 */
static int create_engine_group(struct device *dev,
			       struct otx2_cpt_eng_grps *eng_grps,
			       struct otx2_cpt_engines *engs, int ucodes_cnt,
			       void *ucode_data[], int is_print)
{
	struct otx2_cpt_eng_grp_info *mirrored_eng_grp;
	struct otx2_cpt_eng_grp_info *eng_grp;
	struct otx2_cpt_uc_info_t *uc_info;
	int i, ret = 0;

	/* Find engine group which is not used */
	eng_grp = find_unused_eng_grp(eng_grps);
	if (!eng_grp) {
		dev_err(dev, "Error all engine groups are being used\n");
		return -ENOSPC;
	}
	/* Load ucode */
	for (i = 0; i < ucodes_cnt; i++) {
		uc_info = (struct otx2_cpt_uc_info_t *) ucode_data[i];
		eng_grp->ucode[i] = uc_info->ucode;
		ret = copy_ucode_to_dma_mem(dev, &eng_grp->ucode[i],
					    uc_info->fw->data);
		if (ret)
			goto unload_ucode;
	}

	/* Check if this group mirrors another existing engine group */
	mirrored_eng_grp = find_mirrored_eng_grp(eng_grp);
	if (mirrored_eng_grp) {
		/* Setup mirroring */
		setup_eng_grp_mirroring(eng_grp, mirrored_eng_grp);

		/*
		 * Update count of requested engines because some
		 * of them might be shared with mirrored group
		 */
		update_requested_engs(mirrored_eng_grp, engs, ucodes_cnt);
	}
	ret = reserve_engines(dev, eng_grp, engs, ucodes_cnt);
	if (ret)
		goto unload_ucode;

	/* Update ucode pointers used by engines */
	update_ucode_ptrs(eng_grp);

	/* Update engine masks used by this group */
	ret = eng_grp_update_masks(dev, eng_grp);
	if (ret)
		goto release_engs;

	/* Enable engine group */
	ret = enable_eng_grp(eng_grp, eng_grps->obj);
	if (ret)
		goto release_engs;

	/*
	 * If this engine group mirrors another engine group
	 * then we need to unload ucode as we will use ucode
	 * from mirrored engine group
	 */
	if (eng_grp->mirror.is_ena)
		ucode_unload(dev, &eng_grp->ucode[0]);

	eng_grp->is_enabled = true;

	if (!is_print)
		return 0;

	if (mirrored_eng_grp)
		dev_info(dev,
			 "Engine_group%d: reuse microcode %s from group %d\n",
			 eng_grp->idx, mirrored_eng_grp->ucode[0].ver_str,
			 mirrored_eng_grp->idx);
	else
		dev_info(dev, "Engine_group%d: microcode loaded %s\n",
			 eng_grp->idx, eng_grp->ucode[0].ver_str);
	if (is_2nd_ucode_used(eng_grp))
		dev_info(dev, "Engine_group%d: microcode loaded %s\n",
			 eng_grp->idx, eng_grp->ucode[1].ver_str);

	return 0;

release_engs:
	release_engines(dev, eng_grp);
unload_ucode:
	ucode_unload(dev, &eng_grp->ucode[0]);
	ucode_unload(dev, &eng_grp->ucode[1]);
	return ret;
}
1063 
1064 static void delete_engine_grps(struct pci_dev *pdev,
1065 			       struct otx2_cpt_eng_grps *eng_grps)
1066 {
1067 	int i;
1068 
1069 	/* First delete all mirroring engine groups */
1070 	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++)
1071 		if (eng_grps->grp[i].mirror.is_ena)
1072 			delete_engine_group(&pdev->dev, &eng_grps->grp[i]);
1073 
1074 	/* Delete remaining engine groups */
1075 	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++)
1076 		delete_engine_group(&pdev->dev, &eng_grps->grp[i]);
1077 }
1078 
1079 int otx2_cpt_get_eng_grp(struct otx2_cpt_eng_grps *eng_grps, int eng_type)
1080 {
1081 
1082 	int eng_grp_num = OTX2_CPT_INVALID_CRYPTO_ENG_GRP;
1083 	struct otx2_cpt_eng_grp_info *grp;
1084 	int i;
1085 
1086 	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
1087 		grp = &eng_grps->grp[i];
1088 		if (!grp->is_enabled)
1089 			continue;
1090 
1091 		if (eng_type == OTX2_CPT_SE_TYPES) {
1092 			if (eng_grp_has_eng_type(grp, eng_type) &&
1093 			    !eng_grp_has_eng_type(grp, OTX2_CPT_IE_TYPES)) {
1094 				eng_grp_num = i;
1095 				break;
1096 			}
1097 		} else {
1098 			if (eng_grp_has_eng_type(grp, eng_type)) {
1099 				eng_grp_num = i;
1100 				break;
1101 			}
1102 		}
1103 	}
1104 	return eng_grp_num;
1105 }
1106 
/*
 * Create the three default engine groups -- SE (kernel symmetric
 * crypto), SE+IE (IPsec, sharing all SE engines with group 0) and AE
 * (asymmetric crypto) -- from firmware images.  Serialized by
 * eng_grps->lock; a repeat call after is_grps_created is a no-op.
 * On non-OTX2 silicon also enables context prefetching and programs
 * the maximum CTX flush-timer interval on CPT0.
 */
int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev *cptpf,
			     struct otx2_cpt_eng_grps *eng_grps)
{
	struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = {  };
	struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { {0} };
	struct pci_dev *pdev = cptpf->pdev;
	struct fw_info_t fw_info;
	int ret = 0;

	mutex_lock(&eng_grps->lock);
	/*
	 * We don't create engine groups if it was already
	 * made (when user enabled VFs for the first time)
	 */
	if (eng_grps->is_grps_created)
		goto unlock;

	ret = cpt_ucode_load_fw(pdev, &fw_info);
	if (ret)
		goto unlock;

	/*
	 * Create engine group with SE engines for kernel
	 * crypto functionality (symmetric crypto)
	 */
	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
	if (uc_info[0] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for SE\n");
		ret = -EINVAL;
		goto release_fw;
	}
	engs[0].type = OTX2_CPT_SE_TYPES;
	engs[0].count = eng_grps->avail.max_se_cnt;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
				  (void **) uc_info, 1);
	if (ret)
		goto release_fw;

	/*
	 * Create engine group with SE+IE engines for IPSec.
	 * All SE engines will be shared with engine group 0.
	 */
	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
	uc_info[1] = get_ucode(&fw_info, OTX2_CPT_IE_TYPES);

	if (uc_info[1] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for IE");
		ret = -EINVAL;
		goto delete_eng_grp;
	}
	engs[0].type = OTX2_CPT_SE_TYPES;
	engs[0].count = eng_grps->avail.max_se_cnt;
	engs[1].type = OTX2_CPT_IE_TYPES;
	engs[1].count = eng_grps->avail.max_ie_cnt;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 2,
				  (void **) uc_info, 1);
	if (ret)
		goto delete_eng_grp;

	/*
	 * Create engine group with AE engines for asymmetric
	 * crypto functionality.
	 */
	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_AE_TYPES);
	if (uc_info[0] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for AE");
		ret = -EINVAL;
		goto delete_eng_grp;
	}
	engs[0].type = OTX2_CPT_AE_TYPES;
	engs[0].count = eng_grps->avail.max_ae_cnt;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
				  (void **) uc_info, 1);
	if (ret)
		goto delete_eng_grp;

	eng_grps->is_grps_created = true;

	cpt_ucode_release_fw(&fw_info);

	if (is_dev_otx2(pdev))
		goto unlock;
	/*
	 * Configure engine group mask to allow context prefetching
	 * for the groups.
	 */
	/*
	 * NOTE(review): the return values of the two register writes below
	 * are ignored; presumably this setup is best-effort -- confirm.
	 */
	otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL,
			      OTX2_CPT_ALL_ENG_GRPS_MASK << 3 | BIT_ULL(16),
			      BLKADDR_CPT0);
	/*
	 * Set interval to periodically flush dirty data for the next
	 * CTX cache entry. Set the interval count to maximum supported
	 * value.
	 */
	otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTX_FLUSH_TIMER,
			      CTX_FLUSH_TIMER_CNT, BLKADDR_CPT0);
	mutex_unlock(&eng_grps->lock);
	return 0;

delete_eng_grp:
	delete_engine_grps(pdev, eng_grps);
release_fw:
	cpt_ucode_release_fw(&fw_info);
unlock:
	mutex_unlock(&eng_grps->lock);
	return ret;
}
1217 
1218 static int cptx_disable_all_cores(struct otx2_cptpf_dev *cptpf, int total_cores,
1219 				  int blkaddr)
1220 {
1221 	int timeout = 10, ret;
1222 	int i, busy;
1223 	u64 reg;
1224 
1225 	/* Disengage the cores from groups */
1226 	for (i = 0; i < total_cores; i++) {
1227 		ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
1228 						CPT_AF_EXEX_CTL2(i), 0x0,
1229 						blkaddr);
1230 		if (ret)
1231 			return ret;
1232 
1233 		cptpf->eng_grps.eng_ref_cnt[i] = 0;
1234 	}
1235 	ret = otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
1236 	if (ret)
1237 		return ret;
1238 
1239 	/* Wait for cores to become idle */
1240 	do {
1241 		busy = 0;
1242 		usleep_range(10000, 20000);
1243 		if (timeout-- < 0)
1244 			return -EBUSY;
1245 
1246 		for (i = 0; i < total_cores; i++) {
1247 			ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox,
1248 						   cptpf->pdev,
1249 						   CPT_AF_EXEX_STS(i), &reg,
1250 						   blkaddr);
1251 			if (ret)
1252 				return ret;
1253 
1254 			if (reg & 0x1) {
1255 				busy = 1;
1256 				break;
1257 			}
1258 		}
1259 	} while (busy);
1260 
1261 	/* Disable the cores */
1262 	for (i = 0; i < total_cores; i++) {
1263 		ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
1264 						CPT_AF_EXEX_CTL(i), 0x0,
1265 						blkaddr);
1266 		if (ret)
1267 			return ret;
1268 	}
1269 	return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
1270 }
1271 
1272 int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf)
1273 {
1274 	int total_cores, ret;
1275 
1276 	total_cores = cptpf->eng_grps.avail.max_se_cnt +
1277 		      cptpf->eng_grps.avail.max_ie_cnt +
1278 		      cptpf->eng_grps.avail.max_ae_cnt;
1279 
1280 	if (cptpf->has_cpt1) {
1281 		ret = cptx_disable_all_cores(cptpf, total_cores, BLKADDR_CPT1);
1282 		if (ret)
1283 			return ret;
1284 	}
1285 	return cptx_disable_all_cores(cptpf, total_cores, BLKADDR_CPT0);
1286 }
1287 
1288 void otx2_cpt_cleanup_eng_grps(struct pci_dev *pdev,
1289 			       struct otx2_cpt_eng_grps *eng_grps)
1290 {
1291 	struct otx2_cpt_eng_grp_info *grp;
1292 	int i, j;
1293 
1294 	mutex_lock(&eng_grps->lock);
1295 	delete_engine_grps(pdev, eng_grps);
1296 	/* Release memory */
1297 	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
1298 		grp = &eng_grps->grp[i];
1299 		for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
1300 			kfree(grp->engs[j].bmap);
1301 			grp->engs[j].bmap = NULL;
1302 		}
1303 	}
1304 	mutex_unlock(&eng_grps->lock);
1305 }
1306 
1307 int otx2_cpt_init_eng_grps(struct pci_dev *pdev,
1308 			   struct otx2_cpt_eng_grps *eng_grps)
1309 {
1310 	struct otx2_cpt_eng_grp_info *grp;
1311 	int i, j, ret;
1312 
1313 	mutex_init(&eng_grps->lock);
1314 	eng_grps->obj = pci_get_drvdata(pdev);
1315 	eng_grps->avail.se_cnt = eng_grps->avail.max_se_cnt;
1316 	eng_grps->avail.ie_cnt = eng_grps->avail.max_ie_cnt;
1317 	eng_grps->avail.ae_cnt = eng_grps->avail.max_ae_cnt;
1318 
1319 	eng_grps->engs_num = eng_grps->avail.max_se_cnt +
1320 			     eng_grps->avail.max_ie_cnt +
1321 			     eng_grps->avail.max_ae_cnt;
1322 	if (eng_grps->engs_num > OTX2_CPT_MAX_ENGINES) {
1323 		dev_err(&pdev->dev,
1324 			"Number of engines %d > than max supported %d\n",
1325 			eng_grps->engs_num, OTX2_CPT_MAX_ENGINES);
1326 		ret = -EINVAL;
1327 		goto cleanup_eng_grps;
1328 	}
1329 
1330 	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
1331 		grp = &eng_grps->grp[i];
1332 		grp->g = eng_grps;
1333 		grp->idx = i;
1334 
1335 		for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
1336 			grp->engs[j].bmap =
1337 				kcalloc(BITS_TO_LONGS(eng_grps->engs_num),
1338 					sizeof(long), GFP_KERNEL);
1339 			if (!grp->engs[j].bmap) {
1340 				ret = -ENOMEM;
1341 				goto cleanup_eng_grps;
1342 			}
1343 		}
1344 	}
1345 	return 0;
1346 
1347 cleanup_eng_grps:
1348 	otx2_cpt_cleanup_eng_grps(pdev, eng_grps);
1349 	return ret;
1350 }
1351 
/*
 * Create one temporary engine group per engine type (AE, SE, IE), each with
 * two engines, so that a LOAD_FVC instruction can later be issued to every
 * type to discover its capabilities. The caller is responsible for removing
 * these groups afterwards (see otx2_cpt_discover_eng_capabilities()).
 *
 * Note the error unwinding is stage-dependent: a failure before the first
 * group is created only releases the firmware, while later failures also
 * delete any groups created so far.
 */
static int create_eng_caps_discovery_grps(struct pci_dev *pdev,
					  struct otx2_cpt_eng_grps *eng_grps)
{
	struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = {  };
	struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { {0} };
	struct fw_info_t fw_info;
	int ret;

	mutex_lock(&eng_grps->lock);
	ret = cpt_ucode_load_fw(pdev, &fw_info);
	if (ret) {
		mutex_unlock(&eng_grps->lock);
		return ret;
	}

	/* First group: two AE engines */
	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_AE_TYPES);
	if (uc_info[0] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for AE\n");
		ret = -EINVAL;
		goto release_fw;
	}
	engs[0].type = OTX2_CPT_AE_TYPES;
	engs[0].count = 2;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
				  (void **) uc_info, 0);
	if (ret)
		goto release_fw;

	/* Second group: two SE engines */
	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
	if (uc_info[0] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for SE\n");
		ret = -EINVAL;
		goto delete_eng_grp;
	}
	engs[0].type = OTX2_CPT_SE_TYPES;
	engs[0].count = 2;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
				  (void **) uc_info, 0);
	if (ret)
		goto delete_eng_grp;

	/* Third group: two IE engines */
	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_IE_TYPES);
	if (uc_info[0] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for IE\n");
		ret = -EINVAL;
		goto delete_eng_grp;
	}
	engs[0].type = OTX2_CPT_IE_TYPES;
	engs[0].count = 2;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
				  (void **) uc_info, 0);
	if (ret)
		goto delete_eng_grp;

	/* Firmware images are no longer needed once the groups exist */
	cpt_ucode_release_fw(&fw_info);
	mutex_unlock(&eng_grps->lock);
	return 0;

delete_eng_grp:
	delete_engine_grps(pdev, eng_grps);
release_fw:
	cpt_ucode_release_fw(&fw_info);
	mutex_unlock(&eng_grps->lock);
	return ret;
}
1420 
1421 /*
1422  * Get CPT HW capabilities using LOAD_FVC operation.
1423  */
/*
 * Get CPT HW capabilities using LOAD_FVC operation.
 *
 * Creates temporary per-type engine groups, brings up a single CPT LF,
 * submits one LOAD_FVC instruction per engine type and stores the 8-byte
 * response in cptpf->eng_caps[]. On both success and failure the buffer,
 * the LF and the discovery groups are torn down again (the success path
 * deliberately falls through the cleanup labels).
 */
int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
{
	struct otx2_cptlfs_info *lfs = &cptpf->lfs;
	struct otx2_cpt_iq_command iq_cmd;
	union otx2_cpt_opcode opcode;
	union otx2_cpt_res_s *result;
	union otx2_cpt_inst_s inst;
	dma_addr_t rptr_baddr;
	struct pci_dev *pdev;
	u32 len, compl_rlen;
	int ret, etype;
	void *rptr;

	/*
	 * We don't get capabilities if it was already done
	 * (when user enabled VFs for the first time)
	 */
	if (cptpf->is_eng_caps_discovered)
		return 0;

	pdev = cptpf->pdev;
	/*
	 * Create engine groups for each type to submit LOAD_FVC op and
	 * get engine's capabilities.
	 */
	ret = create_eng_caps_discovery_grps(pdev, &cptpf->eng_grps);
	if (ret)
		goto delete_grps;

	/* Bring up one high-priority LF on CPT0 to submit the instructions */
	lfs->pdev = pdev;
	lfs->reg_base = cptpf->reg_base;
	lfs->mbox = &cptpf->afpf_mbox;
	lfs->blkaddr = BLKADDR_CPT0;
	ret = otx2_cptlf_init(&cptpf->lfs, OTX2_CPT_ALL_ENG_GRPS_MASK,
			      OTX2_CPT_QUEUE_HI_PRIO, 1);
	if (ret)
		goto delete_grps;

	/*
	 * Single DMA buffer laid out as: completion record (res_s), padded
	 * to OTX2_CPT_DMA_MINALIGN, followed by LOADFVC_RLEN response bytes.
	 */
	compl_rlen = ALIGN(sizeof(union otx2_cpt_res_s), OTX2_CPT_DMA_MINALIGN);
	len = compl_rlen + LOADFVC_RLEN;

	result = kzalloc(len, GFP_KERNEL);
	if (!result) {
		ret = -ENOMEM;
		goto lf_cleanup;
	}
	rptr_baddr = dma_map_single(&pdev->dev, (void *)result, len,
				    DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&pdev->dev, rptr_baddr)) {
		dev_err(&pdev->dev, "DMA mapping failed\n");
		ret = -EFAULT;
		goto free_result;
	}
	/* CPU-side pointer to the response part of the buffer */
	rptr = (u8 *)result + compl_rlen;

	/* Fill in the command */
	opcode.s.major = LOADFVC_MAJOR_OP;
	opcode.s.minor = LOADFVC_MINOR_OP;

	iq_cmd.cmd.u = 0;
	iq_cmd.cmd.s.opcode = cpu_to_be16(opcode.flags);

	/* 64-bit swap for microcode data reads, not needed for addresses */
	cpu_to_be64s(&iq_cmd.cmd.u);
	iq_cmd.dptr = 0;
	iq_cmd.rptr = rptr_baddr + compl_rlen;
	iq_cmd.cptr.u = 0;

	/* Engine type 0 is skipped; presumably not a valid type — confirm */
	for (etype = 1; etype < OTX2_CPT_MAX_ENG_TYPES; etype++) {
		result->s.compcode = OTX2_CPT_COMPLETION_CODE_INIT;
		iq_cmd.cptr.s.grp = otx2_cpt_get_eng_grp(&cptpf->eng_grps,
							 etype);
		otx2_cpt_fill_inst(&inst, &iq_cmd, rptr_baddr);
		lfs->ops->send_cmd(&inst, 1, &cptpf->lfs.lf[0]);

		/*
		 * Busy-poll until HW overwrites the init completion code.
		 * NOTE(review): there is no timeout here — a wedged engine
		 * would stall this loop forever; confirm this is acceptable
		 * at probe time.
		 */
		while (lfs->ops->cpt_get_compcode(result) ==
						OTX2_CPT_COMPLETION_CODE_INIT)
			cpu_relax();

		/* Response is big-endian per the 64-bit swap above */
		cptpf->eng_caps[etype].u = be64_to_cpup(rptr);
	}
	dma_unmap_single(&pdev->dev, rptr_baddr, len, DMA_BIDIRECTIONAL);
	cptpf->is_eng_caps_discovered = true;

free_result:
	kfree(result);
lf_cleanup:
	otx2_cptlf_shutdown(&cptpf->lfs);
delete_grps:
	delete_engine_grps(pdev, &cptpf->eng_grps);

	return ret;
}
1517 
1518 int otx2_cpt_dl_custom_egrp_create(struct otx2_cptpf_dev *cptpf,
1519 				   struct devlink_param_gset_ctx *ctx)
1520 {
1521 	struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { { 0 } };
1522 	struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = {};
1523 	struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps;
1524 	char *ucode_filename[OTX2_CPT_MAX_ETYPES_PER_GRP];
1525 	char tmp_buf[OTX2_CPT_NAME_LENGTH] = { 0 };
1526 	struct device *dev = &cptpf->pdev->dev;
1527 	char *start, *val, *err_msg, *tmp;
1528 	int grp_idx = 0, ret = -EINVAL;
1529 	bool has_se, has_ie, has_ae;
1530 	struct fw_info_t fw_info;
1531 	int ucode_idx = 0;
1532 
1533 	if (!eng_grps->is_grps_created) {
1534 		dev_err(dev, "Not allowed before creating the default groups\n");
1535 		return -EINVAL;
1536 	}
1537 	err_msg = "Invalid engine group format";
1538 	strscpy(tmp_buf, ctx->val.vstr, strlen(ctx->val.vstr) + 1);
1539 	start = tmp_buf;
1540 
1541 	has_se = has_ie = has_ae = false;
1542 
1543 	for (;;) {
1544 		val = strsep(&start, ";");
1545 		if (!val)
1546 			break;
1547 		val = strim(val);
1548 		if (!*val)
1549 			continue;
1550 
1551 		if (!strncasecmp(val, "se", 2) && strchr(val, ':')) {
1552 			if (has_se || ucode_idx)
1553 				goto err_print;
1554 			tmp = strim(strsep(&val, ":"));
1555 			if (!val)
1556 				goto err_print;
1557 			if (strlen(tmp) != 2)
1558 				goto err_print;
1559 			if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
1560 				goto err_print;
1561 			engs[grp_idx++].type = OTX2_CPT_SE_TYPES;
1562 			has_se = true;
1563 		} else if (!strncasecmp(val, "ae", 2) && strchr(val, ':')) {
1564 			if (has_ae || ucode_idx)
1565 				goto err_print;
1566 			tmp = strim(strsep(&val, ":"));
1567 			if (!val)
1568 				goto err_print;
1569 			if (strlen(tmp) != 2)
1570 				goto err_print;
1571 			if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
1572 				goto err_print;
1573 			engs[grp_idx++].type = OTX2_CPT_AE_TYPES;
1574 			has_ae = true;
1575 		} else if (!strncasecmp(val, "ie", 2) && strchr(val, ':')) {
1576 			if (has_ie || ucode_idx)
1577 				goto err_print;
1578 			tmp = strim(strsep(&val, ":"));
1579 			if (!val)
1580 				goto err_print;
1581 			if (strlen(tmp) != 2)
1582 				goto err_print;
1583 			if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
1584 				goto err_print;
1585 			engs[grp_idx++].type = OTX2_CPT_IE_TYPES;
1586 			has_ie = true;
1587 		} else {
1588 			if (ucode_idx > 1)
1589 				goto err_print;
1590 			if (!strlen(val))
1591 				goto err_print;
1592 			if (strnstr(val, " ", strlen(val)))
1593 				goto err_print;
1594 			ucode_filename[ucode_idx++] = val;
1595 		}
1596 	}
1597 
1598 	/* Validate input parameters */
1599 	if (!(grp_idx && ucode_idx))
1600 		goto err_print;
1601 
1602 	if (ucode_idx > 1 && grp_idx < 2)
1603 		goto err_print;
1604 
1605 	if (grp_idx > OTX2_CPT_MAX_ETYPES_PER_GRP) {
1606 		err_msg = "Error max 2 engine types can be attached";
1607 		goto err_print;
1608 	}
1609 
1610 	if (grp_idx > 1) {
1611 		if ((engs[0].type + engs[1].type) !=
1612 		    (OTX2_CPT_SE_TYPES + OTX2_CPT_IE_TYPES)) {
1613 			err_msg = "Only combination of SE+IE engines is allowed";
1614 			goto err_print;
1615 		}
1616 		/* Keep SE engines at zero index */
1617 		if (engs[1].type == OTX2_CPT_SE_TYPES)
1618 			swap(engs[0], engs[1]);
1619 	}
1620 	mutex_lock(&eng_grps->lock);
1621 
1622 	if (cptpf->enabled_vfs) {
1623 		dev_err(dev, "Disable VFs before modifying engine groups\n");
1624 		ret = -EACCES;
1625 		goto err_unlock;
1626 	}
1627 	INIT_LIST_HEAD(&fw_info.ucodes);
1628 	ret = load_fw(dev, &fw_info, ucode_filename[0]);
1629 	if (ret) {
1630 		dev_err(dev, "Unable to load firmware %s\n", ucode_filename[0]);
1631 		goto err_unlock;
1632 	}
1633 	if (ucode_idx > 1) {
1634 		ret = load_fw(dev, &fw_info, ucode_filename[1]);
1635 		if (ret) {
1636 			dev_err(dev, "Unable to load firmware %s\n",
1637 				ucode_filename[1]);
1638 			goto release_fw;
1639 		}
1640 	}
1641 	uc_info[0] = get_ucode(&fw_info, engs[0].type);
1642 	if (uc_info[0] == NULL) {
1643 		dev_err(dev, "Unable to find firmware for %s\n",
1644 			get_eng_type_str(engs[0].type));
1645 		ret = -EINVAL;
1646 		goto release_fw;
1647 	}
1648 	if (ucode_idx > 1) {
1649 		uc_info[1] = get_ucode(&fw_info, engs[1].type);
1650 		if (uc_info[1] == NULL) {
1651 			dev_err(dev, "Unable to find firmware for %s\n",
1652 				get_eng_type_str(engs[1].type));
1653 			ret = -EINVAL;
1654 			goto release_fw;
1655 		}
1656 	}
1657 	ret = create_engine_group(dev, eng_grps, engs, grp_idx,
1658 				  (void **)uc_info, 1);
1659 
1660 release_fw:
1661 	cpt_ucode_release_fw(&fw_info);
1662 err_unlock:
1663 	mutex_unlock(&eng_grps->lock);
1664 	return ret;
1665 err_print:
1666 	dev_err(dev, "%s\n", err_msg);
1667 	return ret;
1668 }
1669 
1670 int otx2_cpt_dl_custom_egrp_delete(struct otx2_cptpf_dev *cptpf,
1671 				   struct devlink_param_gset_ctx *ctx)
1672 {
1673 	struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps;
1674 	struct device *dev = &cptpf->pdev->dev;
1675 	char *tmp, *err_msg;
1676 	int egrp;
1677 	int ret;
1678 
1679 	err_msg = "Invalid input string format(ex: egrp:0)";
1680 	if (strncasecmp(ctx->val.vstr, "egrp", 4))
1681 		goto err_print;
1682 	tmp = ctx->val.vstr;
1683 	strsep(&tmp, ":");
1684 	if (!tmp)
1685 		goto err_print;
1686 	if (kstrtoint(tmp, 10, &egrp))
1687 		goto err_print;
1688 
1689 	if (egrp < 0 || egrp >= OTX2_CPT_MAX_ENGINE_GROUPS) {
1690 		dev_err(dev, "Invalid engine group %d", egrp);
1691 		return -EINVAL;
1692 	}
1693 	if (!eng_grps->grp[egrp].is_enabled) {
1694 		dev_err(dev, "Error engine_group%d is not configured", egrp);
1695 		return -EINVAL;
1696 	}
1697 	mutex_lock(&eng_grps->lock);
1698 	ret = delete_engine_group(dev, &eng_grps->grp[egrp]);
1699 	mutex_unlock(&eng_grps->lock);
1700 
1701 	return ret;
1702 
1703 err_print:
1704 	dev_err(dev, "%s\n", err_msg);
1705 	return -EINVAL;
1706 }
1707 
1708 static void get_engs_info(struct otx2_cpt_eng_grp_info *eng_grp, char *buf,
1709 			  int size, int idx)
1710 {
1711 	struct otx2_cpt_engs_rsvd *mirrored_engs = NULL;
1712 	struct otx2_cpt_engs_rsvd *engs;
1713 	int len, i;
1714 
1715 	buf[0] = '\0';
1716 	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
1717 		engs = &eng_grp->engs[i];
1718 		if (!engs->type)
1719 			continue;
1720 		if (idx != -1 && idx != i)
1721 			continue;
1722 
1723 		if (eng_grp->mirror.is_ena)
1724 			mirrored_engs = find_engines_by_type(
1725 				&eng_grp->g->grp[eng_grp->mirror.idx],
1726 				engs->type);
1727 		if (i > 0 && idx == -1) {
1728 			len = strlen(buf);
1729 			scnprintf(buf + len, size - len, ", ");
1730 		}
1731 
1732 		len = strlen(buf);
1733 		scnprintf(buf + len, size - len, "%d %s ",
1734 			  mirrored_engs ? engs->count + mirrored_engs->count :
1735 					  engs->count,
1736 			  get_eng_type_str(engs->type));
1737 		if (mirrored_engs) {
1738 			len = strlen(buf);
1739 			scnprintf(buf + len, size - len,
1740 				  "(%d shared with engine_group%d) ",
1741 				  engs->count <= 0 ?
1742 					  engs->count + mirrored_engs->count :
1743 					  mirrored_engs->count,
1744 				  eng_grp->mirror.idx);
1745 		}
1746 	}
1747 }
1748 
1749 void otx2_cpt_print_uc_dbg_info(struct otx2_cptpf_dev *cptpf)
1750 {
1751 	struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps;
1752 	struct otx2_cpt_eng_grp_info *mirrored_grp;
1753 	char engs_info[2 * OTX2_CPT_NAME_LENGTH];
1754 	struct otx2_cpt_eng_grp_info *grp;
1755 	struct otx2_cpt_engs_rsvd *engs;
1756 	int i, j;
1757 
1758 	pr_debug("Engine groups global info");
1759 	pr_debug("max SE %d, max IE %d, max AE %d", eng_grps->avail.max_se_cnt,
1760 		 eng_grps->avail.max_ie_cnt, eng_grps->avail.max_ae_cnt);
1761 	pr_debug("free SE %d", eng_grps->avail.se_cnt);
1762 	pr_debug("free IE %d", eng_grps->avail.ie_cnt);
1763 	pr_debug("free AE %d", eng_grps->avail.ae_cnt);
1764 
1765 	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
1766 		grp = &eng_grps->grp[i];
1767 		pr_debug("engine_group%d, state %s", i,
1768 			 grp->is_enabled ? "enabled" : "disabled");
1769 		if (grp->is_enabled) {
1770 			mirrored_grp = &eng_grps->grp[grp->mirror.idx];
1771 			pr_debug("Ucode0 filename %s, version %s",
1772 				 grp->mirror.is_ena ?
1773 					 mirrored_grp->ucode[0].filename :
1774 					 grp->ucode[0].filename,
1775 				 grp->mirror.is_ena ?
1776 					 mirrored_grp->ucode[0].ver_str :
1777 					 grp->ucode[0].ver_str);
1778 			if (is_2nd_ucode_used(grp))
1779 				pr_debug("Ucode1 filename %s, version %s",
1780 					 grp->ucode[1].filename,
1781 					 grp->ucode[1].ver_str);
1782 		}
1783 
1784 		for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
1785 			engs = &grp->engs[j];
1786 			if (engs->type) {
1787 				u32 mask[5] = { };
1788 
1789 				get_engs_info(grp, engs_info,
1790 					      2 * OTX2_CPT_NAME_LENGTH, j);
1791 				pr_debug("Slot%d: %s", j, engs_info);
1792 				bitmap_to_arr32(mask, engs->bmap,
1793 						eng_grps->engs_num);
1794 				if (is_dev_otx2(cptpf->pdev))
1795 					pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x",
1796 						 mask[3], mask[2], mask[1],
1797 						 mask[0]);
1798 				else
1799 					pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x %8.8x",
1800 						 mask[4], mask[3], mask[2], mask[1],
1801 						 mask[0]);
1802 			}
1803 		}
1804 	}
1805 }
1806