1 // SPDX-License-Identifier: GPL-2.0
2 
3 /*
4  * Copyright 2016-2022 HabanaLabs, Ltd.
5  * All Rights Reserved.
6  */
7 
8 #include "habanalabs.h"
9 #include "../include/common/hl_boot_if.h"
10 
11 #include <linux/firmware.h>
12 #include <linux/crc32.h>
13 #include <linux/slab.h>
14 #include <linux/ctype.h>
15 #include <linux/vmalloc.h>
16 
17 #include <trace/events/habanalabs.h>
18 
19 #define FW_FILE_MAX_SIZE		0x1400000 /* maximum size of 20MB */
20 
21 static char *comms_cmd_str_arr[COMMS_INVLD_LAST] = {
22 	[COMMS_NOOP] = __stringify(COMMS_NOOP),
23 	[COMMS_CLR_STS] = __stringify(COMMS_CLR_STS),
24 	[COMMS_RST_STATE] = __stringify(COMMS_RST_STATE),
25 	[COMMS_PREP_DESC] = __stringify(COMMS_PREP_DESC),
26 	[COMMS_DATA_RDY] = __stringify(COMMS_DATA_RDY),
27 	[COMMS_EXEC] = __stringify(COMMS_EXEC),
28 	[COMMS_RST_DEV] = __stringify(COMMS_RST_DEV),
29 	[COMMS_GOTO_WFE] = __stringify(COMMS_GOTO_WFE),
30 	[COMMS_SKIP_BMC] = __stringify(COMMS_SKIP_BMC),
31 	[COMMS_PREP_DESC_ELBI] = __stringify(COMMS_PREP_DESC_ELBI),
32 };
33 
34 static char *comms_sts_str_arr[COMMS_STS_INVLD_LAST] = {
35 	[COMMS_STS_NOOP] = __stringify(COMMS_STS_NOOP),
36 	[COMMS_STS_ACK] = __stringify(COMMS_STS_ACK),
37 	[COMMS_STS_OK] = __stringify(COMMS_STS_OK),
38 	[COMMS_STS_ERR] = __stringify(COMMS_STS_ERR),
39 	[COMMS_STS_VALID_ERR] = __stringify(COMMS_STS_VALID_ERR),
40 	[COMMS_STS_TIMEOUT_ERR] = __stringify(COMMS_STS_TIMEOUT_ERR),
41 };
42 
43 static char *extract_fw_ver_from_str(const char *fw_str)
44 {
45 	char *str, *fw_ver, *whitespace;
46 	u32 ver_offset;
47 
48 	fw_ver = kmalloc(VERSION_MAX_LEN, GFP_KERNEL);
49 	if (!fw_ver)
50 		return NULL;
51 
52 	str = strnstr(fw_str, "fw-", VERSION_MAX_LEN);
53 	if (!str)
54 		goto free_fw_ver;
55 
56 	/* Skip the fw- part */
57 	str += 3;
58 	ver_offset = str - fw_str;
59 
60 	/* Copy until the next whitespace */
61 	whitespace = strnstr(str, " ", VERSION_MAX_LEN - ver_offset);
62 	if (!whitespace)
63 		goto free_fw_ver;
64 
65 	strscpy(fw_ver, str, whitespace - str + 1);
66 
67 	return fw_ver;
68 
69 free_fw_ver:
70 	kfree(fw_ver);
71 	return NULL;
72 }
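
/*
 * Illustrative sketch (not part of the driver): assuming a version string of
 * the form "... fw-3.9.0-abc123 (Oct 10 2022)", the helper above returns a
 * kmalloc()'ed copy of the token following "fw-", up to the next space
 * ("3.9.0-abc123" in this example). The caller owns the returned buffer:
 *
 *	char *ver = extract_fw_ver_from_str(prop->preboot_ver);
 *
 *	if (ver) {
 *		pr_info("parsed fw version %s\n", ver);
 *		kfree(ver);
 *	}
 */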
73 
74 static int extract_fw_sub_versions(struct hl_device *hdev, char *preboot_ver)
75 {
76 	char major[8], minor[8], *first_dot, *second_dot;
77 	int rc;
78 
79 	first_dot = strnstr(preboot_ver, ".", 10);
80 	if (first_dot) {
81 		strscpy(major, preboot_ver, first_dot - preboot_ver + 1);
82 		rc = kstrtou32(major, 10, &hdev->fw_major_version);
83 	} else {
84 		rc = -EINVAL;
85 	}
86 
87 	if (rc) {
88 		dev_err(hdev->dev, "Error %d parsing preboot major version\n", rc);
89 		goto out;
90 	}
91 
92 	/* skip the first dot */
93 	first_dot++;
94 
95 	second_dot = strnstr(first_dot, ".", 10);
96 	if (second_dot) {
97 		strscpy(minor, first_dot, second_dot - first_dot + 1);
98 		rc = kstrtou32(minor, 10, &hdev->fw_minor_version);
99 	} else {
100 		rc = -EINVAL;
101 	}
102 
103 	if (rc)
104 		dev_err(hdev->dev, "Error %d parsing preboot minor version\n", rc);
105 
106 out:
107 	kfree(preboot_ver);
108 	return rc;
109 }
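
/*
 * Illustrative sketch (assumption about the version string format): for a
 * preboot version string such as "42.7.3", the helper above stores 42 in
 * hdev->fw_major_version and 7 in hdev->fw_minor_version. Note that it
 * consumes (kfree()s) the buffer it is handed, so a caller would typically
 * pair it with extract_fw_ver_from_str():
 *
 *	char *ver = extract_fw_ver_from_str(prop->preboot_ver);
 *
 *	if (ver)
 *		rc = extract_fw_sub_versions(hdev, ver);
 */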
110 
111 static int hl_request_fw(struct hl_device *hdev,
112 				const struct firmware **firmware_p,
113 				const char *fw_name)
114 {
115 	size_t fw_size;
116 	int rc;
117 
118 	rc = request_firmware(firmware_p, fw_name, hdev->dev);
119 	if (rc) {
120 		dev_err(hdev->dev, "Firmware file %s is not found! (error %d)\n",
121 				fw_name, rc);
122 		goto out;
123 	}
124 
125 	fw_size = (*firmware_p)->size;
126 	if ((fw_size % 4) != 0) {
127 		dev_err(hdev->dev, "Illegal %s firmware size %zu\n",
128 				fw_name, fw_size);
129 		rc = -EINVAL;
130 		goto release_fw;
131 	}
132 
133 	dev_dbg(hdev->dev, "%s firmware size == %zu\n", fw_name, fw_size);
134 
135 	if (fw_size > FW_FILE_MAX_SIZE) {
136 		dev_err(hdev->dev,
137 			"FW file size %zu exceeds maximum of %u bytes\n",
138 			fw_size, FW_FILE_MAX_SIZE);
139 		rc = -EINVAL;
140 		goto release_fw;
141 	}
142 
143 	return 0;
144 
145 release_fw:
146 	release_firmware(*firmware_p);
147 out:
148 	return rc;
149 }
150 
151 /**
152  * hl_release_firmware() - release FW
153  *
154  * @fw: fw descriptor
155  *
156  * note: this inline wrapper was added to serve as the release counterpart of
157  *       the hl_request_fw() function.
158  */
159 static inline void hl_release_firmware(const struct firmware *fw)
160 {
161 	release_firmware(fw);
162 }
163 
164 /**
165  * hl_fw_copy_fw_to_device() - copy FW to device
166  *
167  * @hdev: pointer to hl_device structure.
168  * @fw: fw descriptor
169  * @dst: IO memory mapped address space to copy firmware to
170  * @src_offset: offset in src FW to copy from
171  * @size: amount of bytes to copy (0 to copy the whole binary)
172  *
173  * actual copy of FW binary data to device, shared by static and dynamic loaders
174  */
175 static int hl_fw_copy_fw_to_device(struct hl_device *hdev,
176 				const struct firmware *fw, void __iomem *dst,
177 				u32 src_offset, u32 size)
178 {
179 	const void *fw_data;
180 
181 	/* size 0 indicates to copy the whole file */
182 	if (!size)
183 		size = fw->size;
184 
185 	if (src_offset + size > fw->size) {
186 		dev_err(hdev->dev,
187 			"size to copy(%u) and offset(%u) are invalid\n",
188 			size, src_offset);
189 		return -EINVAL;
190 	}
191 
192 	fw_data = (const void *) fw->data;
193 
194 	memcpy_toio(dst, fw_data + src_offset, size);
195 	return 0;
196 }
197 
198 /**
199  * hl_fw_copy_msg_to_device() - copy message to device
200  *
201  * @hdev: pointer to hl_device structure.
202  * @msg: message
203  * @dst: IO memory mapped address space to copy the message to
204  * @src_offset: offset in src message to copy from
205  * @size: amount of bytes to copy (0 to copy the whole message)
206  *
207  * actual copy of message data to device.
208  */
209 static int hl_fw_copy_msg_to_device(struct hl_device *hdev,
210 		struct lkd_msg_comms *msg, void __iomem *dst,
211 		u32 src_offset, u32 size)
212 {
213 	void *msg_data;
214 
215 	/* size 0 indicates to copy the whole file */
216 	if (!size)
217 		size = sizeof(struct lkd_msg_comms);
218 
219 	if (src_offset + size > sizeof(struct lkd_msg_comms)) {
220 		dev_err(hdev->dev,
221 			"size to copy(%u) and offset(%u) are invalid\n",
222 			size, src_offset);
223 		return -EINVAL;
224 	}
225 
226 	msg_data = (void *) msg;
227 
228 	memcpy_toio(dst, msg_data + src_offset, size);
229 
230 	return 0;
231 }
232 
233 /**
234  * hl_fw_load_fw_to_device() - Load F/W code to device's memory.
235  *
236  * @hdev: pointer to hl_device structure.
237  * @fw_name: the firmware image name
238  * @dst: IO memory mapped address space to copy firmware to
239  * @src_offset: offset in src FW to copy from
240  * @size: amount of bytes to copy (0 to copy the whole binary)
241  *
242  * Copy fw code from firmware file to device memory.
243  *
244  * Return: 0 on success, non-zero for failure.
245  */
246 int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
247 				void __iomem *dst, u32 src_offset, u32 size)
248 {
249 	const struct firmware *fw;
250 	int rc;
251 
252 	rc = hl_request_fw(hdev, &fw, fw_name);
253 	if (rc)
254 		return rc;
255 
256 	rc = hl_fw_copy_fw_to_device(hdev, fw, dst, src_offset, size);
257 
258 	hl_release_firmware(fw);
259 	return rc;
260 }
261 
262 int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode, u64 value)
263 {
264 	struct cpucp_packet pkt = {};
265 
266 	pkt.ctl = cpu_to_le32(opcode << CPUCP_PKT_CTL_OPCODE_SHIFT);
267 	pkt.value = cpu_to_le64(value);
268 
269 	return hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
270 }
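
/*
 * Illustrative note: throughout this file a CPU-CP request is built by packing
 * the opcode into the 'ctl' word; on completion the FW reuses the same word to
 * report its return code, which hl_fw_send_cpu_message() below decodes, e.g.:
 *
 *	tmp = le32_to_cpu(pkt->ctl);
 *	rc = (tmp & CPUCP_PKT_CTL_RC_MASK) >> CPUCP_PKT_CTL_RC_SHIFT;
 */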
271 
272 int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
273 				u16 len, u32 timeout, u64 *result)
274 {
275 	struct hl_hw_queue *queue = &hdev->kernel_queues[hw_queue_id];
276 	struct asic_fixed_properties *prop = &hdev->asic_prop;
277 	struct cpucp_packet *pkt;
278 	dma_addr_t pkt_dma_addr;
279 	struct hl_bd *sent_bd;
280 	u32 tmp, expected_ack_val, pi, opcode;
281 	int rc;
282 
283 	pkt = hl_cpu_accessible_dma_pool_alloc(hdev, len, &pkt_dma_addr);
284 	if (!pkt) {
285 		dev_err(hdev->dev,
286 			"Failed to allocate DMA memory for packet to CPU\n");
287 		return -ENOMEM;
288 	}
289 
290 	memcpy(pkt, msg, len);
291 
292 	mutex_lock(&hdev->send_cpu_message_lock);
293 
294 	/* CPU-CP messages can be sent during soft-reset */
295 	if (hdev->disabled && !hdev->reset_info.in_compute_reset) {
296 		rc = 0;
297 		goto out;
298 	}
299 
300 	if (hdev->device_cpu_disabled) {
301 		rc = -EIO;
302 		goto out;
303 	}
304 
305 	/* set fence to an invalid value */
306 	pkt->fence = cpu_to_le32(UINT_MAX);
307 	pi = queue->pi;
308 
309 	/*
310 	 * The CPU queue is a synchronous queue with an effective depth of
311 	 * a single entry (although it is allocated with room for multiple
312 	 * entries). We lock on it using 'send_cpu_message_lock' which
313 	 * serializes accesses to the CPU queue. This means we don't need to
314 	 * lock the access to the entire H/W queues module when submitting a JOB
315 	 * to the CPU queue.
316 	 */
317 	hl_hw_queue_submit_bd(hdev, queue, hl_queue_inc_ptr(queue->pi), len, pkt_dma_addr);
318 
319 	if (prop->fw_app_cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN)
320 		expected_ack_val = queue->pi;
321 	else
322 		expected_ack_val = CPUCP_PACKET_FENCE_VAL;
323 
324 	rc = hl_poll_timeout_memory(hdev, &pkt->fence, tmp,
325 				(tmp == expected_ack_val), 1000,
326 				timeout, true);
327 
328 	hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);
329 
330 	if (rc == -ETIMEDOUT) {
331 		/* If the FW performed a reset just before we sent it a packet, we will get
332 		 * a timeout. This is expected behavior, hence no need for an error message.
333 		 */
334 		if (!hl_device_operational(hdev, NULL) && !hdev->reset_info.in_compute_reset)
335 			dev_dbg(hdev->dev, "Device CPU packet timeout (0x%x) due to FW reset\n",
336 					tmp);
337 		else
338 			dev_err(hdev->dev, "Device CPU packet timeout (status = 0x%x)\n", tmp);
339 		hdev->device_cpu_disabled = true;
340 		goto out;
341 	}
342 
343 	tmp = le32_to_cpu(pkt->ctl);
344 
345 	rc = (tmp & CPUCP_PKT_CTL_RC_MASK) >> CPUCP_PKT_CTL_RC_SHIFT;
346 	if (rc) {
347 		opcode = (tmp & CPUCP_PKT_CTL_OPCODE_MASK) >> CPUCP_PKT_CTL_OPCODE_SHIFT;
348 
349 		if (!prop->supports_advanced_cpucp_rc) {
350 			dev_dbg(hdev->dev, "F/W ERROR %d for CPU packet %d\n", rc, opcode);
351 			rc = -EIO;
352 			goto scrub_descriptor;
353 		}
354 
355 		switch (rc) {
356 		case cpucp_packet_invalid:
357 			dev_err(hdev->dev,
358 				"CPU packet %d is not supported by F/W\n", opcode);
359 			break;
360 		case cpucp_packet_fault:
361 			dev_err(hdev->dev,
362 				"F/W failed processing CPU packet %d\n", opcode);
363 			break;
364 		case cpucp_packet_invalid_pkt:
365 			dev_dbg(hdev->dev,
366 				"CPU packet %d is not supported by F/W\n", opcode);
367 			break;
368 		case cpucp_packet_invalid_params:
369 			dev_err(hdev->dev,
370 				"F/W reports invalid parameters for CPU packet %d\n", opcode);
371 			break;
372 
373 		default:
374 			dev_err(hdev->dev,
375 				"Unknown F/W ERROR %d for CPU packet %d\n", rc, opcode);
376 		}
377 
378 		/* propagate the return code from the f/w to the callers who want to check it */
379 		if (result)
380 			*result = rc;
381 
382 		rc = -EIO;
383 
384 	} else if (result) {
385 		*result = le64_to_cpu(pkt->result);
386 	}
387 
388 scrub_descriptor:
389 	/* Scrub previous buffer descriptor 'ctl' field which contains the
390 	 * previous PI value written during packet submission.
391 	 * We must do this or else F/W can read an old value upon queue wraparound.
392 	 */
393 	sent_bd = queue->kernel_address;
394 	sent_bd += hl_pi_2_offset(pi);
395 	sent_bd->ctl = cpu_to_le32(UINT_MAX);
396 
397 out:
398 	mutex_unlock(&hdev->send_cpu_message_lock);
399 
400 	hl_cpu_accessible_dma_pool_free(hdev, len, pkt);
401 
402 	return rc;
403 }
404 
405 int hl_fw_unmask_irq(struct hl_device *hdev, u16 event_type)
406 {
407 	struct cpucp_packet pkt;
408 	u64 result;
409 	int rc;
410 
411 	memset(&pkt, 0, sizeof(pkt));
412 
413 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_UNMASK_RAZWI_IRQ <<
414 				CPUCP_PKT_CTL_OPCODE_SHIFT);
415 	pkt.value = cpu_to_le64(event_type);
416 
417 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
418 						0, &result);
419 
420 	if (rc)
421 		dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d", event_type);
422 
423 	return rc;
424 }
425 
426 int hl_fw_unmask_irq_arr(struct hl_device *hdev, const u32 *irq_arr,
427 		size_t irq_arr_size)
428 {
429 	struct cpucp_unmask_irq_arr_packet *pkt;
430 	size_t total_pkt_size;
431 	u64 result;
432 	int rc;
433 
434 	total_pkt_size = sizeof(struct cpucp_unmask_irq_arr_packet) +
435 			irq_arr_size;
436 
437 	/* data should be aligned to 8 bytes in order for CPU-CP to copy it */
438 	total_pkt_size = (total_pkt_size + 0x7) & ~0x7;
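	/*
	 * Illustrative note: the round-up above is the standard align-up idiom,
	 * equivalent to the kernel helper ALIGN(total_pkt_size, 8). For example,
	 * a 13-byte packet is padded to 16 bytes: (13 + 0x7) & ~0x7 == 16.
	 */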
439 
440 	/* total_pkt_size is cast to u16 later on */
441 	if (total_pkt_size > USHRT_MAX) {
442 		dev_err(hdev->dev, "too many elements in IRQ array\n");
443 		return -EINVAL;
444 	}
445 
446 	pkt = kzalloc(total_pkt_size, GFP_KERNEL);
447 	if (!pkt)
448 		return -ENOMEM;
449 
450 	pkt->length = cpu_to_le32(irq_arr_size / sizeof(irq_arr[0]));
451 	memcpy(&pkt->irqs, irq_arr, irq_arr_size);
452 
453 	pkt->cpucp_pkt.ctl = cpu_to_le32(CPUCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
454 						CPUCP_PKT_CTL_OPCODE_SHIFT);
455 
456 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt,
457 						total_pkt_size, 0, &result);
458 
459 	if (rc)
460 		dev_err(hdev->dev, "failed to unmask IRQ array\n");
461 
462 	kfree(pkt);
463 
464 	return rc;
465 }
466 
467 int hl_fw_test_cpu_queue(struct hl_device *hdev)
468 {
469 	struct cpucp_packet test_pkt = {};
470 	u64 result;
471 	int rc;
472 
473 	test_pkt.ctl = cpu_to_le32(CPUCP_PACKET_TEST <<
474 					CPUCP_PKT_CTL_OPCODE_SHIFT);
475 	test_pkt.value = cpu_to_le64(CPUCP_PACKET_FENCE_VAL);
476 
477 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &test_pkt,
478 						sizeof(test_pkt), 0, &result);
479 
480 	if (!rc) {
481 		if (result != CPUCP_PACKET_FENCE_VAL)
482 			dev_err(hdev->dev,
483 				"CPU queue test failed (%#08llx)\n", result);
484 	} else {
485 		dev_err(hdev->dev, "CPU queue test failed, error %d\n", rc);
486 	}
487 
488 	return rc;
489 }
490 
491 void *hl_fw_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
492 						dma_addr_t *dma_handle)
493 {
494 	u64 kernel_addr;
495 
496 	kernel_addr = gen_pool_alloc(hdev->cpu_accessible_dma_pool, size);
497 
498 	*dma_handle = hdev->cpu_accessible_dma_address +
499 		(kernel_addr - (u64) (uintptr_t) hdev->cpu_accessible_dma_mem);
500 
501 	return (void *) (uintptr_t) kernel_addr;
502 }
503 
504 void hl_fw_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
505 					void *vaddr)
506 {
507 	gen_pool_free(hdev->cpu_accessible_dma_pool, (u64) (uintptr_t) vaddr,
508 			size);
509 }
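
/*
 * Illustrative usage sketch: callers in this file allocate a CPU-accessible
 * buffer, hand its DMA address to the firmware and free it afterwards via
 * the hl_cpu_accessible_dma_pool_alloc()/_free() entry points (assumed to be
 * thin wrappers around the helpers above), e.g.:
 *
 *	dma_addr_t dma_addr;
 *	void *va;
 *
 *	va = hl_cpu_accessible_dma_pool_alloc(hdev, size, &dma_addr);
 *	if (!va)
 *		return -ENOMEM;
 *
 *	(pass dma_addr to the FW, read the reply back through va)
 *
 *	hl_cpu_accessible_dma_pool_free(hdev, size, va);
 */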
510 
511 int hl_fw_send_device_activity(struct hl_device *hdev, bool open)
512 {
513 	struct cpucp_packet pkt;
514 	int rc;
515 
516 	memset(&pkt, 0, sizeof(pkt));
517 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_ACTIVE_STATUS_SET <<	CPUCP_PKT_CTL_OPCODE_SHIFT);
518 	pkt.value = cpu_to_le64(open);
519 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
520 	if (rc)
521 		dev_err(hdev->dev, "failed to send device activity msg(%u)\n", open);
522 
523 	return rc;
524 }
525 
526 int hl_fw_send_heartbeat(struct hl_device *hdev)
527 {
528 	struct cpucp_packet hb_pkt;
529 	u64 result;
530 	int rc;
531 
532 	memset(&hb_pkt, 0, sizeof(hb_pkt));
533 	hb_pkt.ctl = cpu_to_le32(CPUCP_PACKET_TEST <<
534 					CPUCP_PKT_CTL_OPCODE_SHIFT);
535 	hb_pkt.value = cpu_to_le64(CPUCP_PACKET_FENCE_VAL);
536 
537 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &hb_pkt,
538 						sizeof(hb_pkt), 0, &result);
539 
540 	if ((rc) || (result != CPUCP_PACKET_FENCE_VAL))
541 		return -EIO;
542 
543 	if (le32_to_cpu(hb_pkt.status_mask) &
544 					CPUCP_PKT_HB_STATUS_EQ_FAULT_MASK) {
545 		dev_warn(hdev->dev, "FW reported EQ fault during heartbeat\n");
546 		rc = -EIO;
547 	}
548 
549 	return rc;
550 }
551 
552 static bool fw_report_boot_dev0(struct hl_device *hdev, u32 err_val,
553 								u32 sts_val)
554 {
555 	bool err_exists = false;
556 
557 	if (!(err_val & CPU_BOOT_ERR0_ENABLED))
558 		return false;
559 
560 	if (err_val & CPU_BOOT_ERR0_DRAM_INIT_FAIL) {
561 		dev_err(hdev->dev,
562 			"Device boot error - DRAM initialization failed\n");
563 		err_exists = true;
564 	}
565 
566 	if (err_val & CPU_BOOT_ERR0_FIT_CORRUPTED) {
567 		dev_err(hdev->dev, "Device boot error - FIT image corrupted\n");
568 		err_exists = true;
569 	}
570 
571 	if (err_val & CPU_BOOT_ERR0_TS_INIT_FAIL) {
572 		dev_err(hdev->dev,
573 			"Device boot error - Thermal Sensor initialization failed\n");
574 		err_exists = true;
575 	}
576 
577 	if (err_val & CPU_BOOT_ERR0_BMC_WAIT_SKIPPED) {
578 		if (hdev->bmc_enable) {
579 			dev_err(hdev->dev,
580 				"Device boot error - Skipped waiting for BMC\n");
581 			err_exists = true;
582 		} else {
583 			dev_info(hdev->dev,
584 				"Device boot message - Skipped waiting for BMC\n");
585 			/* This is an info so we don't want it to disable the
586 			 * device
587 			 */
588 			err_val &= ~CPU_BOOT_ERR0_BMC_WAIT_SKIPPED;
589 		}
590 	}
591 
592 	if (err_val & CPU_BOOT_ERR0_NIC_DATA_NOT_RDY) {
593 		dev_err(hdev->dev,
594 			"Device boot error - Serdes data from BMC not available\n");
595 		err_exists = true;
596 	}
597 
598 	if (err_val & CPU_BOOT_ERR0_NIC_FW_FAIL) {
599 		dev_err(hdev->dev,
600 			"Device boot error - NIC F/W initialization failed\n");
601 		err_exists = true;
602 	}
603 
604 	if (err_val & CPU_BOOT_ERR0_SECURITY_NOT_RDY) {
605 		dev_err(hdev->dev,
606 			"Device boot warning - security not ready\n");
607 		err_exists = true;
608 	}
609 
610 	if (err_val & CPU_BOOT_ERR0_SECURITY_FAIL) {
611 		dev_err(hdev->dev, "Device boot error - security failure\n");
612 		err_exists = true;
613 	}
614 
615 	if (err_val & CPU_BOOT_ERR0_EFUSE_FAIL) {
616 		dev_err(hdev->dev, "Device boot error - eFuse failure\n");
617 		err_exists = true;
618 	}
619 
620 	if (err_val & CPU_BOOT_ERR0_SEC_IMG_VER_FAIL) {
621 		dev_err(hdev->dev, "Device boot error - Failed to load preboot secondary image\n");
622 		err_exists = true;
623 	}
624 
625 	if (err_val & CPU_BOOT_ERR0_PLL_FAIL) {
626 		dev_err(hdev->dev, "Device boot error - PLL failure\n");
627 		err_exists = true;
628 	}
629 
630 	if (err_val & CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL) {
631 		/* Ignore this bit, don't prevent driver loading */
632 		dev_dbg(hdev->dev, "device unusable status is set\n");
633 		err_val &= ~CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL;
634 	}
635 
636 	if (err_val & CPU_BOOT_ERR0_BINNING_FAIL) {
637 		dev_err(hdev->dev, "Device boot error - binning failure\n");
638 		err_exists = true;
639 	}
640 
641 	if (sts_val & CPU_BOOT_DEV_STS0_ENABLED)
642 		dev_dbg(hdev->dev, "Device status0 %#x\n", sts_val);
643 
644 	if (err_val & CPU_BOOT_ERR0_EEPROM_FAIL) {
645 		dev_err(hdev->dev, "Device boot error - EEPROM failure detected\n");
646 		err_exists = true;
647 	}
648 
649 	/* All warnings should go here in order not to reach the unknown error validation */
650 	if (err_val & CPU_BOOT_ERR0_DRAM_SKIPPED) {
651 		dev_warn(hdev->dev,
652 			"Device boot warning - Skipped DRAM initialization\n");
653 		/* This is a warning so we don't want it to disable the
654 		 * device
655 		 */
656 		err_val &= ~CPU_BOOT_ERR0_DRAM_SKIPPED;
657 	}
658 
659 	if (err_val & CPU_BOOT_ERR0_PRI_IMG_VER_FAIL) {
660 		dev_warn(hdev->dev,
661 			"Device boot warning - Failed to load preboot primary image\n");
662 		/* This is a warning so we don't want it to disable the
663 		 * device as we have a secondary preboot image
664 		 */
665 		err_val &= ~CPU_BOOT_ERR0_PRI_IMG_VER_FAIL;
666 	}
667 
668 	if (err_val & CPU_BOOT_ERR0_TPM_FAIL) {
669 		dev_warn(hdev->dev,
670 			"Device boot warning - TPM failure\n");
671 		/* This is a warning so we don't want it to disable the
672 		 * device
673 		 */
674 		err_val &= ~CPU_BOOT_ERR0_TPM_FAIL;
675 	}
676 
677 	if (!err_exists && (err_val & ~CPU_BOOT_ERR0_ENABLED)) {
678 		dev_err(hdev->dev,
679 			"Device boot error - unknown ERR0 error 0x%08x\n", err_val);
680 		err_exists = true;
681 	}
682 
683 	/* return error only if it's in the predefined mask */
684 	if (err_exists && ((err_val & ~CPU_BOOT_ERR0_ENABLED) &
685 				lower_32_bits(hdev->boot_error_status_mask)))
686 		return true;
687 
688 	return false;
689 }
690 
691 /* placeholder for ERR1 as no errors defined there yet */
692 static bool fw_report_boot_dev1(struct hl_device *hdev, u32 err_val,
693 								u32 sts_val)
694 {
695 	/*
696 	 * keep this variable to preserve the logic of the function.
697 	 * this way fewer modifications will be required when errors are
698 	 * added to DEV_ERR1
699 	 */
700 	bool err_exists = false;
701 
702 	if (!(err_val & CPU_BOOT_ERR1_ENABLED))
703 		return false;
704 
705 	if (sts_val & CPU_BOOT_DEV_STS1_ENABLED)
706 		dev_dbg(hdev->dev, "Device status1 %#x\n", sts_val);
707 
708 	if (!err_exists && (err_val & ~CPU_BOOT_ERR1_ENABLED)) {
709 		dev_err(hdev->dev,
710 			"Device boot error - unknown ERR1 error 0x%08x\n",
711 								err_val);
712 		err_exists = true;
713 	}
714 
715 	/* return error only if it's in the predefined mask */
716 	if (err_exists && ((err_val & ~CPU_BOOT_ERR1_ENABLED) &
717 				upper_32_bits(hdev->boot_error_status_mask)))
718 		return true;
719 
720 	return false;
721 }
722 
723 static int fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg,
724 				u32 boot_err1_reg, u32 cpu_boot_dev_status0_reg,
725 				u32 cpu_boot_dev_status1_reg)
726 {
727 	u32 err_val, status_val;
728 	bool err_exists = false;
729 
730 	/* Some of the firmware status codes are deprecated in newer f/w
731 	 * versions. In those versions, the errors are reported
732 	 * in different registers. Therefore, we need to check those
733 	 * registers and print the exact errors. Moreover, there
734 	 * may be multiple errors, so we need to report on each error
735 	 * separately. Some of the error codes might indicate a state
736 	 * that is not an error per se, but is considered an error in a
737 	 * production environment
738 	 */
739 	err_val = RREG32(boot_err0_reg);
740 	status_val = RREG32(cpu_boot_dev_status0_reg);
741 	err_exists = fw_report_boot_dev0(hdev, err_val, status_val);
742 
743 	err_val = RREG32(boot_err1_reg);
744 	status_val = RREG32(cpu_boot_dev_status1_reg);
745 	err_exists |= fw_report_boot_dev1(hdev, err_val, status_val);
746 
747 	if (err_exists)
748 		return -EIO;
749 
750 	return 0;
751 }
752 
753 int hl_fw_cpucp_info_get(struct hl_device *hdev,
754 				u32 sts_boot_dev_sts0_reg,
755 				u32 sts_boot_dev_sts1_reg, u32 boot_err0_reg,
756 				u32 boot_err1_reg)
757 {
758 	struct asic_fixed_properties *prop = &hdev->asic_prop;
759 	struct cpucp_packet pkt = {};
760 	dma_addr_t cpucp_info_dma_addr;
761 	void *cpucp_info_cpu_addr;
762 	char *kernel_ver;
763 	u64 result;
764 	int rc;
765 
766 	cpucp_info_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, sizeof(struct cpucp_info),
767 								&cpucp_info_dma_addr);
768 	if (!cpucp_info_cpu_addr) {
769 		dev_err(hdev->dev,
770 			"Failed to allocate DMA memory for CPU-CP info packet\n");
771 		return -ENOMEM;
772 	}
773 
774 	memset(cpucp_info_cpu_addr, 0, sizeof(struct cpucp_info));
775 
776 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_INFO_GET <<
777 				CPUCP_PKT_CTL_OPCODE_SHIFT);
778 	pkt.addr = cpu_to_le64(cpucp_info_dma_addr);
779 	pkt.data_max_size = cpu_to_le32(sizeof(struct cpucp_info));
780 
781 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
782 					HL_CPUCP_INFO_TIMEOUT_USEC, &result);
783 	if (rc) {
784 		dev_err(hdev->dev,
785 			"Failed to handle CPU-CP info pkt, error %d\n", rc);
786 		goto out;
787 	}
788 
789 	rc = fw_read_errors(hdev, boot_err0_reg, boot_err1_reg,
790 				sts_boot_dev_sts0_reg, sts_boot_dev_sts1_reg);
791 	if (rc) {
792 		dev_err(hdev->dev, "Errors in device boot\n");
793 		goto out;
794 	}
795 
796 	memcpy(&prop->cpucp_info, cpucp_info_cpu_addr,
797 			sizeof(prop->cpucp_info));
798 
799 	rc = hl_build_hwmon_channel_info(hdev, prop->cpucp_info.sensors);
800 	if (rc) {
801 		dev_err(hdev->dev,
802 			"Failed to build hwmon channel info, error %d\n", rc);
803 		rc = -EFAULT;
804 		goto out;
805 	}
806 
807 	kernel_ver = extract_fw_ver_from_str(prop->cpucp_info.kernel_version);
808 	if (kernel_ver) {
809 		dev_info(hdev->dev, "Linux version %s", kernel_ver);
810 		kfree(kernel_ver);
811 	}
812 
813 	/* assume EQ code doesn't need to check eqe index */
814 	hdev->event_queue.check_eqe_index = false;
815 
816 	/* Read FW application security bits again */
817 	if (prop->fw_cpu_boot_dev_sts0_valid) {
818 		prop->fw_app_cpu_boot_dev_sts0 = RREG32(sts_boot_dev_sts0_reg);
819 		if (prop->fw_app_cpu_boot_dev_sts0 &
820 				CPU_BOOT_DEV_STS0_EQ_INDEX_EN)
821 			hdev->event_queue.check_eqe_index = true;
822 	}
823 
824 	if (prop->fw_cpu_boot_dev_sts1_valid)
825 		prop->fw_app_cpu_boot_dev_sts1 = RREG32(sts_boot_dev_sts1_reg);
826 
827 out:
828 	hl_cpu_accessible_dma_pool_free(hdev, sizeof(struct cpucp_info), cpucp_info_cpu_addr);
829 
830 	return rc;
831 }
832 
833 static int hl_fw_send_msi_info_msg(struct hl_device *hdev)
834 {
835 	struct cpucp_array_data_packet *pkt;
836 	size_t total_pkt_size, data_size;
837 	u64 result;
838 	int rc;
839 
840 	/* skip sending this info for unsupported ASICs */
841 	if (!hdev->asic_funcs->get_msi_info)
842 		return 0;
843 
844 	data_size = CPUCP_NUM_OF_MSI_TYPES * sizeof(u32);
845 	total_pkt_size = sizeof(struct cpucp_array_data_packet) + data_size;
846 
847 	/* data should be aligned to 8 bytes in order for CPU-CP to copy it */
848 	total_pkt_size = (total_pkt_size + 0x7) & ~0x7;
849 
850 	/* total_pkt_size is cast to u16 later on */
851 	if (total_pkt_size > USHRT_MAX) {
852 		dev_err(hdev->dev, "CPUCP array data is too big\n");
853 		return -EINVAL;
854 	}
855 
856 	pkt = kzalloc(total_pkt_size, GFP_KERNEL);
857 	if (!pkt)
858 		return -ENOMEM;
859 
860 	pkt->length = cpu_to_le32(CPUCP_NUM_OF_MSI_TYPES);
861 
862 	memset((void *) &pkt->data, 0xFF, data_size);
863 	hdev->asic_funcs->get_msi_info(pkt->data);
864 
865 	pkt->cpucp_pkt.ctl = cpu_to_le32(CPUCP_PACKET_MSI_INFO_SET <<
866 						CPUCP_PKT_CTL_OPCODE_SHIFT);
867 
868 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *)pkt,
869 						total_pkt_size, 0, &result);
870 
871 	/*
872 	 * in case the packet result is invalid it means that the FW does not
873 	 * support this feature and will use default/hard-coded MSI values. No
874 	 * reason to stop the boot.
875 	 */
876 	if (rc && result == cpucp_packet_invalid)
877 		rc = 0;
878 
879 	if (rc)
880 		dev_err(hdev->dev, "failed to send CPUCP array data\n");
881 
882 	kfree(pkt);
883 
884 	return rc;
885 }
886 
887 int hl_fw_cpucp_handshake(struct hl_device *hdev,
888 				u32 sts_boot_dev_sts0_reg,
889 				u32 sts_boot_dev_sts1_reg, u32 boot_err0_reg,
890 				u32 boot_err1_reg)
891 {
892 	int rc;
893 
894 	rc = hl_fw_cpucp_info_get(hdev, sts_boot_dev_sts0_reg,
895 					sts_boot_dev_sts1_reg, boot_err0_reg,
896 					boot_err1_reg);
897 	if (rc)
898 		return rc;
899 
900 	return hl_fw_send_msi_info_msg(hdev);
901 }
902 
903 int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size)
904 {
905 	struct cpucp_packet pkt = {};
906 	void *eeprom_info_cpu_addr;
907 	dma_addr_t eeprom_info_dma_addr;
908 	u64 result;
909 	int rc;
910 
911 	eeprom_info_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, max_size,
912 									&eeprom_info_dma_addr);
913 	if (!eeprom_info_cpu_addr) {
914 		dev_err(hdev->dev,
915 			"Failed to allocate DMA memory for CPU-CP EEPROM packet\n");
916 		return -ENOMEM;
917 	}
918 
919 	memset(eeprom_info_cpu_addr, 0, max_size);
920 
921 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_EEPROM_DATA_GET <<
922 				CPUCP_PKT_CTL_OPCODE_SHIFT);
923 	pkt.addr = cpu_to_le64(eeprom_info_dma_addr);
924 	pkt.data_max_size = cpu_to_le32(max_size);
925 
926 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
927 			HL_CPUCP_EEPROM_TIMEOUT_USEC, &result);
928 
929 	if (rc) {
930 		dev_err(hdev->dev,
931 			"Failed to handle CPU-CP EEPROM packet, error %d\n",
932 			rc);
933 		goto out;
934 	}
935 
936 	/* result contains the actual size */
937 	memcpy(data, eeprom_info_cpu_addr, min((size_t)result, max_size));
938 
939 out:
940 	hl_cpu_accessible_dma_pool_free(hdev, max_size, eeprom_info_cpu_addr);
941 
942 	return rc;
943 }
944 
945 int hl_fw_get_monitor_dump(struct hl_device *hdev, void *data)
946 {
947 	struct cpucp_monitor_dump *mon_dump_cpu_addr;
948 	dma_addr_t mon_dump_dma_addr;
949 	struct cpucp_packet pkt = {};
950 	size_t data_size;
951 	__le32 *src_ptr;
952 	u32 *dst_ptr;
953 	u64 result;
954 	int i, rc;
955 
956 	data_size = sizeof(struct cpucp_monitor_dump);
957 	mon_dump_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, data_size, &mon_dump_dma_addr);
958 	if (!mon_dump_cpu_addr) {
959 		dev_err(hdev->dev,
960 			"Failed to allocate DMA memory for CPU-CP monitor-dump packet\n");
961 		return -ENOMEM;
962 	}
963 
964 	memset(mon_dump_cpu_addr, 0, data_size);
965 
966 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_MONITOR_DUMP_GET << CPUCP_PKT_CTL_OPCODE_SHIFT);
967 	pkt.addr = cpu_to_le64(mon_dump_dma_addr);
968 	pkt.data_max_size = cpu_to_le32(data_size);
969 
970 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
971 							HL_CPUCP_MON_DUMP_TIMEOUT_USEC, &result);
972 	if (rc) {
973 		dev_err(hdev->dev, "Failed to handle CPU-CP monitor-dump packet, error %d\n", rc);
974 		goto out;
975 	}
976 
977 	/* result contains the actual size */
978 	src_ptr = (__le32 *) mon_dump_cpu_addr;
979 	dst_ptr = data;
980 	for (i = 0; i < (data_size / sizeof(u32)); i++) {
981 		*dst_ptr = le32_to_cpu(*src_ptr);
982 		src_ptr++;
983 		dst_ptr++;
984 	}
985 
986 out:
987 	hl_cpu_accessible_dma_pool_free(hdev, data_size, mon_dump_cpu_addr);
988 
989 	return rc;
990 }
991 
992 int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
993 		struct hl_info_pci_counters *counters)
994 {
995 	struct cpucp_packet pkt = {};
996 	u64 result;
997 	int rc;
998 
999 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_THROUGHPUT_GET <<
1000 			CPUCP_PKT_CTL_OPCODE_SHIFT);
1001 
1002 	/* Fetch PCI rx counter */
1003 	pkt.index = cpu_to_le32(cpucp_pcie_throughput_rx);
1004 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
1005 					HL_CPUCP_INFO_TIMEOUT_USEC, &result);
1006 	if (rc) {
1007 		dev_err(hdev->dev,
1008 			"Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
1009 		return rc;
1010 	}
1011 	counters->rx_throughput = result;
1012 
1013 	memset(&pkt, 0, sizeof(pkt));
1014 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_THROUGHPUT_GET <<
1015 			CPUCP_PKT_CTL_OPCODE_SHIFT);
1016 
1017 	/* Fetch PCI tx counter */
1018 	pkt.index = cpu_to_le32(cpucp_pcie_throughput_tx);
1019 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
1020 					HL_CPUCP_INFO_TIMEOUT_USEC, &result);
1021 	if (rc) {
1022 		dev_err(hdev->dev,
1023 			"Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
1024 		return rc;
1025 	}
1026 	counters->tx_throughput = result;
1027 
1028 	/* Fetch PCI replay counter */
1029 	memset(&pkt, 0, sizeof(pkt));
1030 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_REPLAY_CNT_GET <<
1031 			CPUCP_PKT_CTL_OPCODE_SHIFT);
1032 
1033 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
1034 			HL_CPUCP_INFO_TIMEOUT_USEC, &result);
1035 	if (rc) {
1036 		dev_err(hdev->dev,
1037 			"Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
1038 		return rc;
1039 	}
1040 	counters->replay_cnt = (u32) result;
1041 
1042 	return rc;
1043 }
1044 
1045 int hl_fw_cpucp_total_energy_get(struct hl_device *hdev, u64 *total_energy)
1046 {
1047 	struct cpucp_packet pkt = {};
1048 	u64 result;
1049 	int rc;
1050 
1051 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_TOTAL_ENERGY_GET <<
1052 				CPUCP_PKT_CTL_OPCODE_SHIFT);
1053 
1054 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
1055 					HL_CPUCP_INFO_TIMEOUT_USEC, &result);
1056 	if (rc) {
1057 		dev_err(hdev->dev,
1058 			"Failed to handle CpuCP total energy pkt, error %d\n",
1059 				rc);
1060 		return rc;
1061 	}
1062 
1063 	*total_energy = result;
1064 
1065 	return rc;
1066 }
1067 
1068 int get_used_pll_index(struct hl_device *hdev, u32 input_pll_index,
1069 						enum pll_index *pll_index)
1070 {
1071 	struct asic_fixed_properties *prop = &hdev->asic_prop;
1072 	u8 pll_byte, pll_bit_off;
1073 	bool dynamic_pll;
1074 	int fw_pll_idx;
1075 
1076 	dynamic_pll = !!(prop->fw_app_cpu_boot_dev_sts0 &
1077 						CPU_BOOT_DEV_STS0_DYN_PLL_EN);
1078 
1079 	if (!dynamic_pll) {
1080 		/*
1081 		 * in case we are working with legacy FW (each ASIC has unique
1082 		 * PLL numbering) use the driver-based index as it is
1083 		 * aligned with the FW legacy numbering
1084 		 */
1085 		*pll_index = input_pll_index;
1086 		return 0;
1087 	}
1088 
1089 	/* retrieve a FW compatible PLL index based on
1090 	 * ASIC specific user request
1091 	 */
1092 	fw_pll_idx = hdev->asic_funcs->map_pll_idx_to_fw_idx(input_pll_index);
1093 	if (fw_pll_idx < 0) {
1094 		dev_err(hdev->dev, "Invalid PLL index (%u) error %d\n",
1095 			input_pll_index, fw_pll_idx);
1096 		return -EINVAL;
1097 	}
1098 
1099 	/* PLL map is a u8 array */
1100 	pll_byte = prop->cpucp_info.pll_map[fw_pll_idx >> 3];
1101 	pll_bit_off = fw_pll_idx & 0x7;
1102 
1103 	if (!(pll_byte & BIT(pll_bit_off))) {
1104 		dev_err(hdev->dev, "PLL index %d is not supported\n",
1105 			fw_pll_idx);
1106 		return -EINVAL;
1107 	}
1108 
1109 	*pll_index = fw_pll_idx;
1110 
1111 	return 0;
1112 }
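
/*
 * Illustrative worked example: the pll_map lookup above is a plain bitmap
 * test over a u8 array. For fw_pll_idx == 10, the byte index is 10 >> 3 == 1
 * and the bit offset is 10 & 0x7 == 2, so the PLL is reported as supported
 * only when bit 2 of pll_map[1] is set.
 */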
1113 
1114 int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, u32 pll_index,
1115 		u16 *pll_freq_arr)
1116 {
1117 	struct cpucp_packet pkt;
1118 	enum pll_index used_pll_idx;
1119 	u64 result;
1120 	int rc;
1121 
1122 	rc = get_used_pll_index(hdev, pll_index, &used_pll_idx);
1123 	if (rc)
1124 		return rc;
1125 
1126 	memset(&pkt, 0, sizeof(pkt));
1127 
1128 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_PLL_INFO_GET <<
1129 				CPUCP_PKT_CTL_OPCODE_SHIFT);
1130 	pkt.pll_type = __cpu_to_le16((u16)used_pll_idx);
1131 
1132 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
1133 			HL_CPUCP_INFO_TIMEOUT_USEC, &result);
1134 	if (rc) {
1135 		dev_err(hdev->dev, "Failed to read PLL info, error %d\n", rc);
1136 		return rc;
1137 	}
1138 
1139 	pll_freq_arr[0] = FIELD_GET(CPUCP_PKT_RES_PLL_OUT0_MASK, result);
1140 	pll_freq_arr[1] = FIELD_GET(CPUCP_PKT_RES_PLL_OUT1_MASK, result);
1141 	pll_freq_arr[2] = FIELD_GET(CPUCP_PKT_RES_PLL_OUT2_MASK, result);
1142 	pll_freq_arr[3] = FIELD_GET(CPUCP_PKT_RES_PLL_OUT3_MASK, result);
1143 
1144 	return 0;
1145 }
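
/*
 * Illustrative note (assumption about the mask layout): the 64-bit result is
 * expected to pack the four PLL output frequencies as consecutive 16-bit
 * fields, so the FIELD_GET() calls above amount to:
 *
 *	pll_freq_arr[i] = (result >> (16 * i)) & 0xFFFF;	for i = 0..3
 *
 * with the exact bit positions given by the CPUCP_PKT_RES_PLL_OUT*_MASK
 * definitions in the interface header.
 */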
1146 
1147 int hl_fw_cpucp_power_get(struct hl_device *hdev, u64 *power)
1148 {
1149 	struct cpucp_packet pkt;
1150 	u64 result;
1151 	int rc;
1152 
1153 	memset(&pkt, 0, sizeof(pkt));
1154 
1155 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_POWER_GET <<
1156 				CPUCP_PKT_CTL_OPCODE_SHIFT);
1157 	pkt.type = cpu_to_le16(CPUCP_POWER_INPUT);
1158 
1159 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
1160 			HL_CPUCP_INFO_TIMEOUT_USEC, &result);
1161 	if (rc) {
1162 		dev_err(hdev->dev, "Failed to read power, error %d\n", rc);
1163 		return rc;
1164 	}
1165 
1166 	*power = result;
1167 
1168 	return rc;
1169 }
1170 
1171 int hl_fw_dram_replaced_row_get(struct hl_device *hdev,
1172 				struct cpucp_hbm_row_info *info)
1173 {
1174 	struct cpucp_hbm_row_info *cpucp_repl_rows_info_cpu_addr;
1175 	dma_addr_t cpucp_repl_rows_info_dma_addr;
1176 	struct cpucp_packet pkt = {};
1177 	u64 result;
1178 	int rc;
1179 
1180 	cpucp_repl_rows_info_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev,
1181 							sizeof(struct cpucp_hbm_row_info),
1182 							&cpucp_repl_rows_info_dma_addr);
1183 	if (!cpucp_repl_rows_info_cpu_addr) {
1184 		dev_err(hdev->dev,
1185 			"Failed to allocate DMA memory for CPU-CP replaced rows info packet\n");
1186 		return -ENOMEM;
1187 	}
1188 
1189 	memset(cpucp_repl_rows_info_cpu_addr, 0, sizeof(struct cpucp_hbm_row_info));
1190 
1191 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_HBM_REPLACED_ROWS_INFO_GET <<
1192 					CPUCP_PKT_CTL_OPCODE_SHIFT);
1193 	pkt.addr = cpu_to_le64(cpucp_repl_rows_info_dma_addr);
1194 	pkt.data_max_size = cpu_to_le32(sizeof(struct cpucp_hbm_row_info));
1195 
1196 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
1197 					HL_CPUCP_INFO_TIMEOUT_USEC, &result);
1198 	if (rc) {
1199 		dev_err(hdev->dev,
1200 			"Failed to handle CPU-CP replaced rows info pkt, error %d\n", rc);
1201 		goto out;
1202 	}
1203 
1204 	memcpy(info, cpucp_repl_rows_info_cpu_addr, sizeof(*info));
1205 
1206 out:
1207 	hl_cpu_accessible_dma_pool_free(hdev, sizeof(struct cpucp_hbm_row_info),
1208 						cpucp_repl_rows_info_cpu_addr);
1209 
1210 	return rc;
1211 }
1212 
1213 int hl_fw_dram_pending_row_get(struct hl_device *hdev, u32 *pend_rows_num)
1214 {
1215 	struct cpucp_packet pkt;
1216 	u64 result;
1217 	int rc;
1218 
1219 	memset(&pkt, 0, sizeof(pkt));
1220 
1221 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_HBM_PENDING_ROWS_STATUS << CPUCP_PKT_CTL_OPCODE_SHIFT);
1222 
1223 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, &result);
1224 	if (rc) {
1225 		dev_err(hdev->dev,
1226 				"Failed to handle CPU-CP pending rows info pkt, error %d\n", rc);
1227 		goto out;
1228 	}
1229 
1230 	*pend_rows_num = (u32) result;
1231 out:
1232 	return rc;
1233 }
1234 
1235 int hl_fw_cpucp_engine_core_asid_set(struct hl_device *hdev, u32 asid)
1236 {
1237 	struct cpucp_packet pkt;
1238 	int rc;
1239 
1240 	memset(&pkt, 0, sizeof(pkt));
1241 
1242 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_ENGINE_CORE_ASID_SET << CPUCP_PKT_CTL_OPCODE_SHIFT);
1243 	pkt.value = cpu_to_le64(asid);
1244 
1245 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
1246 						HL_CPUCP_INFO_TIMEOUT_USEC, NULL);
1247 	if (rc)
1248 		dev_err(hdev->dev,
1249 			"Failed on ASID configuration request for engine core, error %d\n",
1250 			rc);
1251 
1252 	return rc;
1253 }
1254 
1255 void hl_fw_ask_hard_reset_without_linux(struct hl_device *hdev)
1256 {
1257 	struct static_fw_load_mgr *static_loader =
1258 			&hdev->fw_loader.static_loader;
1259 	int rc;
1260 
1261 	if (hdev->asic_prop.dynamic_fw_load) {
1262 		rc = hl_fw_dynamic_send_protocol_cmd(hdev, &hdev->fw_loader,
1263 				COMMS_RST_DEV, 0, false,
1264 				hdev->fw_loader.cpu_timeout);
1265 		if (rc)
1266 			dev_warn(hdev->dev, "Failed sending COMMS_RST_DEV\n");
1267 	} else {
1268 		WREG32(static_loader->kmd_msg_to_cpu_reg, KMD_MSG_RST_DEV);
1269 	}
1270 }
1271 
1272 void hl_fw_ask_halt_machine_without_linux(struct hl_device *hdev)
1273 {
1274 	struct static_fw_load_mgr *static_loader =
1275 			&hdev->fw_loader.static_loader;
1276 	int rc;
1277 
1278 	if (hdev->device_cpu_is_halted)
1279 		return;
1280 
1281 	/* Stop device CPU to make sure nothing bad happens */
1282 	if (hdev->asic_prop.dynamic_fw_load) {
1283 		rc = hl_fw_dynamic_send_protocol_cmd(hdev, &hdev->fw_loader,
1284 				COMMS_GOTO_WFE, 0, true,
1285 				hdev->fw_loader.cpu_timeout);
1286 		if (rc)
1287 			dev_warn(hdev->dev, "Failed sending COMMS_GOTO_WFE\n");
1288 	} else {
1289 		WREG32(static_loader->kmd_msg_to_cpu_reg, KMD_MSG_GOTO_WFE);
1290 		msleep(static_loader->cpu_reset_wait_msec);
1291 
1292 		/* Must clear this register in order to prevent preboot
1293 		 * from reading WFE after reboot
1294 		 */
1295 		WREG32(static_loader->kmd_msg_to_cpu_reg, KMD_MSG_NA);
1296 	}
1297 
1298 	hdev->device_cpu_is_halted = true;
1299 }
1300 
1301 static void detect_cpu_boot_status(struct hl_device *hdev, u32 status)
1302 {
1303 	/* Some of the status codes below are deprecated in newer f/w
1304 	 * versions but we keep them here for backward compatibility
1305 	 */
1306 	switch (status) {
1307 	case CPU_BOOT_STATUS_NA:
1308 		dev_err(hdev->dev,
1309 			"Device boot progress - BTL/ROM did NOT run\n");
1310 		break;
1311 	case CPU_BOOT_STATUS_IN_WFE:
1312 		dev_err(hdev->dev,
1313 			"Device boot progress - Stuck inside WFE loop\n");
1314 		break;
1315 	case CPU_BOOT_STATUS_IN_BTL:
1316 		dev_err(hdev->dev,
1317 			"Device boot progress - Stuck in BTL\n");
1318 		break;
1319 	case CPU_BOOT_STATUS_IN_PREBOOT:
1320 		dev_err(hdev->dev,
1321 			"Device boot progress - Stuck in Preboot\n");
1322 		break;
1323 	case CPU_BOOT_STATUS_IN_SPL:
1324 		dev_err(hdev->dev,
1325 			"Device boot progress - Stuck in SPL\n");
1326 		break;
1327 	case CPU_BOOT_STATUS_IN_UBOOT:
1328 		dev_err(hdev->dev,
1329 			"Device boot progress - Stuck in u-boot\n");
1330 		break;
1331 	case CPU_BOOT_STATUS_DRAM_INIT_FAIL:
1332 		dev_err(hdev->dev,
1333 			"Device boot progress - DRAM initialization failed\n");
1334 		break;
1335 	case CPU_BOOT_STATUS_UBOOT_NOT_READY:
1336 		dev_err(hdev->dev,
1337 			"Device boot progress - Cannot boot\n");
1338 		break;
1339 	case CPU_BOOT_STATUS_TS_INIT_FAIL:
1340 		dev_err(hdev->dev,
1341 			"Device boot progress - Thermal Sensor initialization failed\n");
1342 		break;
1343 	case CPU_BOOT_STATUS_SECURITY_READY:
1344 		dev_err(hdev->dev,
1345 			"Device boot progress - Stuck in preboot after security initialization\n");
1346 		break;
1347 	default:
1348 		dev_err(hdev->dev,
1349 			"Device boot progress - Invalid or unexpected status code %d\n", status);
1350 		break;
1351 	}
1352 }
1353 
1354 int hl_fw_wait_preboot_ready(struct hl_device *hdev)
1355 {
1356 	struct pre_fw_load_props *pre_fw_load = &hdev->fw_loader.pre_fw_load;
1357 	u32 status;
1358 	int rc;
1359 
1360 	/* Need to check two possible scenarios:
1361 	 *
1362 	 * CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT - for newer firmwares where
1363 	 * the preboot is waiting for the boot fit
1364 	 *
1365 	 * All other status values - for older firmwares where the uboot was
1366 	 * loaded from the FLASH
1367 	 */
1368 	rc = hl_poll_timeout(
1369 		hdev,
1370 		pre_fw_load->cpu_boot_status_reg,
1371 		status,
1372 		(status == CPU_BOOT_STATUS_NIC_FW_RDY) ||
1373 		(status == CPU_BOOT_STATUS_READY_TO_BOOT) ||
1374 		(status == CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT),
1375 		hdev->fw_poll_interval_usec,
1376 		pre_fw_load->wait_for_preboot_timeout);
1377 
1378 	if (rc) {
1379 		detect_cpu_boot_status(hdev, status);
1380 		dev_err(hdev->dev, "CPU boot ready timeout (status = %d)\n", status);
1381 
1382 		/* If we read all FF, then something is totally wrong, no point
1383 		 * of reading specific errors
1384 		 */
1385 		if (status != -1)
1386 			fw_read_errors(hdev, pre_fw_load->boot_err0_reg,
1387 						pre_fw_load->boot_err1_reg,
1388 						pre_fw_load->sts_boot_dev_sts0_reg,
1389 						pre_fw_load->sts_boot_dev_sts1_reg);
1390 		return -EIO;
1391 	}
1392 
1393 	hdev->fw_loader.fw_comp_loaded |= FW_TYPE_PREBOOT_CPU;
1394 
1395 	return 0;
1396 }
1397 
1398 static int hl_fw_read_preboot_caps(struct hl_device *hdev)
1399 {
1400 	struct pre_fw_load_props *pre_fw_load;
1401 	struct asic_fixed_properties *prop;
1402 	u32 reg_val;
1403 	int rc;
1404 
1405 	prop = &hdev->asic_prop;
1406 	pre_fw_load = &hdev->fw_loader.pre_fw_load;
1407 
1408 	rc = hl_fw_wait_preboot_ready(hdev);
1409 	if (rc)
1410 		return rc;
1411 
1412 	/*
1413 	 * the registers DEV_STS* contain FW capabilities/features.
1414 	 * We can rely on these registers only if bit CPU_BOOT_DEV_STS*_ENABLED
1415 	 * is set.
1416 	 * In the first read of this register we store the value of this
1417 	 * register ONLY if the register is enabled (which will be propagated
1418 	 * to next stages) and also mark the register as valid.
1419 	 * In case it is not enabled the stored value will be left 0, i.e. all
1420 	 * caps/features are off.
1421 	 */
1422 	reg_val = RREG32(pre_fw_load->sts_boot_dev_sts0_reg);
1423 	if (reg_val & CPU_BOOT_DEV_STS0_ENABLED) {
1424 		prop->fw_cpu_boot_dev_sts0_valid = true;
1425 		prop->fw_preboot_cpu_boot_dev_sts0 = reg_val;
1426 	}
1427 
1428 	reg_val = RREG32(pre_fw_load->sts_boot_dev_sts1_reg);
1429 	if (reg_val & CPU_BOOT_DEV_STS1_ENABLED) {
1430 		prop->fw_cpu_boot_dev_sts1_valid = true;
1431 		prop->fw_preboot_cpu_boot_dev_sts1 = reg_val;
1432 	}
1433 
1434 	prop->dynamic_fw_load = !!(prop->fw_preboot_cpu_boot_dev_sts0 &
1435 						CPU_BOOT_DEV_STS0_FW_LD_COM_EN);
1436 
1437 	/* initialize FW loader once we know what load protocol is used */
1438 	hdev->asic_funcs->init_firmware_loader(hdev);
1439 
1440 	dev_dbg(hdev->dev, "Attempting %s FW load\n",
1441 			prop->dynamic_fw_load ? "dynamic" : "legacy");
1442 	return 0;
1443 }
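
/*
 * Illustrative sketch: because the latched value stays 0 when the register is
 * not enabled, later consumers can test capability bits directly, e.g.:
 *
 *	if (prop->fw_preboot_cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_FW_LD_COM_EN)
 *		(the dynamic COMMS load protocol is available)
 *
 * while the *_valid flags gate code paths that re-read the live registers,
 * as done in hl_fw_cpucp_info_get() above.
 */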
1444 
1445 static int hl_fw_static_read_device_fw_version(struct hl_device *hdev,
1446 					enum hl_fw_component fwc)
1447 {
1448 	struct asic_fixed_properties *prop = &hdev->asic_prop;
1449 	struct fw_load_mgr *fw_loader = &hdev->fw_loader;
1450 	struct static_fw_load_mgr *static_loader;
1451 	char *dest, *boot_ver, *preboot_ver;
1452 	u32 ver_off, limit;
1453 	const char *name;
1454 	char btl_ver[32];
1455 
1456 	static_loader = &hdev->fw_loader.static_loader;
1457 
1458 	switch (fwc) {
1459 	case FW_COMP_BOOT_FIT:
1460 		ver_off = RREG32(static_loader->boot_fit_version_offset_reg);
1461 		dest = prop->uboot_ver;
1462 		name = "Boot-fit";
1463 		limit = static_loader->boot_fit_version_max_off;
1464 		break;
1465 	case FW_COMP_PREBOOT:
1466 		ver_off = RREG32(static_loader->preboot_version_offset_reg);
1467 		dest = prop->preboot_ver;
1468 		name = "Preboot";
1469 		limit = static_loader->preboot_version_max_off;
1470 		break;
1471 	default:
1472 		dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc);
1473 		return -EIO;
1474 	}
1475 
1476 	ver_off &= static_loader->sram_offset_mask;
1477 
1478 	if (ver_off < limit) {
1479 		memcpy_fromio(dest,
1480 			hdev->pcie_bar[fw_loader->sram_bar_id] + ver_off,
1481 			VERSION_MAX_LEN);
1482 	} else {
1483 		dev_err(hdev->dev, "%s version offset (0x%x) is above SRAM\n",
1484 								name, ver_off);
1485 		strscpy(dest, "unavailable", VERSION_MAX_LEN);
1486 		return -EIO;
1487 	}
1488 
1489 	if (fwc == FW_COMP_BOOT_FIT) {
1490 		boot_ver = extract_fw_ver_from_str(prop->uboot_ver);
1491 		if (boot_ver) {
1492 			dev_info(hdev->dev, "boot-fit version %s\n", boot_ver);
1493 			kfree(boot_ver);
1494 		}
1495 	} else if (fwc == FW_COMP_PREBOOT) {
1496 		preboot_ver = strnstr(prop->preboot_ver, "Preboot",
1497 						VERSION_MAX_LEN);
1498 		if (preboot_ver && preboot_ver != prop->preboot_ver) {
1499 			strscpy(btl_ver, prop->preboot_ver,
1500 				min((int) (preboot_ver - prop->preboot_ver),
1501 									31));
1502 			dev_info(hdev->dev, "%s\n", btl_ver);
1503 		}
1504 
1505 		preboot_ver = extract_fw_ver_from_str(prop->preboot_ver);
1506 		if (preboot_ver) {
1507 			dev_info(hdev->dev, "preboot version %s\n",
1508 								preboot_ver);
1509 			kfree(preboot_ver);
1510 		}
1511 	}
1512 
1513 	return 0;
1514 }
1515 
1516 /**
1517  * hl_fw_preboot_update_state - update internal data structures during
1518  *                              handshake with preboot
1519  *
1521  * @hdev: pointer to the habanalabs device structure
1524  */
1525 static void hl_fw_preboot_update_state(struct hl_device *hdev)
1526 {
1527 	struct asic_fixed_properties *prop = &hdev->asic_prop;
1528 	u32 cpu_boot_dev_sts0, cpu_boot_dev_sts1;
1529 
1530 	cpu_boot_dev_sts0 = prop->fw_preboot_cpu_boot_dev_sts0;
1531 	cpu_boot_dev_sts1 = prop->fw_preboot_cpu_boot_dev_sts1;
1532 
1533 	/* We read boot_dev_sts registers multiple times during boot:
1534 	 * 1. preboot - a. Check whether the security status bits are valid
1535 	 *              b. Check whether fw security is enabled
1536 	 *              c. Check whether hard reset is done by preboot
1537 	 * 2. boot cpu - a. Fetch boot cpu security status
1538 	 *               b. Check whether hard reset is done by boot cpu
1539 	 * 3. FW application - a. Fetch fw application security status
1540 	 *                     b. Check whether hard reset is done by fw app
1541 	 */
1542 	prop->hard_reset_done_by_fw = !!(cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_FW_HARD_RST_EN);
1543 
1544 	prop->fw_security_enabled = !!(cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_SECURITY_EN);
1545 
1546 	dev_dbg(hdev->dev, "Firmware preboot boot device status0 %#x\n",
1547 							cpu_boot_dev_sts0);
1548 
1549 	dev_dbg(hdev->dev, "Firmware preboot boot device status1 %#x\n",
1550 							cpu_boot_dev_sts1);
1551 
1552 	dev_dbg(hdev->dev, "Firmware preboot hard-reset is %s\n",
1553 			prop->hard_reset_done_by_fw ? "enabled" : "disabled");
1554 
1555 	dev_dbg(hdev->dev, "firmware-level security is %s\n",
1556 			prop->fw_security_enabled ? "enabled" : "disabled");
1557 
1558 	dev_dbg(hdev->dev, "GIC controller is %s\n",
1559 			prop->gic_interrupts_enable ? "enabled" : "disabled");
1560 }
1561 
1562 static int hl_fw_static_read_preboot_status(struct hl_device *hdev)
1563 {
1564 	int rc;
1565 
1566 	rc = hl_fw_static_read_device_fw_version(hdev, FW_COMP_PREBOOT);
1567 	if (rc)
1568 		return rc;
1569 
1570 	return 0;
1571 }
1572 
1573 int hl_fw_read_preboot_status(struct hl_device *hdev)
1574 {
1575 	int rc;
1576 
1577 	if (!(hdev->fw_components & FW_TYPE_PREBOOT_CPU))
1578 		return 0;
1579 
1580 	/* get FW pre-load parameters  */
1581 	hdev->asic_funcs->init_firmware_preload_params(hdev);
1582 
1583 	/*
1584 	 * In order to determine boot method (static VS dynamic) we need to
1585 	 * read the boot caps register
1586 	 */
1587 	rc = hl_fw_read_preboot_caps(hdev);
1588 	if (rc)
1589 		return rc;
1590 
1591 	hl_fw_preboot_update_state(hdev);
1592 
1593 	/* no need to read preboot status in dynamic load */
1594 	if (hdev->asic_prop.dynamic_fw_load)
1595 		return 0;
1596 
1597 	return hl_fw_static_read_preboot_status(hdev);
1598 }
1599 
1600 /* associate string with COMMS status */
1601 static char *hl_dynamic_fw_status_str[COMMS_STS_INVLD_LAST] = {
1602 	[COMMS_STS_NOOP] = "NOOP",
1603 	[COMMS_STS_ACK] = "ACK",
1604 	[COMMS_STS_OK] = "OK",
1605 	[COMMS_STS_ERR] = "ERR",
1606 	[COMMS_STS_VALID_ERR] = "VALID_ERR",
1607 	[COMMS_STS_TIMEOUT_ERR] = "TIMEOUT_ERR",
1608 };
1609 
1610 /**
1611  * hl_fw_dynamic_report_error_status - report error status
1612  *
1613  * @hdev: pointer to the habanalabs device structure
1614  * @status: value of FW status register
1615  * @expected_status: the expected status
1616  */
1617 static void hl_fw_dynamic_report_error_status(struct hl_device *hdev,
1618 						u32 status,
1619 						enum comms_sts expected_status)
1620 {
1621 	enum comms_sts comm_status =
1622 				FIELD_GET(COMMS_STATUS_STATUS_MASK, status);
1623 
1624 	if (comm_status < COMMS_STS_INVLD_LAST)
1625 		dev_err(hdev->dev, "Device status %s, expected status: %s\n",
1626 				hl_dynamic_fw_status_str[comm_status],
1627 				hl_dynamic_fw_status_str[expected_status]);
1628 	else
1629 		dev_err(hdev->dev, "Device status unknown %d, expected status: %s\n",
1630 				comm_status,
1631 				hl_dynamic_fw_status_str[expected_status]);
1632 }
1633 
1634 /**
1635  * hl_fw_dynamic_send_cmd - send LKD to FW cmd
1636  *
1637  * @hdev: pointer to the habanalabs device structure
1638  * @fw_loader: managing structure for loading device's FW
1639  * @cmd: LKD to FW cmd code
1640  * @size: size of next FW component to be loaded (0 if not necessary)
1641  *
1642  * The exact LKD to FW command layout is defined by struct comms_command.
1643  * note: the size argument is used only when the next FW component should be
1644  *       loaded, otherwise it shall be 0. the size is used by the FW in later
1645  *       protocol stages; when the command is sent it only indicates the amount
1646  *       of memory the FW should allocate for receiving the next boot component.
1647  */
1648 static void hl_fw_dynamic_send_cmd(struct hl_device *hdev,
1649 				struct fw_load_mgr *fw_loader,
1650 				enum comms_cmd cmd, unsigned int size)
1651 {
1652 	struct cpu_dyn_regs *dyn_regs;
1653 	u32 val;
1654 
1655 	dyn_regs = &fw_loader->dynamic_loader.comm_desc.cpu_dyn_regs;
1656 
1657 	val = FIELD_PREP(COMMS_COMMAND_CMD_MASK, cmd);
1658 	val |= FIELD_PREP(COMMS_COMMAND_SIZE_MASK, size);
1659 
1660 	trace_habanalabs_comms_send_cmd(hdev->dev, comms_cmd_str_arr[cmd]);
1661 	WREG32(le32_to_cpu(dyn_regs->kmd_msg_to_cpu), val);
1662 }
1663 
1664 /**
1665  * hl_fw_dynamic_extract_fw_response - update the FW response
1666  *
1667  * @hdev: pointer to the habanalabs device structure
1668  * @fw_loader: managing structure for loading device's FW
1669  * @response: FW response
1670  * @status: the status read from CPU status register
1671  *
1672  * @return 0 on success, otherwise non-zero error code
1673  */
1674 static int hl_fw_dynamic_extract_fw_response(struct hl_device *hdev,
1675 						struct fw_load_mgr *fw_loader,
1676 						struct fw_response *response,
1677 						u32 status)
1678 {
1679 	response->status = FIELD_GET(COMMS_STATUS_STATUS_MASK, status);
1680 	response->ram_offset = FIELD_GET(COMMS_STATUS_OFFSET_MASK, status) <<
1681 						COMMS_STATUS_OFFSET_ALIGN_SHIFT;
1682 	response->ram_type = FIELD_GET(COMMS_STATUS_RAM_TYPE_MASK, status);
1683 
1684 	if ((response->ram_type != COMMS_SRAM) &&
1685 					(response->ram_type != COMMS_DRAM)) {
1686 		dev_err(hdev->dev, "FW status: invalid RAM type %u\n",
1687 							response->ram_type);
1688 		return -EIO;
1689 	}
1690 
1691 	return 0;
1692 }
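
/*
 * Illustrative note: the RAM offset is communicated by the FW in aligned
 * units, which is why the extracted field is shifted left by
 * COMMS_STATUS_OFFSET_ALIGN_SHIFT above; this lets a wider address range fit
 * into the status register. For example, with an align shift of 2 a field
 * value of 0x100 would describe a byte offset of 0x400 (assumption, the
 * actual shift is defined in the interface header).
 */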
1693 
1694 /**
1695  * hl_fw_dynamic_wait_for_status - wait for status in dynamic FW load
1696  *
1697  * @hdev: pointer to the habanalabs device structure
1698  * @fw_loader: managing structure for loading device's FW
1699  * @expected_status: expected status to wait for
1700  * @timeout: timeout for status wait
1701  *
1702  * @return 0 on success, otherwise non-zero error code
1703  *
1704  * waiting for status from FW includes polling the FW status register until
1705  * the expected status is received or a timeout occurs (whichever occurs first).
1706  */
1707 static int hl_fw_dynamic_wait_for_status(struct hl_device *hdev,
1708 						struct fw_load_mgr *fw_loader,
1709 						enum comms_sts expected_status,
1710 						u32 timeout)
1711 {
1712 	struct cpu_dyn_regs *dyn_regs;
1713 	u32 status;
1714 	int rc;
1715 
1716 	dyn_regs = &fw_loader->dynamic_loader.comm_desc.cpu_dyn_regs;
1717 
1718 	trace_habanalabs_comms_wait_status(hdev->dev, comms_sts_str_arr[expected_status]);
1719 
1720 	/* Wait for expected status */
1721 	rc = hl_poll_timeout(
1722 		hdev,
1723 		le32_to_cpu(dyn_regs->cpu_cmd_status_to_host),
1724 		status,
1725 		FIELD_GET(COMMS_STATUS_STATUS_MASK, status) == expected_status,
1726 		hdev->fw_comms_poll_interval_usec,
1727 		timeout);
1728 
1729 	if (rc) {
1730 		hl_fw_dynamic_report_error_status(hdev, status,
1731 							expected_status);
1732 		return -EIO;
1733 	}
1734 
1735 	trace_habanalabs_comms_wait_status_done(hdev->dev, comms_sts_str_arr[expected_status]);
1736 
1737 	/*
1738 	 * skip storing FW response for NOOP to preserve the actual desired
1739 	 * FW status
1740 	 */
1741 	if (expected_status == COMMS_STS_NOOP)
1742 		return 0;
1743 
1744 	rc = hl_fw_dynamic_extract_fw_response(hdev, fw_loader,
1745 					&fw_loader->dynamic_loader.response,
1746 					status);
1747 	return rc;
1748 }
1749 
1750 /**
1751  * hl_fw_dynamic_send_clear_cmd - send clear command to FW
1752  *
1753  * @hdev: pointer to the habanalabs device structure
1754  * @fw_loader: managing structure for loading device's FW
1755  *
1756  * @return 0 on success, otherwise non-zero error code
1757  *
1758  * after a command cycle between LKD and FW CPU (i.e. LKD got an expected status
1759  * from FW) we need to clear the CPU status register in order to avoid garbage
1760  * between command cycles.
1761  * This is done by sending a clear command and polling the CPU to LKD status
1762  * register until it holds the NOOP status.
1763  */
1764 static int hl_fw_dynamic_send_clear_cmd(struct hl_device *hdev,
1765 						struct fw_load_mgr *fw_loader)
1766 {
1767 	hl_fw_dynamic_send_cmd(hdev, fw_loader, COMMS_CLR_STS, 0);
1768 
1769 	return hl_fw_dynamic_wait_for_status(hdev, fw_loader, COMMS_STS_NOOP,
1770 							fw_loader->cpu_timeout);
1771 }
1772 
1773 /**
1774  * hl_fw_dynamic_send_protocol_cmd - send LKD to FW cmd and wait for ACK
1775  *
1776  * @hdev: pointer to the habanalabs device structure
1777  * @fw_loader: managing structure for loading device's FW
1778  * @cmd: LKD to FW cmd code
1779  * @size: size of next FW component to be loaded (0 if not necessary)
1780  * @wait_ok: if true also wait for OK response from FW
1781  * @timeout: timeout for status wait
1782  *
1783  * @return 0 on success, otherwise non-zero error code
1784  *
1785  * brief:
1786  * when sending a protocol command we have the following steps:
1787  * - send clear (clear command and verify clear status register)
1788  * - send the actual protocol command
1789  * - wait for ACK on the protocol command
1790  * - send clear
1791  * - send NOOP
1792  * if, in addition, the specific protocol command should wait for OK then:
1793  * - wait for OK
1794  * - send clear
1795  * - send NOOP
1796  *
1797  * NOTES:
 * send clear: this is necessary in order to clear the status register and avoid
 *             leftovers between commands
 * NOOP command: necessary to avoid the FW looping on the clear command
1801  */
1802 int hl_fw_dynamic_send_protocol_cmd(struct hl_device *hdev,
1803 				struct fw_load_mgr *fw_loader,
1804 				enum comms_cmd cmd, unsigned int size,
1805 				bool wait_ok, u32 timeout)
1806 {
1807 	int rc;
1808 
1809 	trace_habanalabs_comms_protocol_cmd(hdev->dev, comms_cmd_str_arr[cmd]);
1810 
1811 	/* first send clear command to clean former commands */
1812 	rc = hl_fw_dynamic_send_clear_cmd(hdev, fw_loader);
1813 	if (rc)
1814 		return rc;
1815 
1816 	/* send the actual command */
1817 	hl_fw_dynamic_send_cmd(hdev, fw_loader, cmd, size);
1818 
1819 	/* wait for ACK for the command */
1820 	rc = hl_fw_dynamic_wait_for_status(hdev, fw_loader, COMMS_STS_ACK,
1821 								timeout);
1822 	if (rc)
1823 		return rc;
1824 
1825 	/* clear command to prepare for NOOP command */
1826 	rc = hl_fw_dynamic_send_clear_cmd(hdev, fw_loader);
1827 	if (rc)
1828 		return rc;
1829 
1830 	/* send the actual NOOP command */
1831 	hl_fw_dynamic_send_cmd(hdev, fw_loader, COMMS_NOOP, 0);
1832 
1833 	if (!wait_ok)
1834 		return 0;
1835 
1836 	rc = hl_fw_dynamic_wait_for_status(hdev, fw_loader, COMMS_STS_OK,
1837 								timeout);
1838 	if (rc)
1839 		return rc;
1840 
1841 	/* clear command to prepare for NOOP command */
1842 	rc = hl_fw_dynamic_send_clear_cmd(hdev, fw_loader);
1843 	if (rc)
1844 		return rc;
1845 
1846 	/* send the actual NOOP command */
1847 	hl_fw_dynamic_send_cmd(hdev, fw_loader, COMMS_NOOP, 0);
1848 
1849 	return 0;
1850 }
1851 
1852 /**
1853  * hl_fw_compat_crc32 - CRC compatible with FW
1854  *
1855  * @data: pointer to the data
1856  * @size: size of the data
1857  *
1858  * @return the CRC32 result
1859  *
 * NOTE: the kernel's CRC32 differs from the standard CRC32 calculation.
 *       In order to be aligned we need to flip the bits of both the input
 *       initial CRC and the kernel's CRC32 result.
 *       In addition, both sides use an initial CRC of 0.
1864  */
1865 static u32 hl_fw_compat_crc32(u8 *data, size_t size)
1866 {
1867 	return ~crc32_le(~((u32)0), data, size);
1868 }
1869 
1870 /**
1871  * hl_fw_dynamic_validate_memory_bound - validate memory bounds for memory
1872  *                                        transfer (image or descriptor) between
1873  *                                        host and FW
1874  *
1875  * @hdev: pointer to the habanalabs device structure
1876  * @addr: device address of memory transfer
1877  * @size: memory transfer size
1878  * @region: PCI memory region
1879  *
1880  * @return 0 on success, otherwise non-zero error code
1881  */
1882 static int hl_fw_dynamic_validate_memory_bound(struct hl_device *hdev,
1883 						u64 addr, size_t size,
1884 						struct pci_mem_region *region)
1885 {
1886 	u64 end_addr;
1887 
1888 	/* now make sure that the memory transfer is within region's bounds */
1889 	end_addr = addr + size;
1890 	if (end_addr >= region->region_base + region->region_size) {
1891 		dev_err(hdev->dev,
1892 			"dynamic FW load: memory transfer end address out of memory region bounds. addr: %llx\n",
1893 							end_addr);
1894 		return -EIO;
1895 	}
1896 
1897 	/*
1898 	 * now make sure memory transfer is within predefined BAR bounds.
1899 	 * this is to make sure we do not need to set the bar (e.g. for DRAM
1900 	 * memory transfers)
1901 	 */
1902 	if (end_addr >= region->region_base - region->offset_in_bar +
1903 							region->bar_size) {
1904 		dev_err(hdev->dev,
1905 			"FW image beyond PCI BAR bounds\n");
1906 		return -EIO;
1907 	}
1908 
1909 	return 0;
1910 }
1911 
1912 /**
1913  * hl_fw_dynamic_validate_descriptor - validate FW descriptor
1914  *
1915  * @hdev: pointer to the habanalabs device structure
1916  * @fw_loader: managing structure for loading device's FW
1917  * @fw_desc: the descriptor from FW
1918  *
1919  * @return 0 on success, otherwise non-zero error code
1920  */
1921 static int hl_fw_dynamic_validate_descriptor(struct hl_device *hdev,
1922 					struct fw_load_mgr *fw_loader,
1923 					struct lkd_fw_comms_desc *fw_desc)
1924 {
1925 	struct pci_mem_region *region;
1926 	enum pci_region region_id;
1927 	size_t data_size;
1928 	u32 data_crc32;
1929 	u8 *data_ptr;
1930 	u64 addr;
1931 	int rc;
1932 
1933 	if (le32_to_cpu(fw_desc->header.magic) != HL_COMMS_DESC_MAGIC)
1934 		dev_dbg(hdev->dev, "Invalid magic for dynamic FW descriptor (%x)\n",
				le32_to_cpu(fw_desc->header.magic));
1936 
1937 	if (fw_desc->header.version != HL_COMMS_DESC_VER)
1938 		dev_dbg(hdev->dev, "Invalid version for dynamic FW descriptor (%x)\n",
1939 				fw_desc->header.version);
1940 
1941 	/*
	 * Calc CRC32 of the data without the header. Use the size of the descriptor
	 * reported by firmware, without calculating it ourselves, to allow adding
	 * more fields to the lkd_fw_comms_desc structure.
	 * Note that there are no alignment/stride issues here as all structures
	 * are 64-bit padded.
1947 	 */
1948 	data_ptr = (u8 *)fw_desc + sizeof(struct comms_desc_header);
1949 	data_size = le16_to_cpu(fw_desc->header.size);
1950 
1951 	data_crc32 = hl_fw_compat_crc32(data_ptr, data_size);
1952 	if (data_crc32 != le32_to_cpu(fw_desc->header.crc32)) {
1953 		dev_err(hdev->dev, "CRC32 mismatch for dynamic FW descriptor (%x:%x)\n",
			data_crc32, le32_to_cpu(fw_desc->header.crc32));
1955 		return -EIO;
1956 	}
1957 
1958 	/* find memory region to which to copy the image */
1959 	addr = le64_to_cpu(fw_desc->img_addr);
1960 	region_id = hl_get_pci_memory_region(hdev, addr);
	if ((region_id != PCI_REGION_SRAM) && (region_id != PCI_REGION_DRAM)) {
1962 		dev_err(hdev->dev, "Invalid region to copy FW image address=%llx\n", addr);
1963 		return -EIO;
1964 	}
1965 
1966 	region = &hdev->pci_mem_region[region_id];
1967 
1968 	/* store the region for the copy stage */
1969 	fw_loader->dynamic_loader.image_region = region;
1970 
1971 	/*
1972 	 * here we know that the start address is valid, now make sure that the
1973 	 * image is within region's bounds
1974 	 */
1975 	rc = hl_fw_dynamic_validate_memory_bound(hdev, addr,
1976 					fw_loader->dynamic_loader.fw_image_size,
1977 					region);
1978 	if (rc) {
1979 		dev_err(hdev->dev, "invalid mem transfer request for FW image\n");
1980 		return rc;
1981 	}
1982 
1983 	/* here we can mark the descriptor as valid as the content has been validated */
1984 	fw_loader->dynamic_loader.fw_desc_valid = true;
1985 
1986 	return 0;
1987 }
1988 
1989 static int hl_fw_dynamic_validate_response(struct hl_device *hdev,
1990 						struct fw_response *response,
1991 						struct pci_mem_region *region)
1992 {
1993 	u64 device_addr;
1994 	int rc;
1995 
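	/* the FW reported the descriptor location as an offset within the chosen RAM region */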
1996 	device_addr = region->region_base + response->ram_offset;
1997 
1998 	/*
	 * Validate that the descriptor is within the region's bounds.
	 * Note that as the start address was supplied according to the RAM
	 * type, testing only the end address is enough.
2002 	 */
2003 	rc = hl_fw_dynamic_validate_memory_bound(hdev, device_addr,
2004 					sizeof(struct lkd_fw_comms_desc),
2005 					region);
2006 	return rc;
2007 }
2008 
/**
 * hl_fw_dynamic_read_descriptor_msg - read and show the ASCII messages sent by the FW
2011  *
2012  * @hdev: pointer to the habanalabs device structure
2013  * @fw_desc: the descriptor from FW
2014  */
2015 static void hl_fw_dynamic_read_descriptor_msg(struct hl_device *hdev,
2016 					struct lkd_fw_comms_desc *fw_desc)
2017 {
2018 	int i;
2019 	char *msg;
2020 
2021 	for (i = 0 ; i < LKD_FW_ASCII_MSG_MAX ; i++) {
2022 		if (!fw_desc->ascii_msg[i].valid)
2023 			return;
2024 
2025 		/* force NULL termination */
2026 		msg = fw_desc->ascii_msg[i].msg;
2027 		msg[LKD_FW_ASCII_MSG_MAX_LEN - 1] = '\0';
2028 
2029 		switch (fw_desc->ascii_msg[i].msg_lvl) {
2030 		case LKD_FW_ASCII_MSG_ERR:
2031 			dev_err(hdev->dev, "fw: %s", fw_desc->ascii_msg[i].msg);
2032 			break;
2033 		case LKD_FW_ASCII_MSG_WRN:
2034 			dev_warn(hdev->dev, "fw: %s", fw_desc->ascii_msg[i].msg);
2035 			break;
2036 		case LKD_FW_ASCII_MSG_INF:
2037 			dev_info(hdev->dev, "fw: %s", fw_desc->ascii_msg[i].msg);
2038 			break;
2039 		default:
2040 			dev_dbg(hdev->dev, "fw: %s", fw_desc->ascii_msg[i].msg);
2041 			break;
2042 		}
2043 	}
2044 }
2045 
2046 /**
2047  * hl_fw_dynamic_read_and_validate_descriptor - read and validate FW descriptor
2048  *
2049  * @hdev: pointer to the habanalabs device structure
2050  * @fw_loader: managing structure for loading device's FW
2051  *
2052  * @return 0 on success, otherwise non-zero error code
2053  */
2054 static int hl_fw_dynamic_read_and_validate_descriptor(struct hl_device *hdev,
2055 						struct fw_load_mgr *fw_loader)
2056 {
2057 	struct lkd_fw_comms_desc *fw_desc;
2058 	struct pci_mem_region *region;
2059 	struct fw_response *response;
2060 	void *temp_fw_desc;
2061 	void __iomem *src;
2062 	u16 fw_data_size;
2063 	enum pci_region region_id;
2064 	int rc;
2065 
2066 	fw_desc = &fw_loader->dynamic_loader.comm_desc;
2067 	response = &fw_loader->dynamic_loader.response;
2068 
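	/* the FW reports in which RAM type (SRAM/DRAM) it placed the descriptor */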
2069 	region_id = (response->ram_type == COMMS_SRAM) ?
2070 					PCI_REGION_SRAM : PCI_REGION_DRAM;
2071 
2072 	region = &hdev->pci_mem_region[region_id];
2073 
2074 	rc = hl_fw_dynamic_validate_response(hdev, response, region);
2075 	if (rc) {
2076 		dev_err(hdev->dev,
2077 			"invalid mem transfer request for FW descriptor\n");
2078 		return rc;
2079 	}
2080 
2081 	/*
	 * Extract the address to copy the descriptor from.
	 * In addition, as the descriptor is going to be overridden by new data, we mark it
	 * as invalid. It will be marked as valid again once validated.
2086 	 */
2087 	fw_loader->dynamic_loader.fw_desc_valid = false;
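	/* translate the device address into a host-accessible address within the mapped PCI BAR */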
2088 	src = hdev->pcie_bar[region->bar_id] + region->offset_in_bar +
2089 							response->ram_offset;
2090 
2091 	/*
	 * We copy the FW descriptor in 2 phases:
	 * 1. Copy the header + data according to our lkd_fw_comms_desc definition.
	 *    Then we are able to read the actual data size provided by the FW.
	 *    This is needed for cases where fields were added to/removed from the
	 *    descriptor in the embedded specs header file before updating the LKD copy of it.
	 * 2. Copy the descriptor into a temporary buffer of the FW-reported size and send it to validation.
2098 	 */
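	/* phase 1: copy only our known layout in order to learn the FW-reported data size */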
2099 	memcpy_fromio(fw_desc, src, sizeof(struct lkd_fw_comms_desc));
2100 	fw_data_size = le16_to_cpu(fw_desc->header.size);
2101 
2102 	temp_fw_desc = vzalloc(sizeof(struct comms_desc_header) + fw_data_size);
2103 	if (!temp_fw_desc)
2104 		return -ENOMEM;
2105 
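	/* phase 2: copy the full descriptor (header + FW-reported data) and send it to validation */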
2106 	memcpy_fromio(temp_fw_desc, src, sizeof(struct comms_desc_header) + fw_data_size);
2107 
2108 	rc = hl_fw_dynamic_validate_descriptor(hdev, fw_loader,
2109 					(struct lkd_fw_comms_desc *) temp_fw_desc);
2110 
2111 	if (!rc)
2112 		hl_fw_dynamic_read_descriptor_msg(hdev, temp_fw_desc);
2113 
2114 	vfree(temp_fw_desc);
2115 
2116 	return rc;
2117 }
2118 
2119 /**
2120  * hl_fw_dynamic_request_descriptor - handshake with CPU to get FW descriptor
2121  *
2122  * @hdev: pointer to the habanalabs device structure
2123  * @fw_loader: managing structure for loading device's FW
2124  * @next_image_size: size to allocate for next FW component
2125  *
2126  * @return 0 on success, otherwise non-zero error code
2127  */
2128 static int hl_fw_dynamic_request_descriptor(struct hl_device *hdev,
2129 						struct fw_load_mgr *fw_loader,
2130 						size_t next_image_size)
2131 {
2132 	int rc;
2133 
2134 	rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_PREP_DESC,
2135 						next_image_size, true,
2136 						fw_loader->cpu_timeout);
2137 	if (rc)
2138 		return rc;
2139 
2140 	return hl_fw_dynamic_read_and_validate_descriptor(hdev, fw_loader);
2141 }
2142 
2143 /**
 * hl_fw_dynamic_read_device_fw_version - read FW version into exposed properties
 *
 * @hdev: pointer to the habanalabs device structure
 * @fwc: the firmware component
 * @fw_version: fw component's version string
 *
 * @return 0 on success, otherwise non-zero error code
 */
2150 static int hl_fw_dynamic_read_device_fw_version(struct hl_device *hdev,
2151 					enum hl_fw_component fwc,
2152 					const char *fw_version)
2153 {
2154 	struct asic_fixed_properties *prop = &hdev->asic_prop;
2155 	char *preboot_ver, *boot_ver;
2156 	char btl_ver[32];
2157 
2158 	switch (fwc) {
2159 	case FW_COMP_BOOT_FIT:
2160 		strscpy(prop->uboot_ver, fw_version, VERSION_MAX_LEN);
2161 		boot_ver = extract_fw_ver_from_str(prop->uboot_ver);
2162 		if (boot_ver) {
2163 			dev_info(hdev->dev, "boot-fit version %s\n", boot_ver);
2164 			kfree(boot_ver);
2165 		}
2166 
2167 		break;
2168 	case FW_COMP_PREBOOT:
2169 		strscpy(prop->preboot_ver, fw_version, VERSION_MAX_LEN);
2170 		preboot_ver = strnstr(prop->preboot_ver, "Preboot",
2171 						VERSION_MAX_LEN);
2172 		if (preboot_ver && preboot_ver != prop->preboot_ver) {
2173 			strscpy(btl_ver, prop->preboot_ver,
2174 				min((int) (preboot_ver - prop->preboot_ver), 31));
2175 			dev_info(hdev->dev, "%s\n", btl_ver);
2176 		}
2177 
2178 		preboot_ver = extract_fw_ver_from_str(prop->preboot_ver);
2179 		if (preboot_ver) {
2180 			int rc;
2181 
2182 			dev_info(hdev->dev, "preboot version %s\n", preboot_ver);
2183 
2184 			/* This function takes care of freeing preboot_ver */
2185 			rc = extract_fw_sub_versions(hdev, preboot_ver);
2186 			if (rc)
2187 				return rc;
2188 		}
2189 
2190 		break;
2191 	default:
2192 		dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc);
2193 		return -EINVAL;
2194 	}
2195 
2196 	return 0;
2197 }
2198 
2199 /**
2200  * hl_fw_dynamic_copy_image - copy image to memory allocated by the FW
2201  *
2202  * @hdev: pointer to the habanalabs device structure
 * @fw: the requested firmware image to copy
 * @fw_loader: managing structure for loading device's FW
 *
 * @return 0 on success, otherwise non-zero error code
 */
2206 static int hl_fw_dynamic_copy_image(struct hl_device *hdev,
2207 						const struct firmware *fw,
2208 						struct fw_load_mgr *fw_loader)
2209 {
2210 	struct lkd_fw_comms_desc *fw_desc;
2211 	struct pci_mem_region *region;
2212 	void __iomem *dest;
2213 	u64 addr;
2214 	int rc;
2215 
2216 	fw_desc = &fw_loader->dynamic_loader.comm_desc;
2217 	addr = le64_to_cpu(fw_desc->img_addr);
2218 
2219 	/* find memory region to which to copy the image */
2220 	region = fw_loader->dynamic_loader.image_region;
2221 
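	/* translate the image device address into a host-accessible address within the mapped PCI BAR */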
2222 	dest = hdev->pcie_bar[region->bar_id] + region->offset_in_bar +
2223 					(addr - region->region_base);
2224 
2225 	rc = hl_fw_copy_fw_to_device(hdev, fw, dest,
2226 					fw_loader->boot_fit_img.src_off,
2227 					fw_loader->boot_fit_img.copy_size);
2228 
2229 	return rc;
2230 }
2231 
2232 /**
2233  * hl_fw_dynamic_copy_msg - copy msg to memory allocated by the FW
2234  *
2235  * @hdev: pointer to the habanalabs device structure
2236  * @msg: message
2237  * @fw_loader: managing structure for loading device's FW
 *
 * @return 0 on success, otherwise non-zero error code
 */
2239 static int hl_fw_dynamic_copy_msg(struct hl_device *hdev,
2240 		struct lkd_msg_comms *msg, struct fw_load_mgr *fw_loader)
2241 {
2242 	struct lkd_fw_comms_desc *fw_desc;
2243 	struct pci_mem_region *region;
2244 	void __iomem *dest;
2245 	u64 addr;
2246 	int rc;
2247 
2248 	fw_desc = &fw_loader->dynamic_loader.comm_desc;
2249 	addr = le64_to_cpu(fw_desc->img_addr);
2250 
2251 	/* find memory region to which to copy the image */
2252 	region = fw_loader->dynamic_loader.image_region;
2253 
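	/* same BAR translation as for the image: device address to host-accessible BAR address */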
2254 	dest = hdev->pcie_bar[region->bar_id] + region->offset_in_bar +
2255 					(addr - region->region_base);
2256 
2257 	rc = hl_fw_copy_msg_to_device(hdev, msg, dest, 0, 0);
2258 
2259 	return rc;
2260 }
2261 
2262 /**
2263  * hl_fw_boot_fit_update_state - update internal data structures after boot-fit
2264  *                               is loaded
2265  *
2266  * @hdev: pointer to the habanalabs device structure
2267  * @cpu_boot_dev_sts0_reg: register holding CPU boot dev status 0
2268  * @cpu_boot_dev_sts1_reg: register holding CPU boot dev status 1
2271  */
2272 static void hl_fw_boot_fit_update_state(struct hl_device *hdev,
2273 						u32 cpu_boot_dev_sts0_reg,
2274 						u32 cpu_boot_dev_sts1_reg)
2275 {
2276 	struct asic_fixed_properties *prop = &hdev->asic_prop;
2277 
2278 	hdev->fw_loader.fw_comp_loaded |= FW_TYPE_BOOT_CPU;
2279 
2280 	/* Read boot_cpu status bits */
2281 	if (prop->fw_preboot_cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_ENABLED) {
2282 		prop->fw_bootfit_cpu_boot_dev_sts0 =
2283 				RREG32(cpu_boot_dev_sts0_reg);
2284 
2285 		prop->hard_reset_done_by_fw = !!(prop->fw_bootfit_cpu_boot_dev_sts0 &
2286 							CPU_BOOT_DEV_STS0_FW_HARD_RST_EN);
2287 
2288 		dev_dbg(hdev->dev, "Firmware boot CPU status0 %#x\n",
2289 					prop->fw_bootfit_cpu_boot_dev_sts0);
2290 	}
2291 
2292 	if (prop->fw_cpu_boot_dev_sts1_valid) {
2293 		prop->fw_bootfit_cpu_boot_dev_sts1 =
2294 				RREG32(cpu_boot_dev_sts1_reg);
2295 
2296 		dev_dbg(hdev->dev, "Firmware boot CPU status1 %#x\n",
2297 					prop->fw_bootfit_cpu_boot_dev_sts1);
2298 	}
2299 
2300 	dev_dbg(hdev->dev, "Firmware boot CPU hard-reset is %s\n",
2301 			prop->hard_reset_done_by_fw ? "enabled" : "disabled");
2302 }
2303 
2304 static void hl_fw_dynamic_update_linux_interrupt_if(struct hl_device *hdev)
2305 {
2306 	struct cpu_dyn_regs *dyn_regs =
2307 			&hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
2308 
	/* Check whether all 3 interrupt interfaces are set; if not, use a
	 * single interface
2311 	 */
2312 	if (!hdev->asic_prop.gic_interrupts_enable &&
2313 			!(hdev->asic_prop.fw_app_cpu_boot_dev_sts0 &
2314 				CPU_BOOT_DEV_STS0_MULTI_IRQ_POLL_EN)) {
2315 		dyn_regs->gic_host_halt_irq = dyn_regs->gic_host_pi_upd_irq;
2316 		dyn_regs->gic_host_ints_irq = dyn_regs->gic_host_pi_upd_irq;
2317 
2318 		dev_warn(hdev->dev,
2319 			"Using a single interrupt interface towards cpucp");
2320 	}
2321 }

/**
2323  * hl_fw_dynamic_load_image - load FW image using dynamic protocol
2324  *
2325  * @hdev: pointer to the habanalabs device structure
2326  * @fw_loader: managing structure for loading device's FW
2327  * @load_fwc: the FW component to be loaded
2328  * @img_ld_timeout: image load timeout
2329  *
2330  * @return 0 on success, otherwise non-zero error code
2331  */
2332 static int hl_fw_dynamic_load_image(struct hl_device *hdev,
2333 						struct fw_load_mgr *fw_loader,
2334 						enum hl_fw_component load_fwc,
2335 						u32 img_ld_timeout)
2336 {
2337 	enum hl_fw_component cur_fwc;
2338 	const struct firmware *fw;
2339 	char *fw_name;
2340 	int rc = 0;
2341 
2342 	/*
	 * when loading an image we have one of two scenarios:
	 * 1. the current FW component is preboot and we want to load the boot-fit
	 * 2. the current FW component is boot-fit and we want to load Linux
2346 	 */
2347 	if (load_fwc == FW_COMP_BOOT_FIT) {
2348 		cur_fwc = FW_COMP_PREBOOT;
2349 		fw_name = fw_loader->boot_fit_img.image_name;
2350 	} else {
2351 		cur_fwc = FW_COMP_BOOT_FIT;
2352 		fw_name = fw_loader->linux_img.image_name;
2353 	}
2354 
2355 	/* request FW in order to communicate to FW the size to be allocated */
2356 	rc = hl_request_fw(hdev, &fw, fw_name);
2357 	if (rc)
2358 		return rc;
2359 
2360 	/* store the image size for future validation */
2361 	fw_loader->dynamic_loader.fw_image_size = fw->size;
2362 
2363 	rc = hl_fw_dynamic_request_descriptor(hdev, fw_loader, fw->size);
2364 	if (rc)
2365 		goto release_fw;
2366 
	/* read the version of the currently running FW component */
2368 	rc = hl_fw_dynamic_read_device_fw_version(hdev, cur_fwc,
2369 				fw_loader->dynamic_loader.comm_desc.cur_fw_ver);
2370 	if (rc)
2371 		goto release_fw;
2372 
2373 	/* update state according to boot stage */
2374 	if (cur_fwc == FW_COMP_BOOT_FIT) {
2375 		struct cpu_dyn_regs *dyn_regs;
2376 
2377 		dyn_regs = &fw_loader->dynamic_loader.comm_desc.cpu_dyn_regs;
2378 		hl_fw_boot_fit_update_state(hdev,
2379 				le32_to_cpu(dyn_regs->cpu_boot_dev_sts0),
2380 				le32_to_cpu(dyn_regs->cpu_boot_dev_sts1));
2381 	}
2382 
	/* copy the image to the space allocated by the FW */
2384 	rc = hl_fw_dynamic_copy_image(hdev, fw, fw_loader);
2385 	if (rc)
2386 		goto release_fw;
2387 
2388 	rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_DATA_RDY,
2389 						0, true,
2390 						fw_loader->cpu_timeout);
2391 	if (rc)
2392 		goto release_fw;
2393 
2394 	rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_EXEC,
2395 						0, false,
2396 						img_ld_timeout);
2397 
2398 release_fw:
2399 	hl_release_firmware(fw);
2400 	return rc;
2401 }
2402 
2403 static int hl_fw_dynamic_wait_for_boot_fit_active(struct hl_device *hdev,
2404 					struct fw_load_mgr *fw_loader)
2405 {
2406 	struct dynamic_fw_load_mgr *dyn_loader;
2407 	u32 status;
2408 	int rc;
2409 
2410 	dyn_loader = &fw_loader->dynamic_loader;
2411 
2412 	/*
	 * Make sure the CPU boot-loader is running.
	 * Note that CPU_BOOT_STATUS_SRAM_AVAIL is generally set by Linux,
	 * yet there is a debug scenario in which we load u-boot (without Linux),
	 * which at a later stage is relocated to DRAM. In this case we expect
	 * u-boot to set CPU_BOOT_STATUS_SRAM_AVAIL, so we add it to the
	 * poll flags.
2419 	 */
2420 	rc = hl_poll_timeout(
2421 		hdev,
2422 		le32_to_cpu(dyn_loader->comm_desc.cpu_dyn_regs.cpu_boot_status),
2423 		status,
2424 		(status == CPU_BOOT_STATUS_READY_TO_BOOT) ||
2425 		(status == CPU_BOOT_STATUS_SRAM_AVAIL),
2426 		hdev->fw_poll_interval_usec,
2427 		dyn_loader->wait_for_bl_timeout);
2428 	if (rc) {
2429 		dev_err(hdev->dev, "failed to wait for boot (status = %d)\n", status);
2430 		return rc;
2431 	}
2432 
2433 	dev_dbg(hdev->dev, "uboot status = %d\n", status);
2434 	return 0;
2435 }
2436 
2437 static int hl_fw_dynamic_wait_for_linux_active(struct hl_device *hdev,
2438 						struct fw_load_mgr *fw_loader)
2439 {
2440 	struct dynamic_fw_load_mgr *dyn_loader;
2441 	u32 status;
2442 	int rc;
2443 
2444 	dyn_loader = &fw_loader->dynamic_loader;
2445 
2446 	/* Make sure CPU linux is running */
2447 
2448 	rc = hl_poll_timeout(
2449 		hdev,
2450 		le32_to_cpu(dyn_loader->comm_desc.cpu_dyn_regs.cpu_boot_status),
2451 		status,
2452 		(status == CPU_BOOT_STATUS_SRAM_AVAIL),
2453 		hdev->fw_poll_interval_usec,
2454 		fw_loader->cpu_timeout);
2455 	if (rc) {
2456 		dev_err(hdev->dev, "failed to wait for Linux (status = %d)\n", status);
2457 		return rc;
2458 	}
2459 
2460 	dev_dbg(hdev->dev, "Boot status = %d\n", status);
2461 	return 0;
2462 }
2463 
2464 /**
 * hl_fw_linux_update_state -	update internal data structures after Linux
 *				is loaded.
 *				Note: Linux initialization consists mainly
 *				of two stages - loading the kernel (SRAM_AVAIL)
 *				and loading ARMCP.
 *				Therefore reading the boot device status in any of
 *				these stages might result in different values.
2472  *
2473  * @hdev: pointer to the habanalabs device structure
2474  * @cpu_boot_dev_sts0_reg: register holding CPU boot dev status 0
2475  * @cpu_boot_dev_sts1_reg: register holding CPU boot dev status 1
2478  */
2479 static void hl_fw_linux_update_state(struct hl_device *hdev,
2480 						u32 cpu_boot_dev_sts0_reg,
2481 						u32 cpu_boot_dev_sts1_reg)
2482 {
2483 	struct asic_fixed_properties *prop = &hdev->asic_prop;
2484 
2485 	hdev->fw_loader.fw_comp_loaded |= FW_TYPE_LINUX;
2486 
2487 	/* Read FW application security bits */
2488 	if (prop->fw_cpu_boot_dev_sts0_valid) {
2489 		prop->fw_app_cpu_boot_dev_sts0 = RREG32(cpu_boot_dev_sts0_reg);
2490 
2491 		prop->hard_reset_done_by_fw = !!(prop->fw_app_cpu_boot_dev_sts0 &
2492 							CPU_BOOT_DEV_STS0_FW_HARD_RST_EN);
2493 
2494 		if (prop->fw_app_cpu_boot_dev_sts0 &
2495 				CPU_BOOT_DEV_STS0_GIC_PRIVILEGED_EN)
2496 			prop->gic_interrupts_enable = false;
2497 
2498 		dev_dbg(hdev->dev,
2499 			"Firmware application CPU status0 %#x\n",
2500 			prop->fw_app_cpu_boot_dev_sts0);
2501 
2502 		dev_dbg(hdev->dev, "GIC controller is %s\n",
2503 				prop->gic_interrupts_enable ?
2504 						"enabled" : "disabled");
2505 	}
2506 
2507 	if (prop->fw_cpu_boot_dev_sts1_valid) {
2508 		prop->fw_app_cpu_boot_dev_sts1 = RREG32(cpu_boot_dev_sts1_reg);
2509 
2510 		dev_dbg(hdev->dev,
2511 			"Firmware application CPU status1 %#x\n",
2512 			prop->fw_app_cpu_boot_dev_sts1);
2513 	}
2514 
2515 	dev_dbg(hdev->dev, "Firmware application CPU hard-reset is %s\n",
2516 			prop->hard_reset_done_by_fw ? "enabled" : "disabled");
2517 
2518 	dev_info(hdev->dev, "Successfully loaded firmware to device\n");
2519 }
2520 
2521 /**
2522  * hl_fw_dynamic_send_msg - send a COMMS message with attached data
2523  *
2524  * @hdev: pointer to the habanalabs device structure
2525  * @fw_loader: managing structure for loading device's FW
2526  * @msg_type: message type
2527  * @data: data to be sent
2528  *
2529  * @return 0 on success, otherwise non-zero error code
2530  */
2531 static int hl_fw_dynamic_send_msg(struct hl_device *hdev,
2532 		struct fw_load_mgr *fw_loader, u8 msg_type, void *data)
2533 {
2534 	struct lkd_msg_comms *msg;
2535 	int rc;
2536 
2537 	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
2538 	if (!msg)
2539 		return -ENOMEM;
2540 
2541 	/* create message to be sent */
2542 	msg->header.type = msg_type;
2543 	msg->header.size = cpu_to_le16(sizeof(struct comms_msg_header));
2544 	msg->header.magic = cpu_to_le32(HL_COMMS_MSG_MAGIC);
2545 
2546 	switch (msg_type) {
2547 	case HL_COMMS_RESET_CAUSE_TYPE:
2548 		msg->reset_cause = *(__u8 *) data;
2549 		break;
2550 
2551 	default:
2552 		dev_err(hdev->dev,
2553 			"Send COMMS message - invalid message type %u\n",
2554 			msg_type);
2555 		rc = -EINVAL;
2556 		goto out;
2557 	}
2558 
2559 	rc = hl_fw_dynamic_request_descriptor(hdev, fw_loader,
2560 			sizeof(struct lkd_msg_comms));
2561 	if (rc)
2562 		goto out;
2563 
2564 	/* copy message to space allocated by FW */
2565 	rc = hl_fw_dynamic_copy_msg(hdev, msg, fw_loader);
2566 	if (rc)
2567 		goto out;
2568 
2569 	rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_DATA_RDY,
2570 						0, true,
2571 						fw_loader->cpu_timeout);
2572 	if (rc)
2573 		goto out;
2574 
2575 	rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_EXEC,
2576 						0, true,
2577 						fw_loader->cpu_timeout);
2578 
2579 out:
2580 	kfree(msg);
2581 	return rc;
2582 }
2583 
2584 /**
2585  * hl_fw_dynamic_init_cpu - initialize the device CPU using dynamic protocol
2586  *
2587  * @hdev: pointer to the habanalabs device structure
2588  * @fw_loader: managing structure for loading device's FW
2589  *
2590  * @return 0 on success, otherwise non-zero error code
2591  *
 * brief: the dynamic protocol is a master (LKD) / slave (FW CPU) protocol.
 * the communication is done using registers:
 * - LKD command register
 * - FW status register
 * the protocol is race free. this goal is achieved by splitting the requests
 * and responses into known synchronization points between the LKD and the FW.
 * each response to an LKD request is bound to a predefined timeout.
 * in case the timeout expires without the desired status from the FW, the
 * protocol (and hence the boot) will fail.
2601  */
2602 static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
2603 					struct fw_load_mgr *fw_loader)
2604 {
2605 	struct cpu_dyn_regs *dyn_regs;
2606 	int rc, fw_error_rc;
2607 
2608 	dev_info(hdev->dev,
2609 		"Loading %sfirmware to device, may take some time...\n",
2610 		hdev->asic_prop.fw_security_enabled ? "secured " : "");
2611 
2612 	/* initialize FW descriptor as invalid */
2613 	fw_loader->dynamic_loader.fw_desc_valid = false;
2614 
2615 	/*
2616 	 * In this stage, "cpu_dyn_regs" contains only LKD's hard coded values!
2617 	 * It will be updated from FW after hl_fw_dynamic_request_descriptor().
2618 	 */
2619 	dyn_regs = &fw_loader->dynamic_loader.comm_desc.cpu_dyn_regs;
2620 
2621 	rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_RST_STATE,
2622 						0, true,
2623 						fw_loader->cpu_timeout);
2624 	if (rc)
2625 		goto protocol_err;
2626 
2627 	if (hdev->reset_info.curr_reset_cause) {
2628 		rc = hl_fw_dynamic_send_msg(hdev, fw_loader,
2629 				HL_COMMS_RESET_CAUSE_TYPE, &hdev->reset_info.curr_reset_cause);
2630 		if (rc)
2631 			goto protocol_err;
2632 
2633 		/* Clear current reset cause */
2634 		hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
2635 	}
2636 
2637 	if (!(hdev->fw_components & FW_TYPE_BOOT_CPU)) {
2638 		struct lkd_fw_binning_info *binning_info;
2639 
2640 		rc = hl_fw_dynamic_request_descriptor(hdev, fw_loader, 0);
2641 		if (rc)
2642 			goto protocol_err;
2643 
2644 		/* read preboot version */
2645 		rc = hl_fw_dynamic_read_device_fw_version(hdev, FW_COMP_PREBOOT,
2646 				fw_loader->dynamic_loader.comm_desc.cur_fw_ver);
2647 
2648 		if (rc)
2649 			return rc;
2650 
2651 		/* read binning info from preboot */
2652 		if (hdev->support_preboot_binning) {
2653 			binning_info = &fw_loader->dynamic_loader.comm_desc.binning_info;
2654 			hdev->tpc_binning = le64_to_cpu(binning_info->tpc_mask_l);
2655 			hdev->dram_binning = le32_to_cpu(binning_info->dram_mask);
2656 			hdev->edma_binning = le32_to_cpu(binning_info->edma_mask);
2657 			hdev->decoder_binning = le32_to_cpu(binning_info->dec_mask);
2658 			hdev->rotator_binning = le32_to_cpu(binning_info->rot_mask);
2659 
2660 			rc = hdev->asic_funcs->set_dram_properties(hdev);
2661 			if (rc)
2662 				return rc;
2663 
2664 			rc = hdev->asic_funcs->set_binning_masks(hdev);
2665 			if (rc)
2666 				return rc;
2667 
2668 			dev_dbg(hdev->dev,
2669 				"Read binning masks: tpc: 0x%llx, dram: 0x%llx, edma: 0x%x, dec: 0x%x, rot:0x%x\n",
2670 				hdev->tpc_binning, hdev->dram_binning, hdev->edma_binning,
2671 				hdev->decoder_binning, hdev->rotator_binning);
2672 		}
2673 
2674 		return 0;
2675 	}
2676 
2677 	/* load boot fit to FW */
2678 	rc = hl_fw_dynamic_load_image(hdev, fw_loader, FW_COMP_BOOT_FIT,
2679 						fw_loader->boot_fit_timeout);
2680 	if (rc) {
2681 		dev_err(hdev->dev, "failed to load boot fit\n");
2682 		goto protocol_err;
2683 	}
2684 
2685 	/*
2686 	 * when testing FW load (without Linux) on PLDM we don't want to
2687 	 * wait until boot fit is active as it may take several hours.
2688 	 * instead, we load the bootfit and let it do all initialization in
2689 	 * the background.
2690 	 */
2691 	if (hdev->pldm && !(hdev->fw_components & FW_TYPE_LINUX))
2692 		return 0;
2693 
2694 	rc = hl_fw_dynamic_wait_for_boot_fit_active(hdev, fw_loader);
2695 	if (rc)
2696 		goto protocol_err;
2697 
	/* Enable DRAM scrambling before Linux boot and after a successful
	 * U-Boot run
	 */
2701 	hdev->asic_funcs->init_cpu_scrambler_dram(hdev);
2702 
2703 	if (!(hdev->fw_components & FW_TYPE_LINUX)) {
2704 		dev_info(hdev->dev, "Skip loading Linux F/W\n");
2705 		return 0;
2706 	}
2707 
2708 	if (fw_loader->skip_bmc) {
2709 		rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader,
2710 							COMMS_SKIP_BMC, 0,
2711 							true,
2712 							fw_loader->cpu_timeout);
2713 		if (rc) {
			dev_err(hdev->dev, "failed to send skip BMC command\n");
2715 			goto protocol_err;
2716 		}
2717 	}
2718 
2719 	/* load Linux image to FW */
2720 	rc = hl_fw_dynamic_load_image(hdev, fw_loader, FW_COMP_LINUX,
2721 							fw_loader->cpu_timeout);
2722 	if (rc) {
2723 		dev_err(hdev->dev, "failed to load Linux\n");
2724 		goto protocol_err;
2725 	}
2726 
2727 	rc = hl_fw_dynamic_wait_for_linux_active(hdev, fw_loader);
2728 	if (rc)
2729 		goto protocol_err;
2730 
2731 	hl_fw_linux_update_state(hdev, le32_to_cpu(dyn_regs->cpu_boot_dev_sts0),
2732 				le32_to_cpu(dyn_regs->cpu_boot_dev_sts1));
2733 
2734 	hl_fw_dynamic_update_linux_interrupt_if(hdev);
2735 
2736 protocol_err:
2737 	if (fw_loader->dynamic_loader.fw_desc_valid) {
2738 		fw_error_rc = fw_read_errors(hdev, le32_to_cpu(dyn_regs->cpu_boot_err0),
2739 				le32_to_cpu(dyn_regs->cpu_boot_err1),
2740 				le32_to_cpu(dyn_regs->cpu_boot_dev_sts0),
2741 				le32_to_cpu(dyn_regs->cpu_boot_dev_sts1));
2742 
2743 		if (fw_error_rc)
2744 			return fw_error_rc;
2745 	}
2746 
2747 	return rc;
2748 }
2749 
2750 /**
2751  * hl_fw_static_init_cpu - initialize the device CPU using static protocol
2752  *
2753  * @hdev: pointer to the habanalabs device structure
2754  * @fw_loader: managing structure for loading device's FW
2755  *
2756  * @return 0 on success, otherwise non-zero error code
2757  */
2758 static int hl_fw_static_init_cpu(struct hl_device *hdev,
2759 					struct fw_load_mgr *fw_loader)
2760 {
2761 	u32 cpu_msg_status_reg, cpu_timeout, msg_to_cpu_reg, status;
2762 	u32 cpu_boot_dev_status0_reg, cpu_boot_dev_status1_reg;
2763 	struct static_fw_load_mgr *static_loader;
2764 	u32 cpu_boot_status_reg;
2765 	int rc;
2766 
2767 	if (!(hdev->fw_components & FW_TYPE_BOOT_CPU))
2768 		return 0;
2769 
2770 	/* init common loader parameters */
2771 	cpu_timeout = fw_loader->cpu_timeout;
2772 
2773 	/* init static loader parameters */
2774 	static_loader = &fw_loader->static_loader;
2775 	cpu_msg_status_reg = static_loader->cpu_cmd_status_to_host_reg;
2776 	msg_to_cpu_reg = static_loader->kmd_msg_to_cpu_reg;
2777 	cpu_boot_dev_status0_reg = static_loader->cpu_boot_dev_status0_reg;
2778 	cpu_boot_dev_status1_reg = static_loader->cpu_boot_dev_status1_reg;
2779 	cpu_boot_status_reg = static_loader->cpu_boot_status_reg;
2780 
2781 	dev_info(hdev->dev, "Going to wait for device boot (up to %lds)\n",
2782 		cpu_timeout / USEC_PER_SEC);
2783 
2784 	/* Wait for boot FIT request */
2785 	rc = hl_poll_timeout(
2786 		hdev,
2787 		cpu_boot_status_reg,
2788 		status,
2789 		status == CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT,
2790 		hdev->fw_poll_interval_usec,
2791 		fw_loader->boot_fit_timeout);
2792 
2793 	if (rc) {
2794 		dev_dbg(hdev->dev,
2795 			"No boot fit request received (status = %d), resuming boot\n", status);
2796 	} else {
2797 		rc = hdev->asic_funcs->load_boot_fit_to_device(hdev);
2798 		if (rc)
2799 			goto out;
2800 
2801 		/* Clear device CPU message status */
2802 		WREG32(cpu_msg_status_reg, CPU_MSG_CLR);
2803 
2804 		/* Signal device CPU that boot loader is ready */
2805 		WREG32(msg_to_cpu_reg, KMD_MSG_FIT_RDY);
2806 
2807 		/* Poll for CPU device ack */
2808 		rc = hl_poll_timeout(
2809 			hdev,
2810 			cpu_msg_status_reg,
2811 			status,
2812 			status == CPU_MSG_OK,
2813 			hdev->fw_poll_interval_usec,
2814 			fw_loader->boot_fit_timeout);
2815 
2816 		if (rc) {
2817 			dev_err(hdev->dev,
2818 				"Timeout waiting for boot fit load ack (status = %d)\n", status);
2819 			goto out;
2820 		}
2821 
2822 		/* Clear message */
2823 		WREG32(msg_to_cpu_reg, KMD_MSG_NA);
2824 	}
2825 
2826 	/*
	 * Make sure the CPU boot-loader is running.
	 * Note that CPU_BOOT_STATUS_SRAM_AVAIL is generally set by Linux,
	 * yet there is a debug scenario in which we load u-boot (without Linux),
	 * which at a later stage is relocated to DRAM. In this case we expect
	 * u-boot to set CPU_BOOT_STATUS_SRAM_AVAIL, so we add it to the
	 * poll flags.
2833 	 */
2834 	rc = hl_poll_timeout(
2835 		hdev,
2836 		cpu_boot_status_reg,
2837 		status,
2838 		(status == CPU_BOOT_STATUS_DRAM_RDY) ||
2839 		(status == CPU_BOOT_STATUS_NIC_FW_RDY) ||
2840 		(status == CPU_BOOT_STATUS_READY_TO_BOOT) ||
2841 		(status == CPU_BOOT_STATUS_SRAM_AVAIL),
2842 		hdev->fw_poll_interval_usec,
2843 		cpu_timeout);
2844 
2845 	dev_dbg(hdev->dev, "uboot status = %d\n", status);
2846 
2847 	/* Read U-Boot version now in case we will later fail */
2848 	hl_fw_static_read_device_fw_version(hdev, FW_COMP_BOOT_FIT);
2849 
2850 	/* update state according to boot stage */
2851 	hl_fw_boot_fit_update_state(hdev, cpu_boot_dev_status0_reg,
2852 						cpu_boot_dev_status1_reg);
2853 
2854 	if (rc) {
2855 		detect_cpu_boot_status(hdev, status);
2856 		rc = -EIO;
2857 		goto out;
2858 	}
2859 
	/* Enable DRAM scrambling before Linux boot and after a successful
	 * U-Boot run
	 */
2863 	hdev->asic_funcs->init_cpu_scrambler_dram(hdev);
2864 
2865 	if (!(hdev->fw_components & FW_TYPE_LINUX)) {
2866 		dev_info(hdev->dev, "Skip loading Linux F/W\n");
2867 		rc = 0;
2868 		goto out;
2869 	}
2870 
2871 	if (status == CPU_BOOT_STATUS_SRAM_AVAIL) {
2872 		rc = 0;
2873 		goto out;
2874 	}
2875 
2876 	dev_info(hdev->dev,
2877 		"Loading firmware to device, may take some time...\n");
2878 
2879 	rc = hdev->asic_funcs->load_firmware_to_device(hdev);
2880 	if (rc)
2881 		goto out;
2882 
2883 	if (fw_loader->skip_bmc) {
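		/* ask the CPU to skip waiting for the BMC and poll for its acknowledgement */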
2884 		WREG32(msg_to_cpu_reg, KMD_MSG_SKIP_BMC);
2885 
2886 		rc = hl_poll_timeout(
2887 			hdev,
2888 			cpu_boot_status_reg,
2889 			status,
2890 			(status == CPU_BOOT_STATUS_BMC_WAITING_SKIPPED),
2891 			hdev->fw_poll_interval_usec,
2892 			cpu_timeout);
2893 
2894 		if (rc) {
2895 			dev_err(hdev->dev,
2896 				"Failed to get ACK on skipping BMC (status = %d)\n",
2897 				status);
2898 			WREG32(msg_to_cpu_reg, KMD_MSG_NA);
2899 			rc = -EIO;
2900 			goto out;
2901 		}
2902 	}
2903 
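	/* signal the CPU that the FIT image is ready and wait for it to report SRAM_AVAIL */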
2904 	WREG32(msg_to_cpu_reg, KMD_MSG_FIT_RDY);
2905 
2906 	rc = hl_poll_timeout(
2907 		hdev,
2908 		cpu_boot_status_reg,
2909 		status,
2910 		(status == CPU_BOOT_STATUS_SRAM_AVAIL),
2911 		hdev->fw_poll_interval_usec,
2912 		cpu_timeout);
2913 
2914 	/* Clear message */
2915 	WREG32(msg_to_cpu_reg, KMD_MSG_NA);
2916 
2917 	if (rc) {
2918 		if (status == CPU_BOOT_STATUS_FIT_CORRUPTED)
2919 			dev_err(hdev->dev,
2920 				"Device reports FIT image is corrupted\n");
2921 		else
2922 			dev_err(hdev->dev,
2923 				"Failed to load firmware to device (status = %d)\n",
2924 				status);
2925 
2926 		rc = -EIO;
2927 		goto out;
2928 	}
2929 
2930 	rc = fw_read_errors(hdev, fw_loader->static_loader.boot_err0_reg,
2931 					fw_loader->static_loader.boot_err1_reg,
2932 					cpu_boot_dev_status0_reg,
2933 					cpu_boot_dev_status1_reg);
2934 	if (rc)
2935 		return rc;
2936 
2937 	hl_fw_linux_update_state(hdev, cpu_boot_dev_status0_reg,
2938 						cpu_boot_dev_status1_reg);
2939 
2940 	return 0;
2941 
2942 out:
2943 	fw_read_errors(hdev, fw_loader->static_loader.boot_err0_reg,
2944 					fw_loader->static_loader.boot_err1_reg,
2945 					cpu_boot_dev_status0_reg,
2946 					cpu_boot_dev_status1_reg);
2947 
2948 	return rc;
2949 }
2950 
2951 /**
2952  * hl_fw_init_cpu - initialize the device CPU
2953  *
2954  * @hdev: pointer to the habanalabs device structure
2955  *
2956  * @return 0 on success, otherwise non-zero error code
2957  *
2958  * perform necessary initializations for device's CPU. takes into account if
2959  * init protocol is static or dynamic.
2960  */
2961 int hl_fw_init_cpu(struct hl_device *hdev)
2962 {
2963 	struct asic_fixed_properties *prop = &hdev->asic_prop;
2964 	struct fw_load_mgr *fw_loader = &hdev->fw_loader;
2965 
	return prop->dynamic_fw_load ?
2967 			hl_fw_dynamic_init_cpu(hdev, fw_loader) :
2968 			hl_fw_static_init_cpu(hdev, fw_loader);
2969 }
2970 
2971 void hl_fw_set_pll_profile(struct hl_device *hdev)
2972 {
2973 	hl_fw_set_frequency(hdev, hdev->asic_prop.clk_pll_index,
2974 				hdev->asic_prop.max_freq_value);
2975 }
2976 
2977 int hl_fw_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk)
2978 {
2979 	long value;
2980 
2981 	if (!hl_device_operational(hdev, NULL))
2982 		return -ENODEV;
2983 
2984 	if (!hdev->pdev) {
2985 		*cur_clk = 0;
2986 		*max_clk = 0;
2987 		return 0;
2988 	}
2989 
2990 	value = hl_fw_get_frequency(hdev, hdev->asic_prop.clk_pll_index, false);
2991 
2992 	if (value < 0) {
2993 		dev_err(hdev->dev, "Failed to retrieve device max clock %ld\n", value);
2994 		return value;
2995 	}
2996 
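	/* the PLL frequency is divided by 10^6, i.e. reported in MHz (assuming the FW returns Hz) */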
2997 	*max_clk = (value / 1000 / 1000);
2998 
2999 	value = hl_fw_get_frequency(hdev, hdev->asic_prop.clk_pll_index, true);
3000 
3001 	if (value < 0) {
3002 		dev_err(hdev->dev, "Failed to retrieve device current clock %ld\n", value);
3003 		return value;
3004 	}
3005 
3006 	*cur_clk = (value / 1000 / 1000);
3007 
3008 	return 0;
3009 }
3010 
3011 long hl_fw_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr)
3012 {
3013 	struct cpucp_packet pkt;
3014 	u32 used_pll_idx;
3015 	u64 result;
3016 	int rc;
3017 
3018 	rc = get_used_pll_index(hdev, pll_index, &used_pll_idx);
3019 	if (rc)
3020 		return rc;
3021 
3022 	memset(&pkt, 0, sizeof(pkt));
3023 
3024 	if (curr)
3025 		pkt.ctl = cpu_to_le32(CPUCP_PACKET_FREQUENCY_CURR_GET <<
3026 						CPUCP_PKT_CTL_OPCODE_SHIFT);
3027 	else
3028 		pkt.ctl = cpu_to_le32(CPUCP_PACKET_FREQUENCY_GET << CPUCP_PKT_CTL_OPCODE_SHIFT);
3029 
3030 	pkt.pll_index = cpu_to_le32((u32)used_pll_idx);
3031 
3032 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, &result);
3033 
3034 	if (rc) {
3035 		dev_err(hdev->dev, "Failed to get frequency of PLL %d, error %d\n",
3036 			used_pll_idx, rc);
3037 		return rc;
3038 	}
3039 
3040 	return (long) result;
3041 }
3042 
3043 void hl_fw_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq)
3044 {
3045 	struct cpucp_packet pkt;
3046 	u32 used_pll_idx;
3047 	int rc;
3048 
3049 	rc = get_used_pll_index(hdev, pll_index, &used_pll_idx);
3050 	if (rc)
3051 		return;
3052 
3053 	memset(&pkt, 0, sizeof(pkt));
3054 
3055 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_FREQUENCY_SET << CPUCP_PKT_CTL_OPCODE_SHIFT);
3056 	pkt.pll_index = cpu_to_le32((u32)used_pll_idx);
3057 	pkt.value = cpu_to_le64(freq);
3058 
3059 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
3060 
3061 	if (rc)
3062 		dev_err(hdev->dev, "Failed to set frequency to PLL %d, error %d\n",
3063 			used_pll_idx, rc);
3064 }
3065 
3066 long hl_fw_get_max_power(struct hl_device *hdev)
3067 {
3068 	struct cpucp_packet pkt;
3069 	u64 result;
3070 	int rc;
3071 
3072 	memset(&pkt, 0, sizeof(pkt));
3073 
3074 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_MAX_POWER_GET << CPUCP_PKT_CTL_OPCODE_SHIFT);
3075 
3076 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, &result);
3077 
3078 	if (rc) {
3079 		dev_err(hdev->dev, "Failed to get max power, error %d\n", rc);
3080 		return rc;
3081 	}
3082 
3083 	return result;
3084 }
3085 
3086 void hl_fw_set_max_power(struct hl_device *hdev)
3087 {
3088 	struct cpucp_packet pkt;
3089 	int rc;
3090 
3091 	/* TODO: remove this after simulator supports this packet */
3092 	if (!hdev->pdev)
3093 		return;
3094 
3095 	memset(&pkt, 0, sizeof(pkt));
3096 
3097 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_MAX_POWER_SET << CPUCP_PKT_CTL_OPCODE_SHIFT);
3098 	pkt.value = cpu_to_le64(hdev->max_power);
3099 
3100 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
3101 
3102 	if (rc)
3103 		dev_err(hdev->dev, "Failed to set max power, error %d\n", rc);
3104 }
3105 
3106 static int hl_fw_get_sec_attest_data(struct hl_device *hdev, u32 packet_id, void *data, u32 size,
3107 					u32 nonce, u32 timeout)
3108 {
3109 	struct cpucp_packet pkt = {};
3110 	dma_addr_t req_dma_addr;
3111 	void *req_cpu_addr;
3112 	int rc;
3113 
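	/* allocate a CPU-accessible buffer for the FW to fill; its DMA address is passed in the packet below */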
3114 	req_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, size, &req_dma_addr);
3115 	if (!req_cpu_addr) {
3116 		dev_err(hdev->dev,
3117 			"Failed to allocate DMA memory for CPU-CP packet %u\n", packet_id);
3118 		return -ENOMEM;
3119 	}
3120 
3121 	memset(data, 0, size);
3122 
3123 	pkt.ctl = cpu_to_le32(packet_id << CPUCP_PKT_CTL_OPCODE_SHIFT);
3124 	pkt.addr = cpu_to_le64(req_dma_addr);
3125 	pkt.data_max_size = cpu_to_le32(size);
3126 	pkt.nonce = cpu_to_le32(nonce);
3127 
3128 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
3129 					timeout, NULL);
3130 	if (rc) {
3131 		dev_err(hdev->dev,
3132 			"Failed to handle CPU-CP pkt %u, error %d\n", packet_id, rc);
3133 		goto out;
3134 	}
3135 
3136 	memcpy(data, req_cpu_addr, size);
3137 
3138 out:
3139 	hl_cpu_accessible_dma_pool_free(hdev, size, req_cpu_addr);
3140 
3141 	return rc;
3142 }
3143 
3144 int hl_fw_get_sec_attest_info(struct hl_device *hdev, struct cpucp_sec_attest_info *sec_attest_info,
3145 				u32 nonce)
3146 {
3147 	return hl_fw_get_sec_attest_data(hdev, CPUCP_PACKET_SEC_ATTEST_GET, sec_attest_info,
3148 					sizeof(struct cpucp_sec_attest_info), nonce,
3149 					HL_CPUCP_SEC_ATTEST_INFO_TINEOUT_USEC);
3150 }
3151 
3152 int hl_fw_send_generic_request(struct hl_device *hdev, enum hl_passthrough_type sub_opcode,
3153 						dma_addr_t buff, u32 *size)
3154 {
3155 	struct cpucp_packet pkt = {0};
3156 	u64 result;
3157 	int rc = 0;
3158 
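	/* pass the caller's buffer by DMA address; pkt_subidx selects the passthrough sub-opcode */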
3159 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_GENERIC_PASSTHROUGH << CPUCP_PKT_CTL_OPCODE_SHIFT);
3160 	pkt.addr = cpu_to_le64(buff);
3161 	pkt.data_max_size = cpu_to_le32(*size);
3162 	pkt.pkt_subidx = cpu_to_le32(sub_opcode);
3163 
3164 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *)&pkt, sizeof(pkt),
3165 						HL_CPUCP_INFO_TIMEOUT_USEC, &result);
3166 	if (rc)
3167 		dev_err(hdev->dev, "failed to send CPUCP data of generic fw pkt\n");
3168 	else
3169 		dev_dbg(hdev->dev, "generic pkt was successful, result: 0x%llx\n", result);
3170 
3171 	*size = (u32)result;
3172 
3173 	return rc;
3174 }
3175