// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2020 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"

static const char * const hl_glbl_error_cause[HL_MAX_NUM_OF_GLBL_ERR_CAUSE] = {
	"Error due to un-priv read",
	"Error due to un-secure read",
	"Error due to read from unmapped reg",
	"Error due to un-priv write",
	"Error due to un-secure write",
	"Error due to write to unmapped reg",
	"External I/F write sec violation",
	"External I/F write to un-mapped reg",
	"Read to write only",
	"Write to read only"
};

/**
 * hl_get_pb_block - return the index of the relevant block within the block array
 *
 * @hdev: pointer to hl_device structure
 * @mm_reg_addr: register address in the desired block
 * @pb_blocks: blocks array
 * @array_size: blocks array size
 *
 */
static int hl_get_pb_block(struct hl_device *hdev, u32 mm_reg_addr,
		const u32 pb_blocks[], int array_size)
{
	int i;
	u32 start_addr, end_addr;

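	/* Each protection block covers HL_BLOCK_SIZE bytes of register space */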
	for (i = 0 ; i < array_size ; i++) {
		start_addr = pb_blocks[i];
		end_addr = start_addr + HL_BLOCK_SIZE;

		if ((mm_reg_addr >= start_addr) && (mm_reg_addr < end_addr))
			return i;
	}

	dev_err(hdev->dev, "No protection domain was found for 0x%x\n",
			mm_reg_addr);
	return -EDOM;
}

/**
 * hl_unset_pb_in_block - clear a specific protection bit in a block
 *
 * @hdev: pointer to hl_device structure
 * @reg_offset: register offset; will be converted to a bit offset within the pb block
 * @sgs_entry: pb array
 *
 */
static int hl_unset_pb_in_block(struct hl_device *hdev, u32 reg_offset,
				struct hl_block_glbl_sec *sgs_entry)
{
	if ((reg_offset >= HL_BLOCK_SIZE) || (reg_offset & 0x3)) {
		dev_err(hdev->dev,
			"Register offset(%d) is out of range(%d) or invalid\n",
			reg_offset, HL_BLOCK_SIZE);
		return -EINVAL;
	}

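	/*
	 * Registers are 4 bytes wide, so the protection bit index is the
	 * offset within the block divided by 4.
	 */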
	UNSET_GLBL_SEC_BIT(sgs_entry->sec_array,
			 (reg_offset & (HL_BLOCK_SIZE - 1)) >> 2);

	return 0;
}

/**
 * hl_unsecure_register - locate the relevant block for this register and
 *                        remove the corresponding protection bit
 *
 * @hdev: pointer to hl_device structure
 * @mm_reg_addr: register address to unsecure
 * @offset: additional offset to the register address
 * @pb_blocks: blocks array
 * @sgs_array: pb array
 * @array_size: blocks array size
 *
 */
int hl_unsecure_register(struct hl_device *hdev, u32 mm_reg_addr, int offset,
		const u32 pb_blocks[], struct hl_block_glbl_sec sgs_array[],
		int array_size)
{
	u32 reg_offset;
	int block_num;

	block_num = hl_get_pb_block(hdev, mm_reg_addr + offset, pb_blocks,
			array_size);
	if (block_num < 0)
		return block_num;

	reg_offset = (mm_reg_addr + offset) - pb_blocks[block_num];

	return hl_unset_pb_in_block(hdev, reg_offset, &sgs_array[block_num]);
}

/**
 * hl_unsecure_register_range - locate the relevant block for this register
 *                              range and remove the corresponding protection bits
 *
 * @hdev: pointer to hl_device structure
 * @mm_reg_range: register address range to unsecure
 * @offset: additional offset to the register address
 * @pb_blocks: blocks array
 * @sgs_array: pb array
 * @array_size: blocks array size
 *
 */
static int hl_unsecure_register_range(struct hl_device *hdev,
		struct range mm_reg_range, int offset, const u32 pb_blocks[],
		struct hl_block_glbl_sec sgs_array[],
		int array_size)
{
	u32 reg_offset;
	int i, block_num, rc = 0;

	block_num = hl_get_pb_block(hdev,
			mm_reg_range.start + offset, pb_blocks,
			array_size);
	if (block_num < 0)
		return block_num;

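	/*
	 * Walk the range in register (4 byte) strides. The whole range is
	 * assumed to reside in the block found for its start address.
	 */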
	for (i = mm_reg_range.start ; i <= mm_reg_range.end ; i += 4) {
		reg_offset = (i + offset) - pb_blocks[block_num];
		rc |= hl_unset_pb_in_block(hdev, reg_offset,
					&sgs_array[block_num]);
	}

	return rc;
}

/**
 * hl_unsecure_registers - locate the relevant block for all registers and
 *                         remove the corresponding protection bits
 *
 * @hdev: pointer to hl_device structure
 * @mm_reg_array: register address array to unsecure
 * @mm_array_size: register array size
 * @offset: additional offset to the register address
 * @pb_blocks: blocks array
 * @sgs_array: pb array
 * @blocks_array_size: blocks array size
 *
 */
int hl_unsecure_registers(struct hl_device *hdev, const u32 mm_reg_array[],
		int mm_array_size, int offset, const u32 pb_blocks[],
		struct hl_block_glbl_sec sgs_array[], int blocks_array_size)
{
	int i, rc = 0;

	for (i = 0 ; i < mm_array_size ; i++) {
		rc = hl_unsecure_register(hdev, mm_reg_array[i], offset,
				pb_blocks, sgs_array, blocks_array_size);

		if (rc)
			return rc;
	}

	return rc;
}

/**
 * hl_unsecure_registers_range - locate the relevant block for all register
 *                               ranges and remove the corresponding protection bits
 *
 * @hdev: pointer to hl_device structure
 * @mm_reg_range_array: register address range array to unsecure
 * @mm_array_size: register array size
 * @offset: additional offset to the register address
 * @pb_blocks: blocks array
 * @sgs_array: pb array
 * @blocks_array_size: blocks array size
 *
 */
static int hl_unsecure_registers_range(struct hl_device *hdev,
		const struct range mm_reg_range_array[], int mm_array_size,
		int offset, const u32 pb_blocks[],
		struct hl_block_glbl_sec sgs_array[], int blocks_array_size)
{
	int i, rc = 0;

	for (i = 0 ; i < mm_array_size ; i++) {
		rc = hl_unsecure_register_range(hdev, mm_reg_range_array[i],
			offset, pb_blocks, sgs_array, blocks_array_size);

		if (rc)
			return rc;
	}

	return rc;
}

/**
 * hl_ack_pb_security_violations - Ack security violation
 *
 * @hdev: pointer to hl_device structure
 * @pb_blocks: blocks array
 * @block_offset: additional offset to the block
 * @array_size: blocks array size
 *
 */
static void hl_ack_pb_security_violations(struct hl_device *hdev,
		const u32 pb_blocks[], u32 block_offset, int array_size)
{
	int i;
	u32 cause, addr, block_base;

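	/*
	 * For each block instance: read the error cause, report it through the
	 * ASIC-specific handler, then write the cause back to acknowledge it.
	 */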
	for (i = 0 ; i < array_size ; i++) {
		block_base = pb_blocks[i] + block_offset;
		cause = RREG32(block_base + HL_BLOCK_GLBL_ERR_CAUSE);
		if (cause) {
			addr = RREG32(block_base + HL_BLOCK_GLBL_ERR_ADDR);
			hdev->asic_funcs->pb_print_security_errors(hdev,
					block_base, cause, addr);
			WREG32(block_base + HL_BLOCK_GLBL_ERR_CAUSE, cause);
		}
	}
}

/**
 * hl_config_glbl_sec - set pb in HW according to given pb array
 *
 * @hdev: pointer to hl_device structure
 * @pb_blocks: blocks array
 * @sgs_array: pb array
 * @block_offset: additional offset to the block
 * @array_size: blocks array size
 *
 */
void hl_config_glbl_sec(struct hl_device *hdev, const u32 pb_blocks[],
		struct hl_block_glbl_sec sgs_array[], u32 block_offset,
		int array_size)
{
	int i, j;
	u32 sgs_base;

	if (hdev->pldm)
		usleep_range(100, 1000);

	for (i = 0 ; i < array_size ; i++) {
		sgs_base = block_offset + pb_blocks[i] +
				HL_BLOCK_GLBL_SEC_OFFS;

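		/*
		 * Copy the block's security bitmap into its GLBL_SEC
		 * registers, one 32-bit word at a time.
		 */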
		for (j = 0 ; j < HL_BLOCK_GLBL_SEC_LEN ; j++)
			WREG32(sgs_base + j * sizeof(u32),
				sgs_array[i].sec_array[j]);
	}
}

/**
 * hl_secure_block - locally memsets a block to 0
 *
 * @hdev: pointer to hl_device structure
 * @sgs_array: pb array to clear
 * @array_size: blocks array size
 *
 */
void hl_secure_block(struct hl_device *hdev,
		struct hl_block_glbl_sec sgs_array[], int array_size)
{
	int i;

	for (i = 0 ; i < array_size ; i++)
		memset((char *)(sgs_array[i].sec_array), 0,
			HL_BLOCK_GLBL_SEC_SIZE);
}

/**
 * hl_init_pb_with_mask - set selected pb instances with mask in HW according
 *                        to given configuration
 *
 * @hdev: pointer to hl_device structure
 * @num_dcores: number of dcores to apply the configuration to
 *              set to HL_PB_SHARED if it should be applied only once
 * @dcore_offset: offset between dcores
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @user_regs_array: unsecured register array
 * @user_regs_array_size: unsecured register array size
 * @mask: enabled instances mask: 1- enabled, 0- disabled
 */
int hl_init_pb_with_mask(struct hl_device *hdev, u32 num_dcores,
		u32 dcore_offset, u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size,
		const u32 *user_regs_array, u32 user_regs_array_size, u64 mask)
{
	int i, j;
	struct hl_block_glbl_sec *glbl_sec;

	glbl_sec = kcalloc(blocks_array_size,
			sizeof(struct hl_block_glbl_sec),
			GFP_KERNEL);
	if (!glbl_sec)
		return -ENOMEM;

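	/*
	 * Build the security map in host memory: start with all registers
	 * secured, then clear protection only for the user-accessible ones.
	 */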
	hl_secure_block(hdev, glbl_sec, blocks_array_size);
	hl_unsecure_registers(hdev, user_regs_array, user_regs_array_size, 0,
			pb_blocks, glbl_sec, blocks_array_size);

	/* Fill all blocks with the same configuration */
	for (i = 0 ; i < num_dcores ; i++) {
		for (j = 0 ; j < num_instances ; j++) {
			int seq = i * num_instances + j;

			if (!(mask & BIT_ULL(seq)))
				continue;

			hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
					i * dcore_offset + j * instance_offset,
					blocks_array_size);
		}
	}

	kfree(glbl_sec);

	return 0;
}

/**
 * hl_init_pb - set pb in HW according to given configuration
 *
 * @hdev: pointer to hl_device structure
 * @num_dcores: number of dcores to apply the configuration to
 *              set to HL_PB_SHARED if it should be applied only once
 * @dcore_offset: offset between dcores
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @user_regs_array: unsecured register array
 * @user_regs_array_size: unsecured register array size
 *
 */
int hl_init_pb(struct hl_device *hdev, u32 num_dcores, u32 dcore_offset,
		u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size,
		const u32 *user_regs_array, u32 user_regs_array_size)
{
	return hl_init_pb_with_mask(hdev, num_dcores, dcore_offset,
			num_instances, instance_offset, pb_blocks,
			blocks_array_size, user_regs_array,
			user_regs_array_size, ULLONG_MAX);
}

/**
 * hl_init_pb_ranges_with_mask - set pb instances using mask in HW according to
 *                               given configuration, unsecuring register
 *                               ranges instead of specific registers
 *
 * @hdev: pointer to hl_device structure
 * @num_dcores: number of dcores to apply the configuration to
 *              set to HL_PB_SHARED if it should be applied only once
 * @dcore_offset: offset between dcores
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @user_regs_range_array: unsecured register range array
 * @user_regs_range_array_size: unsecured register range array size
 * @mask: enabled instances mask: 1- enabled, 0- disabled
 */
int hl_init_pb_ranges_with_mask(struct hl_device *hdev, u32 num_dcores,
		u32 dcore_offset, u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size,
		const struct range *user_regs_range_array,
		u32 user_regs_range_array_size, u64 mask)
{
	int i, j, rc = 0;
	struct hl_block_glbl_sec *glbl_sec;

	glbl_sec = kcalloc(blocks_array_size,
			sizeof(struct hl_block_glbl_sec),
			GFP_KERNEL);
	if (!glbl_sec)
		return -ENOMEM;

	hl_secure_block(hdev, glbl_sec, blocks_array_size);
	rc = hl_unsecure_registers_range(hdev, user_regs_range_array,
			user_regs_range_array_size, 0, pb_blocks, glbl_sec,
			blocks_array_size);
	if (rc)
		goto free_glbl_sec;

	/* Fill all blocks with the same configuration */
	for (i = 0 ; i < num_dcores ; i++) {
		for (j = 0 ; j < num_instances ; j++) {
			int seq = i * num_instances + j;

			if (!(mask & BIT_ULL(seq)))
				continue;

			hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
					i * dcore_offset + j * instance_offset,
					blocks_array_size);
		}
	}

free_glbl_sec:
	kfree(glbl_sec);

	return rc;
}

/**
 * hl_init_pb_ranges - set pb in HW according to given configuration, unsecuring
 *                     register ranges instead of specific registers
 *
 * @hdev: pointer to hl_device structure
 * @num_dcores: number of dcores to apply the configuration to
 *              set to HL_PB_SHARED if it should be applied only once
 * @dcore_offset: offset between dcores
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @user_regs_range_array: unsecured register range array
 * @user_regs_range_array_size: unsecured register range array size
 *
 */
int hl_init_pb_ranges(struct hl_device *hdev, u32 num_dcores,
		u32 dcore_offset, u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size,
		const struct range *user_regs_range_array,
		u32 user_regs_range_array_size)
{
	return hl_init_pb_ranges_with_mask(hdev, num_dcores, dcore_offset,
			num_instances, instance_offset, pb_blocks,
			blocks_array_size, user_regs_range_array,
			user_regs_range_array_size, ULLONG_MAX);
}

/**
 * hl_init_pb_single_dcore - set pb for a single dcore in HW
 * according to given configuration
 *
 * @hdev: pointer to hl_device structure
 * @dcore_offset: offset from the dcore0
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @user_regs_array: unsecured register array
 * @user_regs_array_size: unsecured register array size
 *
 */
int hl_init_pb_single_dcore(struct hl_device *hdev, u32 dcore_offset,
		u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size,
		const u32 *user_regs_array, u32 user_regs_array_size)
{
	int i, rc = 0;
	struct hl_block_glbl_sec *glbl_sec;

	glbl_sec = kcalloc(blocks_array_size,
			sizeof(struct hl_block_glbl_sec),
			GFP_KERNEL);
	if (!glbl_sec)
		return -ENOMEM;

	hl_secure_block(hdev, glbl_sec, blocks_array_size);
	rc = hl_unsecure_registers(hdev, user_regs_array, user_regs_array_size,
			0, pb_blocks, glbl_sec, blocks_array_size);
	if (rc)
		goto free_glbl_sec;

	/* Fill all blocks with the same configuration */
	for (i = 0 ; i < num_instances ; i++)
		hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
				dcore_offset + i * instance_offset,
				blocks_array_size);

free_glbl_sec:
	kfree(glbl_sec);

	return rc;
}

/**
 * hl_init_pb_ranges_single_dcore - set pb for a single dcore in HW according
 *                                  to given configuration, unsecuring
 *                                  register ranges instead of specific
 *                                  registers
 *
 * @hdev: pointer to hl_device structure
 * @dcore_offset: offset from the dcore0
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @user_regs_range_array: unsecured register range array
 * @user_regs_range_array_size: unsecured register range array size
 *
 */
int hl_init_pb_ranges_single_dcore(struct hl_device *hdev, u32 dcore_offset,
		u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size,
		const struct range *user_regs_range_array, u32 user_regs_range_array_size)
{
	int i, rc;
	struct hl_block_glbl_sec *glbl_sec;

	glbl_sec = kcalloc(blocks_array_size,
			sizeof(struct hl_block_glbl_sec),
			GFP_KERNEL);
	if (!glbl_sec)
		return -ENOMEM;

	hl_secure_block(hdev, glbl_sec, blocks_array_size);
	rc = hl_unsecure_registers_range(hdev, user_regs_range_array,
			user_regs_range_array_size, 0, pb_blocks, glbl_sec,
			blocks_array_size);
	if (rc)
		goto free_glbl_sec;

	/* Fill all blocks with the same configuration */
	for (i = 0 ; i < num_instances ; i++)
		hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
				dcore_offset + i * instance_offset,
				blocks_array_size);

free_glbl_sec:
	kfree(glbl_sec);

	return rc;
}

/**
 * hl_ack_pb_with_mask - ack pb with mask in HW according to given configuration
 *
 * @hdev: pointer to hl_device structure
 * @num_dcores: number of dcores to apply the configuration to
 *              set to HL_PB_SHARED if it should be applied only once
 * @dcore_offset: offset between dcores
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @mask: enabled instances mask: 1- enabled, 0- disabled
 *
 */
void hl_ack_pb_with_mask(struct hl_device *hdev, u32 num_dcores,
		u32 dcore_offset, u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size, u64 mask)
{
	int i, j;

	/* ack all blocks */
	for (i = 0 ; i < num_dcores ; i++) {
		for (j = 0 ; j < num_instances ; j++) {
			int seq = i * num_instances + j;

			if (!(mask & BIT_ULL(seq)))
				continue;

			hl_ack_pb_security_violations(hdev, pb_blocks,
					i * dcore_offset + j * instance_offset,
					blocks_array_size);
		}
	}
}

/**
 * hl_ack_pb - ack pb in HW according to given configuration
 *
 * @hdev: pointer to hl_device structure
 * @num_dcores: number of dcores to apply the configuration to
 *              set to HL_PB_SHARED if it should be applied only once
 * @dcore_offset: offset between dcores
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 *
 */
void hl_ack_pb(struct hl_device *hdev, u32 num_dcores, u32 dcore_offset,
		u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size)
{
	hl_ack_pb_with_mask(hdev, num_dcores, dcore_offset, num_instances,
			instance_offset, pb_blocks, blocks_array_size,
			ULLONG_MAX);
}

/**
 * hl_ack_pb_single_dcore - ack pb for a single dcore in HW
 * according to given configuration
 *
 * @hdev: pointer to hl_device structure
 * @dcore_offset: offset from dcore0
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 *
 */
void hl_ack_pb_single_dcore(struct hl_device *hdev, u32 dcore_offset,
		u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size)
{
	int i;

	/* ack all blocks */
	for (i = 0 ; i < num_instances ; i++)
		hl_ack_pb_security_violations(hdev, pb_blocks,
				dcore_offset + i * instance_offset,
				blocks_array_size);
}

static u32 hl_automated_get_block_base_addr(struct hl_device *hdev,
		struct hl_special_block_info *block_info,
		u32 major, u32 minor, u32 sub_minor)
{
	u32 fw_block_base_address = block_info->base_addr +
			major * block_info->major_offset +
			minor * block_info->minor_offset +
			sub_minor * block_info->sub_minor_offset;
	struct asic_fixed_properties *prop = &hdev->asic_prop;

	/* The calculation above returns an address for FW use, so it must be
	 * converted to a driver-relative address.
	 */
	return (fw_block_base_address - lower_32_bits(prop->cfg_base_address));
}

static bool hl_check_block_type_exclusion(struct hl_skip_blocks_cfg *skip_blocks_cfg,
		int block_type)
{
	int i;

	/* Check if block type is listed in the exclusion list of block types */
	for (i = 0 ; i < skip_blocks_cfg->block_types_len ; i++)
		if (block_type == skip_blocks_cfg->block_types[i])
			return true;

	return false;
}

static bool hl_check_block_range_exclusion(struct hl_device *hdev,
		struct hl_skip_blocks_cfg *skip_blocks_cfg,
		struct hl_special_block_info *block_info,
		u32 major, u32 minor, u32 sub_minor)
{
	u32 blocks_in_range, block_base_addr_in_range, block_base_addr;
	int i, j;

	block_base_addr = hl_automated_get_block_base_addr(hdev, block_info,
			major, minor, sub_minor);

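	/*
	 * Expand each excluded address range into its HL_BLOCK_SIZE-spaced
	 * block base addresses and compare them against the checked block.
	 */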
	for (i = 0 ; i < skip_blocks_cfg->block_ranges_len ; i++) {
		blocks_in_range = (skip_blocks_cfg->block_ranges[i].end -
				skip_blocks_cfg->block_ranges[i].start) /
				HL_BLOCK_SIZE + 1;
		for (j = 0 ; j < blocks_in_range ; j++) {
			block_base_addr_in_range = skip_blocks_cfg->block_ranges[i].start +
					j * HL_BLOCK_SIZE;
			if (block_base_addr == block_base_addr_in_range)
				return true;
		}
	}

	return false;
}

static int hl_read_glbl_errors(struct hl_device *hdev,
		u32 blk_idx, u32 major, u32 minor, u32 sub_minor, void *data)
{
	struct hl_special_block_info *special_blocks = hdev->asic_prop.special_blocks;
	struct hl_special_block_info *current_block = &special_blocks[blk_idx];
	u32 glbl_err_addr, glbl_err_cause, addr_val, cause_val, block_base,
		base = current_block->base_addr - lower_32_bits(hdev->asic_prop.cfg_base_address);
	int i;

	block_base = base + major * current_block->major_offset +
			minor * current_block->minor_offset +
			sub_minor * current_block->sub_minor_offset;

	glbl_err_cause = block_base + HL_GLBL_ERR_CAUSE_OFFSET;
	cause_val = RREG32(glbl_err_cause);
	if (!cause_val)
		return 0;

	glbl_err_addr = block_base + HL_GLBL_ERR_ADDR_OFFSET;
	addr_val = RREG32(glbl_err_addr);

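	/*
	 * Report every asserted cause bit together with the faulting address
	 * captured by the HW, then write the value back to acknowledge the
	 * errors.
	 */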
	for (i = 0 ; i < hdev->asic_prop.glbl_err_cause_num ; i++) {
		if (cause_val & BIT(i))
			dev_err_ratelimited(hdev->dev,
				"%s, addr %#llx\n",
				hl_glbl_error_cause[i],
				hdev->asic_prop.cfg_base_address + block_base +
				FIELD_GET(HL_GLBL_ERR_ADDRESS_MASK, addr_val));
	}

	WREG32(glbl_err_cause, cause_val);

	return 0;
}

void hl_check_for_glbl_errors(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_special_blocks_cfg special_blocks_cfg;
	struct iterate_special_ctx glbl_err_iter;
	int rc;

	memset(&special_blocks_cfg, 0, sizeof(special_blocks_cfg));
	special_blocks_cfg.skip_blocks_cfg = &prop->skip_special_blocks_cfg;

	glbl_err_iter.fn = &hl_read_glbl_errors;
	glbl_err_iter.data = &special_blocks_cfg;

	rc = hl_iterate_special_blocks(hdev, &glbl_err_iter);
	if (rc)
		dev_err_ratelimited(hdev->dev,
			"Could not iterate special blocks, glbl error check failed\n");
}

int hl_iterate_special_blocks(struct hl_device *hdev, struct iterate_special_ctx *ctx)
{
	struct hl_special_blocks_cfg *special_blocks_cfg =
			(struct hl_special_blocks_cfg *)ctx->data;
	struct hl_skip_blocks_cfg *skip_blocks_cfg =
			special_blocks_cfg->skip_blocks_cfg;
	u32 major, minor, sub_minor, blk_idx, num_blocks;
	struct hl_special_block_info *block_info_arr;
	int rc;

	block_info_arr = hdev->asic_prop.special_blocks;
	if (!block_info_arr)
		return -EINVAL;

	num_blocks = hdev->asic_prop.num_of_special_blocks;

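	/*
	 * Walk every instance of every special block across the major, minor
	 * and sub_minor dimensions, skipping excluded block types, excluded
	 * address ranges and instances rejected by the optional skip hook.
	 */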
	for (blk_idx = 0 ; blk_idx < num_blocks ; blk_idx++, block_info_arr++) {
		if (hl_check_block_type_exclusion(skip_blocks_cfg, block_info_arr->block_type))
			continue;

		for (major = 0 ; major < block_info_arr->major ; major++) {
			minor = 0;
			do {
				sub_minor = 0;
				do {
					if ((hl_check_block_range_exclusion(hdev,
							skip_blocks_cfg, block_info_arr,
							major, minor, sub_minor)) ||
						(skip_blocks_cfg->skip_block_hook &&
						skip_blocks_cfg->skip_block_hook(hdev,
							special_blocks_cfg,
							blk_idx, major, minor, sub_minor))) {
						sub_minor++;
						continue;
					}

					rc = ctx->fn(hdev, blk_idx, major, minor,
								sub_minor, ctx->data);
					if (rc)
						return rc;

					sub_minor++;
				} while (sub_minor < block_info_arr->sub_minor);

				minor++;
			} while (minor < block_info_arr->minor);
		}
	}

	return 0;
}