// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_iro_hsi.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"

#define QED_INIT_MAX_POLL_COUNT 100
#define QED_INIT_POLL_PERIOD_US 500

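/* PXP global window mapping: a non-zero entry maps that window to the 4K
 * region at (value << 12); zero entries are left unconfigured by
 * qed_gtt_init().
 */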
static u32 pxp_global_win[] = {
	0,
	0,
	0x1c02, /* win 2: addr=0x1c02000, size=4096 bytes */
	0x1c80, /* win 3: addr=0x1c80000, size=4096 bytes */
	0x1d00, /* win 4: addr=0x1d00000, size=4096 bytes */
	0x1d01, /* win 5: addr=0x1d01000, size=4096 bytes */
	0x1d02, /* win 6: addr=0x1d02000, size=4096 bytes */
	0x1d80, /* win 7: addr=0x1d80000, size=4096 bytes */
	0x1d81, /* win 8: addr=0x1d81000, size=4096 bytes */
	0x1d82, /* win 9: addr=0x1d82000, size=4096 bytes */
	0x1e00, /* win 10: addr=0x1e00000, size=4096 bytes */
	0x1e01, /* win 11: addr=0x1e01000, size=4096 bytes */
	0x1e80, /* win 12: addr=0x1e80000, size=4096 bytes */
	0x1f00, /* win 13: addr=0x1f00000, size=4096 bytes */
	0x1c08, /* win 14: addr=0x1c08000, size=4096 bytes */
	0,
	0,
	0,
	0,
};

/* IRO Array */
static const u32 iro_arr[] = {
	0x00000000, 0x00000000, 0x00080000,
	0x00004478, 0x00000008, 0x00080000,
	0x00003288, 0x00000088, 0x00880000,
	0x000058a8, 0x00000020, 0x00200000,
	0x00003188, 0x00000008, 0x00080000,
	0x00000b00, 0x00000008, 0x00040000,
	0x00000a80, 0x00000008, 0x00040000,
	0x00000000, 0x00000008, 0x00020000,
	0x00000080, 0x00000008, 0x00040000,
	0x00000084, 0x00000008, 0x00020000,
	0x00005798, 0x00000004, 0x00040000,
	0x00004e50, 0x00000000, 0x00780000,
	0x00003e40, 0x00000000, 0x00780000,
	0x00004500, 0x00000000, 0x00780000,
	0x00003210, 0x00000000, 0x00780000,
	0x00003b50, 0x00000000, 0x00780000,
	0x00007f58, 0x00000000, 0x00780000,
	0x00005fd8, 0x00000000, 0x00080000,
	0x00007100, 0x00000000, 0x00080000,
	0x0000af20, 0x00000000, 0x00080000,
	0x00004398, 0x00000000, 0x00080000,
	0x0000a5a0, 0x00000000, 0x00080000,
	0x0000bde8, 0x00000000, 0x00080000,
	0x00000020, 0x00000004, 0x00040000,
	0x00005688, 0x00000010, 0x00100000,
	0x0000c210, 0x00000030, 0x00300000,
	0x0000b108, 0x00000038, 0x00380000,
	0x00003d20, 0x00000080, 0x00400000,
	0x0000bf60, 0x00000000, 0x00040000,
	0x00004560, 0x00040080, 0x00040000,
	0x000001f8, 0x00000004, 0x00040000,
	0x00003d60, 0x00000080, 0x00200000,
	0x00008960, 0x00000040, 0x00300000,
	0x0000e840, 0x00000060, 0x00600000,
	0x00004698, 0x00000080, 0x00380000,
	0x000107b8, 0x000000c0, 0x00c00000,
	0x000001f8, 0x00000002, 0x00020000,
	0x0000a260, 0x00000000, 0x01080000,
	0x0000a368, 0x00000008, 0x00080000,
	0x000001c0, 0x00000008, 0x00080000,
	0x000001f8, 0x00000008, 0x00080000,
	0x00000ac0, 0x00000008, 0x00080000,
	0x00002578, 0x00000008, 0x00080000,
	0x000024f8, 0x00000008, 0x00080000,
	0x00000280, 0x00000008, 0x00080000,
	0x00000680, 0x00080018, 0x00080000,
	0x00000b78, 0x00080018, 0x00020000,
	0x0000c600, 0x00000058, 0x003c0000,
	0x00012038, 0x00000020, 0x00100000,
	0x00011b00, 0x00000048, 0x00180000,
	0x00009650, 0x00000050, 0x00200000,
	0x00008b10, 0x00000040, 0x00280000,
	0x000116c0, 0x00000018, 0x00100000,
	0x0000c808, 0x00000048, 0x00380000,
	0x00011790, 0x00000020, 0x00200000,
	0x000046d0, 0x00000080, 0x00100000,
	0x00003618, 0x00000010, 0x00100000,
	0x0000a9e8, 0x00000008, 0x00010000,
	0x000097a0, 0x00000008, 0x00010000,
	0x00011a10, 0x00000008, 0x00010000,
	0x0000e9f8, 0x00000008, 0x00010000,
	0x00012648, 0x00000008, 0x00010000,
	0x000121c8, 0x00000008, 0x00010000,
	0x0000af08, 0x00000030, 0x00100000,
	0x0000d748, 0x00000028, 0x00280000,
	0x00009e68, 0x00000018, 0x00180000,
	0x00009fe8, 0x00000008, 0x00080000,
	0x00013ea8, 0x00000008, 0x00080000,
	0x00012f18, 0x00000018, 0x00180000,
	0x0000dfe8, 0x00500288, 0x00100000,
	0x000131a0, 0x00000138, 0x00280000,
};

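/* Point the device at the firmware's internal RAM offsets (IRO) table */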
void qed_init_iro_array(struct qed_dev *cdev)
{
	cdev->iro_arr = iro_arr + E4_IRO_ARR_OFFSET;
}

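/* Store a single runtime register value in the rt_data shadow and mark the
 * entry valid; it is written to the chip by qed_init_rt().
 */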
void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, u32 rt_offset, u32 val)
{
	if (rt_offset >= RUNTIME_ARRAY_SIZE) {
		DP_ERR(p_hwfn,
		       "Avoid storing %u in rt_data at index %u!\n",
		       val, rt_offset);
		return;
	}

	p_hwfn->rt_data.init_val[rt_offset] = val;
	p_hwfn->rt_data.b_valid[rt_offset] = true;
}

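/* Store a block of runtime values ('size' is in bytes) in the rt_data
 * shadow and mark the corresponding entries valid.
 */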
void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
			   u32 rt_offset, u32 *p_val, size_t size)
{
	size_t i;

	if ((rt_offset + size - 1) >= RUNTIME_ARRAY_SIZE) {
		DP_ERR(p_hwfn,
		       "Avoid storing values in rt_data at indices %u-%u!\n",
		       rt_offset,
		       (u32)(rt_offset + size - 1));
		return;
	}

	for (i = 0; i < size / sizeof(u32); i++) {
		p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i];
		p_hwfn->rt_data.b_valid[rt_offset + i] = true;
	}
}

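/* Write the valid runtime entries in [rt_offset, rt_offset + size) to the
 * chip. Contiguous valid segments are transferred with DMAE when wide-bus
 * access is required; otherwise plain register writes are used. Entries
 * are invalidated once written.
 */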
static int qed_init_rt(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       u32 addr, u16 rt_offset, u16 size, bool b_must_dmae)
{
	u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
	bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
	u16 i, j, segment;
	int rc = 0;

	/* Since not all RT entries are initialized, go over the RT and
	 * for each segment of initialized values use DMA.
	 */
	for (i = 0; i < size; i++) {
		if (!p_valid[i])
			continue;

		/* In case there isn't any wide-bus configuration here,
		 * simply write the data instead of using dmae.
		 */
		if (!b_must_dmae) {
			qed_wr(p_hwfn, p_ptt, addr + (i << 2), p_init_val[i]);
			p_valid[i] = false;
			continue;
		}

		/* Start of a new segment */
		for (segment = 1; i + segment < size; segment++)
			if (!p_valid[i + segment])
				break;

		rc = qed_dmae_host2grc(p_hwfn, p_ptt,
				       (uintptr_t)(p_init_val + i),
				       addr + (i << 2), segment, NULL);
		if (rc)
			return rc;

		/* invalidate after writing */
		for (j = i; j < (u32)(i + segment); j++)
			p_valid[j] = false;

		/* Jump over the entire segment, including invalid entry */
		i += segment;
	}

	return rc;
}

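/* Allocate the shadow arrays used to accumulate runtime init values;
 * not needed for VFs.
 */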
int qed_init_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_rt_data *rt_data = &p_hwfn->rt_data;

	if (IS_VF(p_hwfn->cdev))
		return 0;

	rt_data->b_valid = kcalloc(RUNTIME_ARRAY_SIZE, sizeof(bool),
				   GFP_KERNEL);
	if (!rt_data->b_valid)
		return -ENOMEM;

	rt_data->init_val = kcalloc(RUNTIME_ARRAY_SIZE, sizeof(u32),
				    GFP_KERNEL);
	if (!rt_data->init_val) {
		kfree(rt_data->b_valid);
		rt_data->b_valid = NULL;
		return -ENOMEM;
	}

	return 0;
}

void qed_init_free(struct qed_hwfn *p_hwfn)
{
	kfree(p_hwfn->rt_data.init_val);
	p_hwfn->rt_data.init_val = NULL;
	kfree(p_hwfn->rt_data.b_valid);
	p_hwfn->rt_data.b_valid = NULL;
}

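/* Write a dword array to consecutive GRC addresses, using DMAE for
 * wide-bus destinations or large enough arrays and register writes
 * otherwise.
 */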
static int qed_init_array_dmae(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt,
			       u32 addr,
			       u32 dmae_data_offset,
			       u32 size,
			       const u32 *buf,
			       bool b_must_dmae,
			       bool b_can_dmae)
{
	int rc = 0;

	/* Perform DMAE only for lengthy enough sections or for wide-bus */
	if (!b_can_dmae || (!b_must_dmae && (size < 16))) {
		const u32 *data = buf + dmae_data_offset;
		u32 i;

		for (i = 0; i < size; i++)
			qed_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]);
	} else {
		rc = qed_dmae_host2grc(p_hwfn, p_ptt,
				       (uintptr_t)(buf + dmae_data_offset),
				       addr, size, NULL);
	}

	return rc;
}

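/* Zero 'fill_count' dwords at 'addr' with a single DMAE transaction,
 * using a static zero buffer as a replicated source.
 */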
static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      u32 addr, u32 fill_count)
{
	static u32 zero_buffer[DMAE_MAX_RW_SIZE];
	struct qed_dmae_params params = {};

	memset(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);

	/* invoke the DMAE virtual/physical buffer API with
	 * 1. DMAE init channel
	 * 2. addr,
	 * 3. zero_buffer,
	 * 4. fill_count
	 */
	SET_FIELD(params.flags, QED_DMAE_PARAMS_RW_REPL_SRC, 0x1);
	return qed_dmae_host2grc(p_hwfn, p_ptt,
				 (uintptr_t)(&zero_buffer[0]),
				 addr, fill_count, &params);
}

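/* Fill 'fill_count' dwords at 'addr' with a constant value using
 * register writes.
 */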
static void qed_init_fill(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  u32 addr, u32 fill, u32 fill_count)
{
	u32 i;

	for (i = 0; i < fill_count; i++, addr += sizeof(u32))
		qed_wr(p_hwfn, p_ptt, addr, fill);
}

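/* Handle an init-ops write whose source is an array in the firmware data
 * blob: zipped (decompressed into p_hwfn->unzip_buf first), a repeated
 * pattern, or a standard dword array.
 */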
static int qed_init_cmd_array(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      struct init_write_op *cmd,
			      bool b_must_dmae, bool b_can_dmae)
{
	u32 dmae_array_offset = le32_to_cpu(cmd->args.array_offset);
	u32 data = le32_to_cpu(cmd->data);
	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;

	u32 offset, output_len, input_len, max_size;
	struct qed_dev *cdev = p_hwfn->cdev;
	union init_array_hdr *hdr;
	const u32 *array_data;
	int rc = 0;
	u32 size;

	array_data = cdev->fw_data->arr_data;

	hdr = (union init_array_hdr *)(array_data + dmae_array_offset);
	data = le32_to_cpu(hdr->raw.data);
	switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
	case INIT_ARR_ZIPPED:
		offset = dmae_array_offset + 1;
		input_len = GET_FIELD(data,
				      INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE);
		max_size = MAX_ZIPPED_SIZE * 4;
		memset(p_hwfn->unzip_buf, 0, max_size);

		output_len = qed_unzip_data(p_hwfn, input_len,
					    (u8 *)&array_data[offset],
					    max_size, (u8 *)p_hwfn->unzip_buf);
		if (output_len) {
			rc = qed_init_array_dmae(p_hwfn, p_ptt, addr, 0,
						 output_len,
						 p_hwfn->unzip_buf,
						 b_must_dmae, b_can_dmae);
		} else {
			DP_NOTICE(p_hwfn, "Failed to unzip dmae data\n");
			rc = -EINVAL;
		}
		break;
	case INIT_ARR_PATTERN:
	{
		u32 repeats = GET_FIELD(data,
					INIT_ARRAY_PATTERN_HDR_REPETITIONS);
		u32 i;

		size = GET_FIELD(data, INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE);

		for (i = 0; i < repeats; i++, addr += size << 2) {
			rc = qed_init_array_dmae(p_hwfn, p_ptt, addr,
						 dmae_array_offset + 1,
						 size, array_data,
						 b_must_dmae, b_can_dmae);
			if (rc)
				break;
		}
		break;
	}
	case INIT_ARR_STANDARD:
		size = GET_FIELD(data, INIT_ARRAY_STANDARD_HDR_SIZE);
		rc = qed_init_array_dmae(p_hwfn, p_ptt, addr,
					 dmae_array_offset + 1,
					 size, array_data,
					 b_must_dmae, b_can_dmae);
		break;
	}

	return rc;
}

/* init_ops write command */
static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt,
			   struct init_write_op *p_cmd, bool b_can_dmae)
{
	u32 data = le32_to_cpu(p_cmd->data);
	bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
	union init_write_args *arg = &p_cmd->args;
	int rc = 0;

	/* Sanitize */
	if (b_must_dmae && !b_can_dmae) {
		DP_NOTICE(p_hwfn,
			  "Need to write to %08x for Wide-bus but DMAE isn't allowed\n",
			  addr);
		return -EINVAL;
	}

	switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
	case INIT_SRC_INLINE:
		data = le32_to_cpu(p_cmd->args.inline_val);
		qed_wr(p_hwfn, p_ptt, addr, data);
		break;
	case INIT_SRC_ZEROS:
		data = le32_to_cpu(p_cmd->args.zeros_count);
		if (b_must_dmae || (b_can_dmae && (data >= 64)))
			rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, data);
		else
			qed_init_fill(p_hwfn, p_ptt, addr, 0, data);
		break;
	case INIT_SRC_ARRAY:
		rc = qed_init_cmd_array(p_hwfn, p_ptt, p_cmd,
					b_must_dmae, b_can_dmae);
		break;
	case INIT_SRC_RUNTIME:
		qed_init_rt(p_hwfn, p_ptt, addr,
			    le16_to_cpu(arg->runtime.offset),
			    le16_to_cpu(arg->runtime.size),
			    b_must_dmae);
		break;
	}

	return rc;
}

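/* Comparison helpers for the init_ops poll commands */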
static inline bool comp_eq(u32 val, u32 expected_val)
{
	return val == expected_val;
}

static inline bool comp_and(u32 val, u32 expected_val)
{
	return (val & expected_val) == expected_val;
}

static inline bool comp_or(u32 val, u32 expected_val)
{
	return (val | expected_val) > 0;
}

/* init_ops read/poll commands */
static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt, struct init_read_op *cmd)
{
	bool (*comp_check)(u32 val, u32 expected_val);
	u32 delay = QED_INIT_POLL_PERIOD_US, val;
	u32 data, addr, poll;
	int i;

	data = le32_to_cpu(cmd->op_data);
	addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
	poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE);

	val = qed_rd(p_hwfn, p_ptt, addr);

	if (poll == INIT_POLL_NONE)
		return;

	switch (poll) {
	case INIT_POLL_EQ:
		comp_check = comp_eq;
		break;
	case INIT_POLL_OR:
		comp_check = comp_or;
		break;
	case INIT_POLL_AND:
		comp_check = comp_and;
		break;
	default:
		DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
		       cmd->op_data);
		return;
	}

	data = le32_to_cpu(cmd->expected_val);
	for (i = 0;
	     i < QED_INIT_MAX_POLL_COUNT && !comp_check(val, data);
	     i++) {
		udelay(delay);
		val = qed_rd(p_hwfn, p_ptt, addr);
	}

	if (i == QED_INIT_MAX_POLL_COUNT) {
		DP_ERR(p_hwfn,
		       "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n",
		       addr, le32_to_cpu(cmd->expected_val),
		       val, le32_to_cpu(cmd->op_data));
	}
}

/* init_ops callbacks entry point */
static int qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt,
			   struct init_callback_op *p_cmd)
{
	int rc;

	switch (p_cmd->callback_id) {
	case DMAE_READY_CB:
		rc = qed_dmae_sanity(p_hwfn, p_ptt, "engine_phase");
		break;
	default:
		DP_NOTICE(p_hwfn, "Unexpected init op callback ID %d\n",
			  p_cmd->callback_id);
		return -EINVAL;
	}

	return rc;
}

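/* Recursively evaluate the firmware modes tree at *p_offset against the
 * active 'modes' bitmap; returns 1 on a match, 0 otherwise.
 */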
static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn,
				  u16 *p_offset, int modes)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	const u8 *modes_tree_buf;
	u8 arg1, arg2, tree_val;

	modes_tree_buf = cdev->fw_data->modes_tree_buf;
	tree_val = modes_tree_buf[(*p_offset)++];
	switch (tree_val) {
	case INIT_MODE_OP_NOT:
		return qed_init_cmd_mode_match(p_hwfn, p_offset, modes) ^ 1;
	case INIT_MODE_OP_OR:
		arg1 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
		arg2 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
		return arg1 | arg2;
	case INIT_MODE_OP_AND:
		arg1 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
		arg2 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
		return arg1 & arg2;
	default:
		tree_val -= MAX_INIT_MODE_OPS;
		return (modes & BIT(tree_val)) ? 1 : 0;
	}
}

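/* Returns 0 if the IF_MODE condition matches, otherwise the number of
 * commands to skip.
 */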
static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn,
			     struct init_if_mode_op *p_cmd, int modes)
{
	u16 offset = le16_to_cpu(p_cmd->modes_buf_offset);

	if (qed_init_cmd_mode_match(p_hwfn, &offset, modes))
		return 0;
	else
		return GET_FIELD(le32_to_cpu(p_cmd->op_data),
				 INIT_IF_MODE_OP_CMD_OFFSET);
}

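/* Returns 0 if the IF_PHASE condition matches the current phase/phase_id,
 * otherwise the number of commands to skip.
 */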
static u32 qed_init_cmd_phase(struct init_if_phase_op *p_cmd,
			      u32 phase, u32 phase_id)
{
	u32 data = le32_to_cpu(p_cmd->phase_data);
	u32 op_data = le32_to_cpu(p_cmd->op_data);

	if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase &&
	      (GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID ||
	       GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id)))
		return GET_FIELD(op_data, INIT_IF_PHASE_OP_CMD_OFFSET);
	else
		return 0;
}

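/* Run the init-ops command stream for the given phase, executing
 * write/read/callback ops and skipping commands whose mode or phase
 * conditions do not match. During the engine phase, DMAE is not used
 * until the DMAE_READY_CB callback has run.
 */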
int qed_init_run(struct qed_hwfn *p_hwfn,
		 struct qed_ptt *p_ptt, int phase, int phase_id, int modes)
{
	bool b_dmae = (phase != PHASE_ENGINE);
	struct qed_dev *cdev = p_hwfn->cdev;
	u32 cmd_num, num_init_ops;
	union init_op *init_ops;
	int rc = 0;

	num_init_ops = cdev->fw_data->init_ops_size;
	init_ops = cdev->fw_data->init_ops;

	p_hwfn->unzip_buf = kzalloc(MAX_ZIPPED_SIZE * 4, GFP_ATOMIC);
	if (!p_hwfn->unzip_buf)
		return -ENOMEM;

	for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
		union init_op *cmd = &init_ops[cmd_num];
		u32 data = le32_to_cpu(cmd->raw.op_data);

		switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) {
		case INIT_OP_WRITE:
			rc = qed_init_cmd_wr(p_hwfn, p_ptt, &cmd->write,
					     b_dmae);
			break;
		case INIT_OP_READ:
			qed_init_cmd_rd(p_hwfn, p_ptt, &cmd->read);
			break;
		case INIT_OP_IF_MODE:
			cmd_num += qed_init_cmd_mode(p_hwfn, &cmd->if_mode,
						     modes);
			break;
		case INIT_OP_IF_PHASE:
			cmd_num += qed_init_cmd_phase(&cmd->if_phase,
						      phase, phase_id);
			break;
		case INIT_OP_DELAY:
			/* qed_init_run is always invoked from
			 * sleep-able context
			 */
			udelay(le32_to_cpu(cmd->delay.delay));
			break;

		case INIT_OP_CALLBACK:
			rc = qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
			if (phase == PHASE_ENGINE &&
			    cmd->callback.callback_id == DMAE_READY_CB)
				b_dmae = true;
			break;
		}

		if (rc)
			break;
	}

	kfree(p_hwfn->unzip_buf);
	p_hwfn->unzip_buf = NULL;
	return rc;
}

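/* Program the PXP global (GTT) windows that have a non-zero mapping */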
void qed_gtt_init(struct qed_hwfn *p_hwfn)
{
	u32 gtt_base;
	u32 i;

	/* Set the global windows */
	gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START;

	for (i = 0; i < ARRAY_SIZE(pxp_global_win); i++)
		if (pxp_global_win[i])
			REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE,
			       pxp_global_win[i]);
}

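/* Parse the firmware data blob using the bin_buffer_hdr section table at
 * its start, and record pointers to the FW version info, the init-ops
 * command stream, the init value arrays, the modes tree and the FW
 * overlays.
 */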
int qed_init_fw_data(struct qed_dev *cdev, const u8 *data)
{
	struct qed_fw_data *fw = cdev->fw_data;
	struct bin_buffer_hdr *buf_hdr;
	u32 offset, len;

	if (!data) {
		DP_NOTICE(cdev, "Invalid fw data\n");
		return -EINVAL;
	}

	/* First Dword contains metadata and should be skipped */
	buf_hdr = (struct bin_buffer_hdr *)data;

	offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
	fw->fw_ver_info = (struct fw_ver_info *)(data + offset);

	offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
	fw->init_ops = (union init_op *)(data + offset);

	offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
	fw->arr_data = (u32 *)(data + offset);

	offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
	fw->modes_tree_buf = (u8 *)(data + offset);
	len = buf_hdr[BIN_BUF_INIT_CMD].length;
	fw->init_ops_size = len / sizeof(struct init_raw_op);

	offset = buf_hdr[BIN_BUF_INIT_OVERLAYS].offset;
	fw->fw_overlays = (u32 *)(data + offset);
	len = buf_hdr[BIN_BUF_INIT_OVERLAYS].length;
	fw->fw_overlays_len = len;

	return 0;
}