// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"

#define QED_INIT_MAX_POLL_COUNT 100
#define QED_INIT_POLL_PERIOD_US 500

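/* PXP global window GTT entries; each non-zero value is a window base
 * address expressed in 4KB units (see the per-entry comments below).
 * Zero entries are skipped by qed_gtt_init().
 */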
static u32 pxp_global_win[] = {
	0,
	0,
	0x1c02, /* win 2: addr=0x1c02000, size=4096 bytes */
	0x1c80, /* win 3: addr=0x1c80000, size=4096 bytes */
	0x1d00, /* win 4: addr=0x1d00000, size=4096 bytes */
	0x1d01, /* win 5: addr=0x1d01000, size=4096 bytes */
	0x1d02, /* win 6: addr=0x1d02000, size=4096 bytes */
	0x1d80, /* win 7: addr=0x1d80000, size=4096 bytes */
	0x1d81, /* win 8: addr=0x1d81000, size=4096 bytes */
	0x1d82, /* win 9: addr=0x1d82000, size=4096 bytes */
	0x1e00, /* win 10: addr=0x1e00000, size=4096 bytes */
	0x1e01, /* win 11: addr=0x1e01000, size=4096 bytes */
	0x1e80, /* win 12: addr=0x1e80000, size=4096 bytes */
	0x1f00, /* win 13: addr=0x1f00000, size=4096 bytes */
	0x1c08, /* win 14: addr=0x1c08000, size=4096 bytes */
	0,
	0,
	0,
	0,
};

/* IRO Array */
static const u32 iro_arr[] = {
	0x00000000, 0x00000000, 0x00080000,
	0x00003288, 0x00000088, 0x00880000,
	0x000058e8, 0x00000020, 0x00200000,
	0x00000b00, 0x00000008, 0x00040000,
	0x00000a80, 0x00000008, 0x00040000,
	0x00000000, 0x00000008, 0x00020000,
	0x00000080, 0x00000008, 0x00040000,
	0x00000084, 0x00000008, 0x00020000,
	0x00005718, 0x00000004, 0x00040000,
	0x00004dd0, 0x00000000, 0x00780000,
	0x00003e40, 0x00000000, 0x00780000,
	0x00004480, 0x00000000, 0x00780000,
	0x00003210, 0x00000000, 0x00780000,
	0x00003b50, 0x00000000, 0x00780000,
	0x00007f58, 0x00000000, 0x00780000,
	0x00005f58, 0x00000000, 0x00080000,
	0x00007100, 0x00000000, 0x00080000,
	0x0000aea0, 0x00000000, 0x00080000,
	0x00004398, 0x00000000, 0x00080000,
	0x0000a5a0, 0x00000000, 0x00080000,
	0x0000bde8, 0x00000000, 0x00080000,
	0x00000020, 0x00000004, 0x00040000,
	0x000056c8, 0x00000010, 0x00100000,
	0x0000c210, 0x00000030, 0x00300000,
	0x0000b088, 0x00000038, 0x00380000,
	0x00003d20, 0x00000080, 0x00400000,
	0x0000bf60, 0x00000000, 0x00040000,
	0x00004560, 0x00040080, 0x00040000,
	0x000001f8, 0x00000004, 0x00040000,
	0x00003d60, 0x00000080, 0x00200000,
	0x00008960, 0x00000040, 0x00300000,
	0x0000e840, 0x00000060, 0x00600000,
	0x00004618, 0x00000080, 0x00380000,
	0x00010738, 0x000000c0, 0x00c00000,
	0x000001f8, 0x00000002, 0x00020000,
	0x0000a2a0, 0x00000000, 0x01080000,
	0x0000a3a8, 0x00000008, 0x00080000,
	0x000001c0, 0x00000008, 0x00080000,
	0x000001f8, 0x00000008, 0x00080000,
	0x00000ac0, 0x00000008, 0x00080000,
	0x00002578, 0x00000008, 0x00080000,
	0x000024f8, 0x00000008, 0x00080000,
	0x00000280, 0x00000008, 0x00080000,
	0x00000680, 0x00080018, 0x00080000,
	0x00000b78, 0x00080018, 0x00020000,
	0x0000c640, 0x00000050, 0x003c0000,
	0x00012038, 0x00000018, 0x00100000,
	0x00011b00, 0x00000040, 0x00180000,
	0x000095d0, 0x00000050, 0x00200000,
	0x00008b10, 0x00000040, 0x00280000,
	0x00011640, 0x00000018, 0x00100000,
	0x0000c828, 0x00000048, 0x00380000,
	0x00011710, 0x00000020, 0x00200000,
	0x00004650, 0x00000080, 0x00100000,
	0x00003618, 0x00000010, 0x00100000,
	0x0000a968, 0x00000008, 0x00010000,
	0x000097a0, 0x00000008, 0x00010000,
	0x00011990, 0x00000008, 0x00010000,
	0x0000f018, 0x00000008, 0x00010000,
	0x00012628, 0x00000008, 0x00010000,
	0x00011da8, 0x00000008, 0x00010000,
	0x0000aa78, 0x00000030, 0x00100000,
	0x0000d768, 0x00000028, 0x00280000,
	0x00009a58, 0x00000018, 0x00180000,
	0x00009bd8, 0x00000008, 0x00080000,
	0x00013a18, 0x00000008, 0x00080000,
	0x000126e8, 0x00000018, 0x00180000,
	0x0000e608, 0x00500288, 0x00100000,
	0x00012970, 0x00000138, 0x00280000,
};

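/* Publish the static IRO array above through cdev->iro_arr for use by the
 * rest of the driver.
 */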
void qed_init_iro_array(struct qed_dev *cdev)
{
	cdev->iro_arr = iro_arr;
}

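/* Store a single runtime (RT) register value in the shadow array and mark
 * the entry as valid; it is written to the chip later by qed_init_rt().
 */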
void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, u32 rt_offset, u32 val)
{
	p_hwfn->rt_data.init_val[rt_offset] = val;
	p_hwfn->rt_data.b_valid[rt_offset] = true;
}

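/* Store an aggregated runtime array starting at rt_offset; size is given in
 * bytes and is consumed in dword granularity.
 */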
void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
			   u32 rt_offset, u32 *p_val, size_t size)
{
	size_t i;

	for (i = 0; i < size / sizeof(u32); i++) {
		p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i];
		p_hwfn->rt_data.b_valid[rt_offset + i] = true;
	}
}

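/* Flush the valid runtime entries in [rt_offset, rt_offset + size) to the
 * chip at 'addr'. When the target is wide-bus, each segment of contiguous
 * valid entries is written with a single DMAE transaction; otherwise plain
 * register writes are used. Entries are invalidated once written.
 */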
static int qed_init_rt(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       u32 addr, u16 rt_offset, u16 size, bool b_must_dmae)
{
	u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
	bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
	u16 i, j, segment;
	int rc = 0;

	/* Since not all RT entries are initialized, go over the RT and
	 * for each segment of initialized values use DMA.
	 */
	for (i = 0; i < size; i++) {
		if (!p_valid[i])
			continue;

		/* In case there isn't any wide-bus configuration here,
		 * simply write the data instead of using dmae.
		 */
		if (!b_must_dmae) {
			qed_wr(p_hwfn, p_ptt, addr + (i << 2), p_init_val[i]);
			p_valid[i] = false;
			continue;
		}

		/* Start of a new segment */
		for (segment = 1; i + segment < size; segment++)
			if (!p_valid[i + segment])
				break;

		rc = qed_dmae_host2grc(p_hwfn, p_ptt,
				       (uintptr_t)(p_init_val + i),
				       addr + (i << 2), segment, NULL);
		if (rc)
			return rc;

		/* invalidate after writing */
		for (j = i; j < i + segment; j++)
			p_valid[j] = false;

		/* Jump over the entire segment, including invalid entry */
		i += segment;
	}

	return rc;
}

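/* Allocate the runtime shadow arrays for a PF; VFs do not use the runtime
 * data, so nothing is allocated for them.
 */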
int qed_init_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_rt_data *rt_data = &p_hwfn->rt_data;

	if (IS_VF(p_hwfn->cdev))
		return 0;

	rt_data->b_valid = kcalloc(RUNTIME_ARRAY_SIZE, sizeof(bool),
				   GFP_KERNEL);
	if (!rt_data->b_valid)
		return -ENOMEM;

	rt_data->init_val = kcalloc(RUNTIME_ARRAY_SIZE, sizeof(u32),
				    GFP_KERNEL);
	if (!rt_data->init_val) {
		kfree(rt_data->b_valid);
		rt_data->b_valid = NULL;
		return -ENOMEM;
	}

	return 0;
}

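/* Release the runtime shadow arrays allocated by qed_init_alloc(). */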
void qed_init_free(struct qed_hwfn *p_hwfn)
{
	kfree(p_hwfn->rt_data.init_val);
	p_hwfn->rt_data.init_val = NULL;
	kfree(p_hwfn->rt_data.b_valid);
	p_hwfn->rt_data.b_valid = NULL;
}

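/* Write 'size' dwords from 'buf' (starting at dmae_data_offset) to the chip
 * at 'addr'. Short, non wide-bus sections are written with plain register
 * accesses; everything else goes through DMAE.
 */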
static int qed_init_array_dmae(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt,
			       u32 addr,
			       u32 dmae_data_offset,
			       u32 size,
			       const u32 *buf,
			       bool b_must_dmae,
			       bool b_can_dmae)
{
	int rc = 0;

	/* Perform DMAE only for lengthy enough sections or for wide-bus */
	if (!b_can_dmae || (!b_must_dmae && (size < 16))) {
		const u32 *data = buf + dmae_data_offset;
		u32 i;

		for (i = 0; i < size; i++)
			qed_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]);
	} else {
		rc = qed_dmae_host2grc(p_hwfn, p_ptt,
				       (uintptr_t)(buf + dmae_data_offset),
				       addr, size, NULL);
	}

	return rc;
}

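/* Zero-fill 'fill_count' dwords at 'addr' via DMAE; the 'fill' argument is
 * currently unused, only zero-fill is supported here.
 */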
static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      u32 addr, u32 fill, u32 fill_count)
{
	static u32 zero_buffer[DMAE_MAX_RW_SIZE];
	struct qed_dmae_params params = {};

	memset(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);

	/* Clear the destination with a single host->GRC DMAE transaction
	 * sourced from the static zero buffer; QED_DMAE_PARAMS_RW_REPL_SRC
	 * replicates the source across all 'fill_count' dwords at 'addr'.
	 */
	SET_FIELD(params.flags, QED_DMAE_PARAMS_RW_REPL_SRC, 0x1);
	return qed_dmae_host2grc(p_hwfn, p_ptt,
				 (uintptr_t)(&zero_buffer[0]),
				 addr, fill_count, &params);
}

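/* Fill 'fill_count' consecutive dwords at 'addr' with 'fill' using plain
 * register writes.
 */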
static void qed_init_fill(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  u32 addr, u32 fill, u32 fill_count)
{
	u32 i;

	for (i = 0; i < fill_count; i++, addr += sizeof(u32))
		qed_wr(p_hwfn, p_ptt, addr, fill);
}

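/* Handle an INIT_SRC_ARRAY write command: the payload resides in the fw-data
 * value array and can be zipped, a repeated pattern or a standard array.
 */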
static int qed_init_cmd_array(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      struct init_write_op *cmd,
			      bool b_must_dmae, bool b_can_dmae)
{
	u32 dmae_array_offset = le32_to_cpu(cmd->args.array_offset);
	u32 data = le32_to_cpu(cmd->data);
	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;

	u32 offset, output_len, input_len, max_size;
	struct qed_dev *cdev = p_hwfn->cdev;
	union init_array_hdr *hdr;
	const u32 *array_data;
	int rc = 0;
	u32 size;

	array_data = cdev->fw_data->arr_data;

	hdr = (union init_array_hdr *)(array_data + dmae_array_offset);
	data = le32_to_cpu(hdr->raw.data);
	switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
	case INIT_ARR_ZIPPED:
		offset = dmae_array_offset + 1;
		input_len = GET_FIELD(data,
				      INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE);
		max_size = MAX_ZIPPED_SIZE * 4;
		memset(p_hwfn->unzip_buf, 0, max_size);

		output_len = qed_unzip_data(p_hwfn, input_len,
					    (u8 *)&array_data[offset],
					    max_size, (u8 *)p_hwfn->unzip_buf);
		if (output_len) {
			rc = qed_init_array_dmae(p_hwfn, p_ptt, addr, 0,
						 output_len,
						 p_hwfn->unzip_buf,
						 b_must_dmae, b_can_dmae);
		} else {
			DP_NOTICE(p_hwfn, "Failed to unzip dmae data\n");
			rc = -EINVAL;
		}
		break;
	case INIT_ARR_PATTERN:
	{
		u32 repeats = GET_FIELD(data,
					INIT_ARRAY_PATTERN_HDR_REPETITIONS);
		u32 i;

		size = GET_FIELD(data, INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE);

		for (i = 0; i < repeats; i++, addr += size << 2) {
			rc = qed_init_array_dmae(p_hwfn, p_ptt, addr,
						 dmae_array_offset + 1,
						 size, array_data,
						 b_must_dmae, b_can_dmae);
			if (rc)
				break;
		}
		break;
	}
	case INIT_ARR_STANDARD:
		size = GET_FIELD(data, INIT_ARRAY_STANDARD_HDR_SIZE);
		rc = qed_init_array_dmae(p_hwfn, p_ptt, addr,
					 dmae_array_offset + 1,
					 size, array_data,
					 b_must_dmae, b_can_dmae);
		break;
	}

	return rc;
}

/* init_ops write command */
static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt,
			   struct init_write_op *p_cmd, bool b_can_dmae)
{
	u32 data = le32_to_cpu(p_cmd->data);
	bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
	union init_write_args *arg = &p_cmd->args;
	int rc = 0;

	/* Sanitize */
	if (b_must_dmae && !b_can_dmae) {
		DP_NOTICE(p_hwfn,
			  "Need to write to %08x for Wide-bus but DMAE isn't allowed\n",
			  addr);
		return -EINVAL;
	}

	switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
	case INIT_SRC_INLINE:
		data = le32_to_cpu(p_cmd->args.inline_val);
		qed_wr(p_hwfn, p_ptt, addr, data);
		break;
	case INIT_SRC_ZEROS:
		data = le32_to_cpu(p_cmd->args.zeros_count);
		if (b_must_dmae || (b_can_dmae && (data >= 64)))
			rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, 0, data);
		else
			qed_init_fill(p_hwfn, p_ptt, addr, 0, data);
		break;
	case INIT_SRC_ARRAY:
		rc = qed_init_cmd_array(p_hwfn, p_ptt, p_cmd,
					b_must_dmae, b_can_dmae);
		break;
	case INIT_SRC_RUNTIME:
		qed_init_rt(p_hwfn, p_ptt, addr,
			    le16_to_cpu(arg->runtime.offset),
			    le16_to_cpu(arg->runtime.size),
			    b_must_dmae);
		break;
	}

	return rc;
}

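/* Comparison callbacks used by the init_ops read/poll command */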
static inline bool comp_eq(u32 val, u32 expected_val)
{
	return val == expected_val;
}

static inline bool comp_and(u32 val, u32 expected_val)
{
	return (val & expected_val) == expected_val;
}

static inline bool comp_or(u32 val, u32 expected_val)
{
	return (val | expected_val) > 0;
}

/* init_ops read/poll commands */
static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt, struct init_read_op *cmd)
{
	bool (*comp_check)(u32 val, u32 expected_val);
	u32 delay = QED_INIT_POLL_PERIOD_US, val;
	u32 data, addr, poll;
	int i;

	data = le32_to_cpu(cmd->op_data);
	addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
	poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE);

	val = qed_rd(p_hwfn, p_ptt, addr);

	if (poll == INIT_POLL_NONE)
		return;

	switch (poll) {
	case INIT_POLL_EQ:
		comp_check = comp_eq;
		break;
	case INIT_POLL_OR:
		comp_check = comp_or;
		break;
	case INIT_POLL_AND:
		comp_check = comp_and;
		break;
	default:
		DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
		       cmd->op_data);
		return;
	}

	data = le32_to_cpu(cmd->expected_val);
	for (i = 0;
	     i < QED_INIT_MAX_POLL_COUNT && !comp_check(val, data);
	     i++) {
		udelay(delay);
		val = qed_rd(p_hwfn, p_ptt, addr);
	}

	if (i == QED_INIT_MAX_POLL_COUNT) {
		DP_ERR(p_hwfn,
		       "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n",
		       addr, le32_to_cpu(cmd->expected_val),
		       val, le32_to_cpu(cmd->op_data));
	}
}

/* init_ops callbacks entry point */
static int qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt,
			   struct init_callback_op *p_cmd)
{
	int rc;

	switch (p_cmd->callback_id) {
	case DMAE_READY_CB:
		rc = qed_dmae_sanity(p_hwfn, p_ptt, "engine_phase");
		break;
	default:
		DP_NOTICE(p_hwfn, "Unexpected init op callback ID %d\n",
			  p_cmd->callback_id);
		return -EINVAL;
	}

	return rc;
}

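/* Recursively evaluate the mode-tree expression at *p_offset against the
 * 'modes' bitmap; returns 1 when the expression matches, 0 otherwise.
 */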
static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn,
				  u16 *p_offset, int modes)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	const u8 *modes_tree_buf;
	u8 arg1, arg2, tree_val;

	modes_tree_buf = cdev->fw_data->modes_tree_buf;
	tree_val = modes_tree_buf[(*p_offset)++];
	switch (tree_val) {
	case INIT_MODE_OP_NOT:
		return qed_init_cmd_mode_match(p_hwfn, p_offset, modes) ^ 1;
	case INIT_MODE_OP_OR:
		arg1 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
		arg2 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
		return arg1 | arg2;
	case INIT_MODE_OP_AND:
		arg1 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
		arg2 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
		return arg1 & arg2;
	default:
		tree_val -= MAX_INIT_MODE_OPS;
		return (modes & BIT(tree_val)) ? 1 : 0;
	}
}

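/* Handle an IF_MODE command: return 0 to continue with the next command, or
 * the number of commands to skip when the mode expression does not match.
 */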
static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn,
			     struct init_if_mode_op *p_cmd, int modes)
{
	u16 offset = le16_to_cpu(p_cmd->modes_buf_offset);

	if (qed_init_cmd_mode_match(p_hwfn, &offset, modes))
		return 0;
	else
		return GET_FIELD(le32_to_cpu(p_cmd->op_data),
				 INIT_IF_MODE_OP_CMD_OFFSET);
}

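/* Handle an IF_PHASE command: return 0 when the command's phase and phase-id
 * match the current ones, or the number of commands to skip otherwise.
 */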
static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn,
			      struct init_if_phase_op *p_cmd,
			      u32 phase, u32 phase_id)
{
	u32 data = le32_to_cpu(p_cmd->phase_data);
	u32 op_data = le32_to_cpu(p_cmd->op_data);

	if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase &&
	      (GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID ||
	       GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id)))
		return GET_FIELD(op_data, INIT_IF_PHASE_OP_CMD_OFFSET);
	else
		return 0;
}

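/* Init tool entry point: walk the init_ops commands loaded from the firmware
 * file and execute those that apply to the given phase, phase_id and mode
 * bitmap. During the engine phase DMAE is avoided until the DMAE_READY_CB
 * callback reports that DMAE is operational.
 */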
int qed_init_run(struct qed_hwfn *p_hwfn,
		 struct qed_ptt *p_ptt, int phase, int phase_id, int modes)
{
	bool b_dmae = (phase != PHASE_ENGINE);
	struct qed_dev *cdev = p_hwfn->cdev;
	u32 cmd_num, num_init_ops;
	union init_op *init_ops;
	int rc = 0;

	num_init_ops = cdev->fw_data->init_ops_size;
	init_ops = cdev->fw_data->init_ops;

	p_hwfn->unzip_buf = kzalloc(MAX_ZIPPED_SIZE * 4, GFP_ATOMIC);
	if (!p_hwfn->unzip_buf)
		return -ENOMEM;

	for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
		union init_op *cmd = &init_ops[cmd_num];
		u32 data = le32_to_cpu(cmd->raw.op_data);

		switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) {
		case INIT_OP_WRITE:
			rc = qed_init_cmd_wr(p_hwfn, p_ptt, &cmd->write,
					     b_dmae);
			break;
		case INIT_OP_READ:
			qed_init_cmd_rd(p_hwfn, p_ptt, &cmd->read);
			break;
		case INIT_OP_IF_MODE:
			cmd_num += qed_init_cmd_mode(p_hwfn, &cmd->if_mode,
						     modes);
			break;
		case INIT_OP_IF_PHASE:
			cmd_num += qed_init_cmd_phase(p_hwfn, &cmd->if_phase,
						      phase, phase_id);
			break;
		case INIT_OP_DELAY:
			/* qed_init_run is always invoked from
			 * sleep-able context
			 */
			udelay(le32_to_cpu(cmd->delay.delay));
			break;

		case INIT_OP_CALLBACK:
			rc = qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
			if (phase == PHASE_ENGINE &&
			    cmd->callback.callback_id == DMAE_READY_CB)
				b_dmae = true;
			break;
		}

		if (rc)
			break;
	}

	kfree(p_hwfn->unzip_buf);
	p_hwfn->unzip_buf = NULL;
	return rc;
}

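/* Program the non-zero pxp_global_win[] entries into the PXP admin GTT. */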
void qed_gtt_init(struct qed_hwfn *p_hwfn)
{
	u32 gtt_base;
	u32 i;

	/* Set the global windows */
	gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START;

	for (i = 0; i < ARRAY_SIZE(pxp_global_win); i++)
		if (pxp_global_win[i])
			REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE,
			       pxp_global_win[i]);
}

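/* Parse the firmware binary buffer headers and set up the qed_fw_data
 * pointers (init commands, value arrays, mode tree, overlays) into 'data';
 * the buffer must remain valid for as long as these pointers are in use.
 */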
int qed_init_fw_data(struct qed_dev *cdev, const u8 *data)
{
	struct qed_fw_data *fw = cdev->fw_data;
	struct bin_buffer_hdr *buf_hdr;
	u32 offset, len;

	if (!data) {
		DP_NOTICE(cdev, "Invalid fw data\n");
		return -EINVAL;
	}

	/* First Dword contains metadata and should be skipped */
	buf_hdr = (struct bin_buffer_hdr *)data;

	offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
	fw->fw_ver_info = (struct fw_ver_info *)(data + offset);

	offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
	fw->init_ops = (union init_op *)(data + offset);

	offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
	fw->arr_data = (u32 *)(data + offset);

	offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
	fw->modes_tree_buf = (u8 *)(data + offset);
	len = buf_hdr[BIN_BUF_INIT_CMD].length;
	fw->init_ops_size = len / sizeof(struct init_raw_op);

	offset = buf_hdr[BIN_BUF_INIT_OVERLAYS].offset;
	fw->fw_overlays = (u32 *)(data + offset);
	len = buf_hdr[BIN_BUF_INIT_OVERLAYS].length;
	fw->fw_overlays_len = len;

	return 0;
}