// SPDX-License-Identifier: GPL-2.0-only
/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
 */

/* File hw_atl_utils.c: Definition of common functions for Atlantic hardware
 * abstraction layer.
 */

#include "../aq_nic.h"
#include "../aq_hw_utils.h"
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
#include "hw_atl_llh_internal.h"

#include <linux/random.h>

#define HW_ATL_UCP_0X370_REG    0x0370U

#define HW_ATL_MIF_CMD          0x0200U
#define HW_ATL_MIF_ADDR         0x0208U
#define HW_ATL_MIF_VAL          0x020CU

#define HW_ATL_RPC_CONTROL_ADR  0x0338U
#define HW_ATL_RPC_STATE_ADR    0x033CU

#define HW_ATL_MPI_FW_VERSION	0x18
#define HW_ATL_MPI_CONTROL_ADR  0x0368U
#define HW_ATL_MPI_STATE_ADR    0x036CU

#define HW_ATL_MPI_STATE_MSK      0x00FFU
#define HW_ATL_MPI_STATE_SHIFT    0U
#define HW_ATL_MPI_SPEED_MSK      0x00FF0000U
#define HW_ATL_MPI_SPEED_SHIFT    16U
#define HW_ATL_MPI_DIRTY_WAKE_MSK 0x02000000U

#define HW_ATL_MPI_DAISY_CHAIN_STATUS	0x704
#define HW_ATL_MPI_BOOT_EXIT_CODE	0x388

#define HW_ATL_MAC_PHY_CONTROL	0x4000
#define HW_ATL_MAC_PHY_MPI_RESET_BIT 0x1D

#define HW_ATL_FW_VER_1X 0x01050006U
#define HW_ATL_FW_VER_2X 0x02000000U
#define HW_ATL_FW_VER_3X 0x03000000U

#define FORCE_FLASHLESS 0

enum mcp_area {
	MCP_AREA_CONFIG = 0x80000000,
	MCP_AREA_SETTINGS = 0x20000000,
};

static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual);

static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
				      enum hal_atl_utils_fw_state_e state);

static u32 hw_atl_utils_get_mpi_mbox_tid(struct aq_hw_s *self);
static u32 hw_atl_utils_mpi_get_state(struct aq_hw_s *self);
static u32 hw_atl_utils_mif_cmd_get(struct aq_hw_s *self);
static u32 hw_atl_utils_mif_addr_get(struct aq_hw_s *self);
static u32 hw_atl_utils_rpc_state_get(struct aq_hw_s *self);

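/* Detect the running firmware branch from its reported version and bind the
 * matching firmware ops table. FW 3.x reuses the 2.x interface.
 */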
int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops)
{
	int err = 0;

	err = hw_atl_utils_soft_reset(self);
	if (err)
		return err;

	hw_atl_utils_hw_chip_features_init(self,
					   &self->chip_features);

	hw_atl_utils_get_fw_version(self, &self->fw_ver_actual);

	if (hw_atl_utils_ver_match(HW_ATL_FW_VER_1X,
				   self->fw_ver_actual) == 0) {
		*fw_ops = &aq_fw_1x_ops;
	} else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_2X,
					  self->fw_ver_actual) == 0) {
		*fw_ops = &aq_fw_2x_ops;
	} else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_3X,
					  self->fw_ver_actual) == 0) {
		*fw_ops = &aq_fw_2x_ops;
	} else {
		aq_pr_err("Bad FW version detected: %x\n",
			  self->fw_ver_actual);
		return -EOPNOTSUPP;
	}
	self->aq_fw_ops = *fw_ops;
	err = self->aq_fw_ops->init(self);
	return err;
}

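/* Reset path for flashless boot (FLB): kick-start the MAC, wait for the
 * daisy chain status to report readiness, then issue a global software
 * reset and wait for the firmware version register to become non-zero.
 */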
static int hw_atl_utils_soft_reset_flb(struct aq_hw_s *self)
{
	u32 gsr, val;
	int k = 0;

	aq_hw_write_reg(self, 0x404, 0x40e1);
	AQ_HW_SLEEP(50);

	/* Cleanup SPI */
	val = aq_hw_read_reg(self, 0x53C);
	aq_hw_write_reg(self, 0x53C, val | 0x10);

	gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR);
	aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR, (gsr & 0xBFFF) | 0x8000);

	/* Kickstart MAC */
	aq_hw_write_reg(self, 0x404, 0x80e0);
	aq_hw_write_reg(self, 0x32a8, 0x0);
	aq_hw_write_reg(self, 0x520, 0x1);

	/* Reset SPI again because of possible interrupted SPI burst */
	val = aq_hw_read_reg(self, 0x53C);
	aq_hw_write_reg(self, 0x53C, val | 0x10);
	AQ_HW_SLEEP(10);
	/* Clear SPI reset state */
	aq_hw_write_reg(self, 0x53C, val & ~0x10);

	aq_hw_write_reg(self, 0x404, 0x180e0);

	for (k = 0; k < 1000; k++) {
		u32 flb_status = aq_hw_read_reg(self,
						HW_ATL_MPI_DAISY_CHAIN_STATUS);

		flb_status = flb_status & 0x10;
		if (flb_status)
			break;
		AQ_HW_SLEEP(10);
	}
	if (k == 1000) {
		aq_pr_err("MAC kickstart failed\n");
		return -EIO;
	}

	/* FW reset */
	aq_hw_write_reg(self, 0x404, 0x80e0);
	AQ_HW_SLEEP(50);
	aq_hw_write_reg(self, 0x3a0, 0x1);

	/* Kickstart PHY - skipped */

	/* Global software reset */
	hw_atl_rx_rx_reg_res_dis_set(self, 0U);
	hw_atl_tx_tx_reg_res_dis_set(self, 0U);
	aq_hw_write_reg_bit(self, HW_ATL_MAC_PHY_CONTROL,
			    BIT(HW_ATL_MAC_PHY_MPI_RESET_BIT),
			    HW_ATL_MAC_PHY_MPI_RESET_BIT, 0x0);
	gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR);
	aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR, (gsr & 0xBFFF) | 0x8000);

	for (k = 0; k < 1000; k++) {
		u32 fw_state = aq_hw_read_reg(self, HW_ATL_MPI_FW_VERSION);

		if (fw_state)
			break;
		AQ_HW_SLEEP(10);
	}
	if (k == 1000) {
		aq_pr_err("FW kickstart failed\n");
		return -EIO;
	}
	/* Old FW requires fixed delay after init */
	AQ_HW_SLEEP(15);

	return 0;
}

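/* Reset path when the ROM boot loader (RBL) is active: restart the RBL,
 * wait for it to report a boot status, then wait for the firmware to come
 * back up. A status of 0xF1A7 indicates no firmware image was found.
 */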
static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self)
{
	u32 gsr, val, rbl_status;
	int k;

	aq_hw_write_reg(self, 0x404, 0x40e1);
	aq_hw_write_reg(self, 0x3a0, 0x1);
	aq_hw_write_reg(self, 0x32a8, 0x0);

	/* Alter RBL status */
	aq_hw_write_reg(self, 0x388, 0xDEAD);

	/* Cleanup SPI */
	val = aq_hw_read_reg(self, 0x53C);
	aq_hw_write_reg(self, 0x53C, val | 0x10);

	/* Global software reset */
	hw_atl_rx_rx_reg_res_dis_set(self, 0U);
	hw_atl_tx_tx_reg_res_dis_set(self, 0U);
	aq_hw_write_reg_bit(self, HW_ATL_MAC_PHY_CONTROL,
			    BIT(HW_ATL_MAC_PHY_MPI_RESET_BIT),
			    HW_ATL_MAC_PHY_MPI_RESET_BIT, 0x0);
	gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR);
	aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR,
			(gsr & 0xFFFFBFFF) | 0x8000);

	if (FORCE_FLASHLESS)
		aq_hw_write_reg(self, 0x534, 0x0);

	aq_hw_write_reg(self, 0x404, 0x40e0);

	/* Wait for RBL boot */
	for (k = 0; k < 1000; k++) {
		rbl_status = aq_hw_read_reg(self, 0x388) & 0xFFFF;
		if (rbl_status && rbl_status != 0xDEAD)
			break;
		AQ_HW_SLEEP(10);
	}
	if (!rbl_status || rbl_status == 0xDEAD) {
		aq_pr_err("RBL Restart failed\n");
		return -EIO;
	}

	/* Restore NVR */
	if (FORCE_FLASHLESS)
		aq_hw_write_reg(self, 0x534, 0xA0);

	if (rbl_status == 0xF1A7) {
		aq_pr_err("No FW detected. Dynamic FW load not implemented\n");
		return -EOPNOTSUPP;
	}

	for (k = 0; k < 1000; k++) {
		u32 fw_state = aq_hw_read_reg(self, HW_ATL_MPI_FW_VERSION);

		if (fw_state)
			break;
		AQ_HW_SLEEP(10);
	}
	if (k == 1000) {
		aq_pr_err("FW kickstart failed\n");
		return -EIO;
	}
	/* Old FW requires fixed delay after init */
	AQ_HW_SLEEP(15);

	return 0;
}

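/* Determine whether the RBL or the FLB boot path is active (a non-zero boot
 * exit code indicates RBL) and run the corresponding reset sequence. FW 1.x
 * is first forced back to the DEINIT state to work around a bad power state.
 */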
int hw_atl_utils_soft_reset(struct aq_hw_s *self)
{
	int k;
	u32 boot_exit_code = 0;
	u32 val;

	for (k = 0; k < 1000; ++k) {
		u32 flb_status = aq_hw_read_reg(self,
						HW_ATL_MPI_DAISY_CHAIN_STATUS);
		boot_exit_code = aq_hw_read_reg(self,
						HW_ATL_MPI_BOOT_EXIT_CODE);
		if (flb_status != 0x06000000 || boot_exit_code != 0)
			break;
	}

	if (k == 1000) {
		aq_pr_err("Neither RBL nor FLB firmware started\n");
		return -EOPNOTSUPP;
	}

	self->rbl_enabled = (boot_exit_code != 0);

	/* FW 1.x may boot up in an invalid POWER state (WOL feature).
	 * We should work around this by forcing its state back to DEINIT
	 */
	if (!hw_atl_utils_ver_match(HW_ATL_FW_VER_1X,
				    aq_hw_read_reg(self,
						   HW_ATL_MPI_FW_VERSION))) {
		int err = 0;

		hw_atl_utils_mpi_set_state(self, MPI_DEINIT);
		err = readx_poll_timeout_atomic(hw_atl_utils_mpi_get_state,
						self, val,
						(val & HW_ATL_MPI_STATE_MSK) ==
						 MPI_DEINIT,
						10, 10000U);
		if (err)
			return err;
	}

	if (self->rbl_enabled)
		return hw_atl_utils_soft_reset_rbl(self);
	else
		return hw_atl_utils_soft_reset_flb(self);
}

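/* Read @cnt dwords from firmware RAM at address @a into @p. The RAM
 * semaphore is held for the duration of the transfer; on B1 hardware
 * completion of each read is detected via the MIF address register,
 * otherwise via the MIF command register.
 */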
int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
				  u32 *p, u32 cnt)
{
	int err = 0;
	u32 val;

	err = readx_poll_timeout_atomic(hw_atl_sem_ram_get,
					self, val, val == 1U,
					1U, 10000U);

	if (err < 0) {
		bool is_locked;

		hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
		is_locked = hw_atl_sem_ram_get(self);
		if (!is_locked) {
			err = -ETIME;
			goto err_exit;
		}
	}

	aq_hw_write_reg(self, HW_ATL_MIF_ADDR, a);

	for (++cnt; --cnt && !err;) {
		aq_hw_write_reg(self, HW_ATL_MIF_CMD, 0x00008000U);

		if (IS_CHIP_FEATURE(REVISION_B1))
			err = readx_poll_timeout_atomic(hw_atl_utils_mif_addr_get,
							self, val, val != a,
							1U, 1000U);
		else
			err = readx_poll_timeout_atomic(hw_atl_utils_mif_cmd_get,
							self, val,
							!(val & 0x100),
							1U, 1000U);

		*(p++) = aq_hw_read_reg(self, HW_ATL_MIF_VAL);
		a += 4;
	}

	hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);

err_exit:
	return err;
}

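/* B1 mailbox upload: the destination offset is made relative to the selected
 * MCP area (config or settings), each dword is handed over via the mailbox
 * registers and an MCP interrupt, and scratchpad 12 is polled until the
 * firmware has picked up the write.
 */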
static int hw_atl_utils_write_b1_mbox(struct aq_hw_s *self, u32 addr,
				      u32 *p, u32 cnt, enum mcp_area area)
{
	u32 data_offset = 0;
	u32 offset = addr;
	int err = 0;
	u32 val;

	switch (area) {
	case MCP_AREA_CONFIG:
		offset -= self->rpc_addr;
		break;

	case MCP_AREA_SETTINGS:
		offset -= self->settings_addr;
		break;
	}

	offset = offset / sizeof(u32);

	for (; data_offset < cnt; ++data_offset, ++offset) {
		aq_hw_write_reg(self, 0x328, p[data_offset]);
		aq_hw_write_reg(self, 0x32C,
				(area | (0xFFFF & (offset * 4))));
		hw_atl_mcp_up_force_intr_set(self, 1);
		/* 1000 times by 10us = 10ms */
		err = readx_poll_timeout_atomic(hw_atl_scrpad12_get,
						self, val,
						(val & 0xF0000000) !=
						area,
						10U, 10000U);

		if (err < 0)
			break;
	}

	return err;
}

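/* Pre-B1 mailbox upload: dwords are written directly through the MIF
 * address/value/command registers, polling the command register for
 * completion of each write.
 */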
static int hw_atl_utils_write_b0_mbox(struct aq_hw_s *self, u32 addr,
				      u32 *p, u32 cnt)
{
	u32 offset = 0;
	int err = 0;
	u32 val;

	aq_hw_write_reg(self, 0x208, addr);

	for (; offset < cnt; ++offset) {
		aq_hw_write_reg(self, 0x20C, p[offset]);
		aq_hw_write_reg(self, 0x200, 0xC000);

		err = readx_poll_timeout_atomic(hw_atl_utils_mif_cmd_get,
						self, val,
						(val & 0x100) == 0U,
						10U, 10000U);

		if (err < 0)
			break;
	}

	return err;
}

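/* Upload @cnt dwords from @p to firmware memory at @addr under the RAM
 * semaphore, dispatching to the B1 or pre-B1 mailbox flavour.
 */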
static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 addr, u32 *p,
					 u32 cnt, enum mcp_area area)
{
	int err = 0;
	u32 val;

	err = readx_poll_timeout_atomic(hw_atl_sem_ram_get, self,
					val, val == 1U,
					10U, 100000U);
	if (err < 0)
		goto err_exit;

	if (IS_CHIP_FEATURE(REVISION_B1))
		err = hw_atl_utils_write_b1_mbox(self, addr, p, cnt, area);
	else
		err = hw_atl_utils_write_b0_mbox(self, addr, p, cnt);

	hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);

	if (err < 0)
		goto err_exit;

	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}

int hw_atl_write_fwcfg_dwords(struct aq_hw_s *self, u32 *p, u32 cnt)
{
	return hw_atl_utils_fw_upload_dwords(self, self->rpc_addr, p,
					     cnt, MCP_AREA_CONFIG);
}

int hw_atl_write_fwsettings_dwords(struct aq_hw_s *self, u32 offset, u32 *p,
				   u32 cnt)
{
	return hw_atl_utils_fw_upload_dwords(self, self->settings_addr + offset,
					     p, cnt, MCP_AREA_SETTINGS);
}

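/* Versions are considered compatible when the major numbers match exactly
 * and the actual minor/build is not older than the expected one.
 */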
static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual)
{
	int err = 0;
	const u32 dw_major_mask = 0xff000000U;
	const u32 dw_minor_mask = 0x00ffffffU;

	err = (dw_major_mask & (ver_expected ^ ver_actual)) ? -EOPNOTSUPP : 0;
	if (err < 0)
		goto err_exit;
	err = ((dw_minor_mask & ver_expected) > (dw_minor_mask & ver_actual)) ?
		-EOPNOTSUPP : 0;
err_exit:
	return err;
}

static int hw_atl_utils_init_ucp(struct aq_hw_s *self,
				 const struct aq_hw_caps_s *aq_hw_caps)
{
	int err = 0;

	if (!aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) {
		unsigned int rnd = 0U;
		unsigned int ucp_0x370 = 0U;

		get_random_bytes(&rnd, sizeof(unsigned int));

		ucp_0x370 = 0x02020202U | (0xFEFEFEFEU & rnd);
		aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370);
	}

	hw_atl_reg_glb_cpu_scratch_scp_set(self, 0x00000000U, 25U);

	/* check 10 times by 1ms */
	err = readx_poll_timeout_atomic(hw_atl_scrpad25_get,
					self, self->mbox_addr,
					self->mbox_addr != 0U,
					1000U, 10000U);

	return err;
}

struct aq_hw_atl_utils_fw_rpc_tid_s {
	union {
		u32 val;
		struct {
			u16 tid;
			u16 len;
		};
	};
};

#define hw_atl_utils_fw_rpc_init(_H_) hw_atl_utils_fw_rpc_wait(_H_, NULL)

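/* Push the prepared RPC request from self->rpc into firmware memory and
 * announce it by writing a new transaction id and the request length to the
 * RPC control register.
 */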
int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size)
{
	int err = 0;
	struct aq_hw_atl_utils_fw_rpc_tid_s sw;

	if (!IS_CHIP_FEATURE(MIPS)) {
		err = -1;
		goto err_exit;
	}
	err = hw_atl_write_fwcfg_dwords(self, (u32 *)(void *)&self->rpc,
					(rpc_size + sizeof(u32) -
					 sizeof(u8)) / sizeof(u32));
	if (err < 0)
		goto err_exit;

	sw.tid = 0xFFFFU & (++self->rpc_tid);
	sw.len = (u16)rpc_size;
	aq_hw_write_reg(self, HW_ATL_RPC_CONTROL_ADR, sw.val);

err_exit:
	return err;
}

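/* Wait until the firmware has processed the outstanding RPC transaction.
 * A reported length of 0xFFFF means the request must be resent. When @rpc
 * is provided, the RPC result is downloaded back into self->rpc.
 */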
int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
			     struct hw_atl_utils_fw_rpc **rpc)
{
	int err = 0;
	struct aq_hw_atl_utils_fw_rpc_tid_s sw;
	struct aq_hw_atl_utils_fw_rpc_tid_s fw;

	do {
		sw.val = aq_hw_read_reg(self, HW_ATL_RPC_CONTROL_ADR);

		self->rpc_tid = sw.tid;

		err = readx_poll_timeout_atomic(hw_atl_utils_rpc_state_get,
						self, fw.val,
						sw.tid == fw.tid,
						1000U, 100000U);
		if (err < 0)
			goto err_exit;

		if (fw.len == 0xFFFFU) {
			err = hw_atl_utils_fw_rpc_call(self, sw.len);
			if (err < 0)
				goto err_exit;
		}
	} while (sw.tid != fw.tid || 0xFFFFU == fw.len);

	if (rpc) {
		if (fw.len) {
			err =
			hw_atl_utils_fw_downld_dwords(self,
						      self->rpc_addr,
						      (u32 *)(void *)
						      &self->rpc,
						      (fw.len + sizeof(u32) -
						       sizeof(u8)) /
						      sizeof(u32));
			if (err < 0)
				goto err_exit;
		}

		*rpc = &self->rpc;
	}

err_exit:
	return err;
}

static int hw_atl_utils_mpi_create(struct aq_hw_s *self)
{
	int err = 0;

	err = hw_atl_utils_init_ucp(self, self->aq_nic_cfg->aq_hw_caps);
	if (err < 0)
		goto err_exit;

	err = hw_atl_utils_fw_rpc_init(self);
	if (err < 0)
		goto err_exit;

err_exit:
	return err;
}

int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self,
			       struct hw_atl_utils_mbox_header *pmbox)
{
	return hw_atl_utils_fw_downld_dwords(self,
					     self->mbox_addr,
					     (u32 *)(void *)pmbox,
					     sizeof(*pmbox) / sizeof(u32));
}

void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
				 struct hw_atl_utils_mbox *pmbox)
{
	int err = 0;

	err = hw_atl_utils_fw_downld_dwords(self,
					    self->mbox_addr,
					    (u32 *)(void *)pmbox,
					    sizeof(*pmbox) / sizeof(u32));
	if (err < 0)
		goto err_exit;

	if (IS_CHIP_FEATURE(REVISION_A0)) {
		unsigned int mtu = self->aq_nic_cfg ?
					self->aq_nic_cfg->mtu : 1514U;
		pmbox->stats.ubrc = pmbox->stats.uprc * mtu;
		pmbox->stats.ubtc = pmbox->stats.uptc * mtu;
		pmbox->stats.dpc = atomic_read(&self->dpc);
	} else {
		pmbox->stats.dpc = hw_atl_rpb_rx_dma_drop_pkt_cnt_get(self);
	}

err_exit:;
}

static int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed)
{
	u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR);

	val = val & ~HW_ATL_MPI_SPEED_MSK;
	val |= speed << HW_ATL_MPI_SPEED_SHIFT;
	aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, val);

	return 0;
}

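/* Switch the MPI firmware state machine. For MPI_RESET the mailbox
 * transaction id is polled first to make sure the firmware is responding;
 * DEINIT/POWER additionally raise the dirty wake flag.
 */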
static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
				      enum hal_atl_utils_fw_state_e state)
{
	int err = 0;
	u32 transaction_id = 0;
	struct hw_atl_utils_mbox_header mbox;
	u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR);

	if (state == MPI_RESET) {
		hw_atl_utils_mpi_read_mbox(self, &mbox);

		transaction_id = mbox.transaction_id;

		err = readx_poll_timeout_atomic(hw_atl_utils_get_mpi_mbox_tid,
						self, mbox.transaction_id,
						transaction_id !=
						mbox.transaction_id,
						1000U, 100000U);
		if (err < 0)
			goto err_exit;
	}
	/* On interface DEINIT we disable DW (raise bit)
	 * Otherwise enable DW (clear bit)
	 */
	if (state == MPI_DEINIT || state == MPI_POWER)
		val |= HW_ATL_MPI_DIRTY_WAKE_MSK;
	else
		val &= ~HW_ATL_MPI_DIRTY_WAKE_MSK;

	/* Set new state bits */
	val = val & ~HW_ATL_MPI_STATE_MSK;
	val |= state & HW_ATL_MPI_STATE_MSK;

	aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, val);
err_exit:
	return err;
}

int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
{
	u32 cp0x036C = hw_atl_utils_mpi_get_state(self);
	u32 link_speed_mask = cp0x036C >> HW_ATL_MPI_SPEED_SHIFT;
	struct aq_hw_link_status_s *link_status = &self->aq_link_status;

	if (!link_speed_mask) {
		link_status->mbps = 0U;
	} else {
		switch (link_speed_mask) {
		case HAL_ATLANTIC_RATE_10G:
			link_status->mbps = 10000U;
			break;

		case HAL_ATLANTIC_RATE_5G:
		case HAL_ATLANTIC_RATE_5GSR:
			link_status->mbps = 5000U;
			break;

		case HAL_ATLANTIC_RATE_2GS:
			link_status->mbps = 2500U;
			break;

		case HAL_ATLANTIC_RATE_1G:
			link_status->mbps = 1000U;
			break;

		case HAL_ATLANTIC_RATE_100M:
			link_status->mbps = 100U;
			break;

		default:
			return -EBUSY;
		}
	}

	return 0;
}

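/* Read the permanent MAC address from the firmware area. If no valid
 * unicast address is reported, a fallback address is constructed from the
 * contents of the 0x370 scratch register.
 */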
int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
				   u8 *mac)
{
	int err = 0;
	u32 h = 0U;
	u32 l = 0U;
	u32 mac_addr[2];

	if (!aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) {
		unsigned int rnd = 0;
		unsigned int ucp_0x370 = 0;

		get_random_bytes(&rnd, sizeof(unsigned int));

		ucp_0x370 = 0x02020202 | (0xFEFEFEFE & rnd);
		aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370);
	}

	err = hw_atl_utils_fw_downld_dwords(self,
					    aq_hw_read_reg(self, 0x00000374U) +
					    (40U * 4U),
					    mac_addr,
					    ARRAY_SIZE(mac_addr));
	if (err < 0) {
		mac_addr[0] = 0U;
		mac_addr[1] = 0U;
		err = 0;
	} else {
		mac_addr[0] = __swab32(mac_addr[0]);
		mac_addr[1] = __swab32(mac_addr[1]);
	}

	ether_addr_copy(mac, (u8 *)mac_addr);

	if ((mac[0] & 0x01U) || ((mac[0] | mac[1] | mac[2]) == 0x00U)) {
		/* chip revision */
		l = 0xE3000000U |
		    (0xFFFFU & aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) |
		    (0x00 << 16);
		h = 0x8001300EU;

		mac[5] = (u8)(0xFFU & l);
		l >>= 8;
		mac[4] = (u8)(0xFFU & l);
		l >>= 8;
		mac[3] = (u8)(0xFFU & l);
		l >>= 8;
		mac[2] = (u8)(0xFFU & l);
		mac[1] = (u8)(0xFFU & h);
		h >>= 8;
		mac[0] = (u8)(0xFFU & h);
	}

	return err;
}

unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps)
{
	unsigned int ret = 0U;

	switch (mbps) {
	case 100U:
		ret = 5U;
		break;

	case 1000U:
		ret = 4U;
		break;

	case 2500U:
		ret = 3U;
		break;

	case 5000U:
		ret = 1U;
		break;

	case 10000U:
		ret = 0U;
		break;

	default:
		break;
	}
	return ret;
}

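/* Translate the MIF revision id into the chip feature bitmap used by
 * IS_CHIP_FEATURE(): 1 = revision A0, 2 = B0, 0xA = B1.
 */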
void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p)
{
	u32 chip_features = 0U;
	u32 val = hw_atl_reg_glb_mif_id_get(self);
	u32 mif_rev = val & 0xFFU;

	if ((0xFU & mif_rev) == 1U) {
		chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 |
			HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |
			HAL_ATLANTIC_UTILS_CHIP_MIPS;
	} else if ((0xFU & mif_rev) == 2U) {
		chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 |
			HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |
			HAL_ATLANTIC_UTILS_CHIP_MIPS |
			HAL_ATLANTIC_UTILS_CHIP_TPO2 |
			HAL_ATLANTIC_UTILS_CHIP_RPF2;
	} else if ((0xFU & mif_rev) == 0xAU) {
		chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_B1 |
			HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |
			HAL_ATLANTIC_UTILS_CHIP_MIPS |
			HAL_ATLANTIC_UTILS_CHIP_TPO2 |
			HAL_ATLANTIC_UTILS_CHIP_RPF2;
	}

	*p = chip_features;
}

static int hw_atl_fw1x_deinit(struct aq_hw_s *self)
{
	hw_atl_utils_mpi_set_speed(self, 0);
	hw_atl_utils_mpi_set_state(self, MPI_DEINIT);
	return 0;
}

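/* Accumulate deltas of the firmware MIB counters against the last snapshot
 * into curr_stats (only while the link is up), and refresh the DMA good
 * packet/octet counters directly from hardware.
 */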
int hw_atl_utils_update_stats(struct aq_hw_s *self)
{
	struct hw_atl_utils_mbox mbox;
	struct aq_stats_s *cs = &self->curr_stats;

	hw_atl_utils_mpi_read_stats(self, &mbox);

#define AQ_SDELTA(_N_) (self->curr_stats._N_ += \
			mbox.stats._N_ - self->last_stats._N_)

	if (self->aq_link_status.mbps) {
		AQ_SDELTA(uprc);
		AQ_SDELTA(mprc);
		AQ_SDELTA(bprc);
		AQ_SDELTA(erpt);

		AQ_SDELTA(uptc);
		AQ_SDELTA(mptc);
		AQ_SDELTA(bptc);
		AQ_SDELTA(erpr);

		AQ_SDELTA(ubrc);
		AQ_SDELTA(ubtc);
		AQ_SDELTA(mbrc);
		AQ_SDELTA(mbtc);
		AQ_SDELTA(bbrc);
		AQ_SDELTA(bbtc);
		AQ_SDELTA(dpc);
	}
#undef AQ_SDELTA

	cs->dma_pkt_rc = hw_atl_stats_rx_dma_good_pkt_counter_get(self);
	cs->dma_pkt_tc = hw_atl_stats_tx_dma_good_pkt_counter_get(self);
	cs->dma_oct_rc = hw_atl_stats_rx_dma_good_octet_counter_get(self);
	cs->dma_oct_tc = hw_atl_stats_tx_dma_good_octet_counter_get(self);

	memcpy(&self->last_stats, &mbox.stats, sizeof(mbox.stats));

	return 0;
}

struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self)
{
	return &self->curr_stats;
}

static const u32 hw_atl_utils_hw_mac_regs[] = {
	0x00005580U, 0x00005590U, 0x000055B0U, 0x000055B4U,
	0x000055C0U, 0x00005B00U, 0x00005B04U, 0x00005B08U,
	0x00005B0CU, 0x00005B10U, 0x00005B14U, 0x00005B18U,
	0x00005B1CU, 0x00005B20U, 0x00005B24U, 0x00005B28U,
	0x00005B2CU, 0x00005B30U, 0x00005B34U, 0x00005B38U,
	0x00005B3CU, 0x00005B40U, 0x00005B44U, 0x00005B48U,
	0x00005B4CU, 0x00005B50U, 0x00005B54U, 0x00005B58U,
	0x00005B5CU, 0x00005B60U, 0x00005B64U, 0x00005B68U,
	0x00005B6CU, 0x00005B70U, 0x00005B74U, 0x00005B78U,
	0x00005B7CU, 0x00007C00U, 0x00007C04U, 0x00007C08U,
	0x00007C0CU, 0x00007C10U, 0x00007C14U, 0x00007C18U,
	0x00007C1CU, 0x00007C20U, 0x00007C40U, 0x00007C44U,
	0x00007C48U, 0x00007C4CU, 0x00007C50U, 0x00007C54U,
	0x00007C58U, 0x00007C5CU, 0x00007C60U, 0x00007C80U,
	0x00007C84U, 0x00007C88U, 0x00007C8CU, 0x00007C90U,
	0x00007C94U, 0x00007C98U, 0x00007C9CU, 0x00007CA0U,
	0x00007CC0U, 0x00007CC4U, 0x00007CC8U, 0x00007CCCU,
	0x00007CD0U, 0x00007CD4U, 0x00007CD8U, 0x00007CDCU,
	0x00007CE0U, 0x00000300U, 0x00000304U, 0x00000308U,
	0x0000030cU, 0x00000310U, 0x00000314U, 0x00000318U,
	0x0000031cU, 0x00000360U, 0x00000364U, 0x00000368U,
	0x0000036cU, 0x00000370U, 0x00000374U, 0x00006900U,
};

int hw_atl_utils_hw_get_regs(struct aq_hw_s *self,
			     const struct aq_hw_caps_s *aq_hw_caps,
			     u32 *regs_buff)
{
	unsigned int i = 0U;

	for (i = 0; i < aq_hw_caps->mac_regs_count; i++)
		regs_buff[i] = aq_hw_read_reg(self,
					      hw_atl_utils_hw_mac_regs[i]);
	return 0;
}

int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version)
{
	*fw_version = aq_hw_read_reg(self, HW_ATL_MPI_FW_VERSION);
	return 0;
}

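/* Program (or remove) the magic-packet wake-on-LAN filter via a firmware
 * RPC request built around the given MAC address.
 */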
static int aq_fw1x_set_wake_magic(struct aq_hw_s *self, bool wol_enabled,
				  u8 *mac)
{
	struct hw_atl_utils_fw_rpc *prpc = NULL;
	unsigned int rpc_size = 0U;
	int err = 0;

	err = hw_atl_utils_fw_rpc_wait(self, &prpc);
	if (err < 0)
		goto err_exit;

	memset(prpc, 0, sizeof(*prpc));

	if (wol_enabled) {
		rpc_size = offsetof(struct hw_atl_utils_fw_rpc, msg_wol_add) +
			   sizeof(prpc->msg_wol_add);

		prpc->msg_id = HAL_ATLANTIC_UTILS_FW_MSG_WOL_ADD;
		prpc->msg_wol_add.priority =
				HAL_ATLANTIC_UTILS_FW_MSG_WOL_PRIOR;
		prpc->msg_wol_add.pattern_id =
				HAL_ATLANTIC_UTILS_FW_MSG_WOL_PATTERN;
		prpc->msg_wol_add.packet_type =
				HAL_ATLANTIC_UTILS_FW_MSG_WOL_MAG_PKT;

		ether_addr_copy((u8 *)&prpc->msg_wol_add.magic_packet_pattern,
				mac);
	} else {
		rpc_size = sizeof(prpc->msg_wol_remove) +
			   offsetof(struct hw_atl_utils_fw_rpc, msg_wol_remove);

		prpc->msg_id = HAL_ATLANTIC_UTILS_FW_MSG_WOL_DEL;
		prpc->msg_wol_add.pattern_id =
				HAL_ATLANTIC_UTILS_FW_MSG_WOL_PATTERN;
	}

	err = hw_atl_utils_fw_rpc_call(self, rpc_size);

err_exit:
	return err;
}

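/* Prepare the NIC for a low power state: arm the magic-packet wake-up if
 * WoL is requested, then drop the link speed and move the MPI state machine
 * to MPI_POWER.
 */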
static int aq_fw1x_set_power(struct aq_hw_s *self, unsigned int power_state,
			     u8 *mac)
{
	struct hw_atl_utils_fw_rpc *prpc = NULL;
	unsigned int rpc_size = 0U;
	int err = 0;

	if (self->aq_nic_cfg->wol & WAKE_MAGIC) {
		err = aq_fw1x_set_wake_magic(self, 1, mac);

		if (err < 0)
			goto err_exit;

		rpc_size = sizeof(prpc->msg_id) +
			   sizeof(prpc->msg_enable_wakeup);

		err = hw_atl_utils_fw_rpc_wait(self, &prpc);

		if (err < 0)
			goto err_exit;

		memset(prpc, 0, rpc_size);

		prpc->msg_id = HAL_ATLANTIC_UTILS_FW_MSG_ENABLE_WAKEUP;
		prpc->msg_enable_wakeup.pattern_mask = 0x00000002;

		err = hw_atl_utils_fw_rpc_call(self, rpc_size);
		if (err < 0)
			goto err_exit;
	}
	hw_atl_utils_mpi_set_speed(self, 0);
	hw_atl_utils_mpi_set_state(self, MPI_POWER);

err_exit:
	return err;
}

static u32 hw_atl_utils_get_mpi_mbox_tid(struct aq_hw_s *self)
{
	struct hw_atl_utils_mbox_header mbox;

	hw_atl_utils_mpi_read_mbox(self, &mbox);

	return mbox.transaction_id;
}

static u32 hw_atl_utils_mpi_get_state(struct aq_hw_s *self)
{
	return aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR);
}

static u32 hw_atl_utils_mif_cmd_get(struct aq_hw_s *self)
{
	return aq_hw_read_reg(self, HW_ATL_MIF_CMD);
}

static u32 hw_atl_utils_mif_addr_get(struct aq_hw_s *self)
{
	return aq_hw_read_reg(self, HW_ATL_MIF_ADDR);
}

static u32 hw_atl_utils_rpc_state_get(struct aq_hw_s *self)
{
	return aq_hw_read_reg(self, HW_ATL_RPC_STATE_ADR);
}

const struct aq_fw_ops aq_fw_1x_ops = {
	.init = hw_atl_utils_mpi_create,
	.deinit = hw_atl_fw1x_deinit,
	.reset = NULL,
	.get_mac_permanent = hw_atl_utils_get_mac_permanent,
	.set_link_speed = hw_atl_utils_mpi_set_speed,
	.set_state = hw_atl_utils_mpi_set_state,
	.update_link_status = hw_atl_utils_mpi_get_link_status,
	.update_stats = hw_atl_utils_update_stats,
	.get_phy_temp = NULL,
	.set_power = aq_fw1x_set_power,
	.set_eee_rate = NULL,
	.get_eee_rate = NULL,
	.set_flow_control = NULL,
	.send_fw_request = NULL,
	.enable_ptp = NULL,
	.led_control = NULL,
};