// SPDX-License-Identifier: GPL-2.0-only
/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
 */

/* File hw_atl_utils.c: Definition of common functions for Atlantic hardware
 * abstraction layer.
 */

#include "../aq_nic.h"
#include "../aq_hw_utils.h"
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
#include "hw_atl_llh_internal.h"

#include <linux/random.h>

#define HW_ATL_UCP_0X370_REG    0x0370U

#define HW_ATL_MIF_CMD          0x0200U
#define HW_ATL_MIF_ADDR         0x0208U
#define HW_ATL_MIF_VAL          0x020CU

#define HW_ATL_RPC_CONTROL_ADR  0x0338U
#define HW_ATL_RPC_STATE_ADR    0x033CU

#define HW_ATL_MPI_FW_VERSION	0x18
#define HW_ATL_MPI_CONTROL_ADR  0x0368U
#define HW_ATL_MPI_STATE_ADR    0x036CU

#define HW_ATL_MPI_STATE_MSK      0x00FFU
#define HW_ATL_MPI_STATE_SHIFT    0U
#define HW_ATL_MPI_SPEED_MSK      0x00FF0000U
#define HW_ATL_MPI_SPEED_SHIFT    16U
#define HW_ATL_MPI_DIRTY_WAKE_MSK 0x02000000U

#define HW_ATL_MPI_DAISY_CHAIN_STATUS	0x704
#define HW_ATL_MPI_BOOT_EXIT_CODE	0x388

#define HW_ATL_MAC_PHY_CONTROL	0x4000
#define HW_ATL_MAC_PHY_MPI_RESET_BIT 0x1D

#define HW_ATL_FW_VER_1X 0x01050006U
#define HW_ATL_FW_VER_2X 0x02000000U
#define HW_ATL_FW_VER_3X 0x03000000U

#define FORCE_FLASHLESS 0

enum mcp_area {
	MCP_AREA_CONFIG = 0x80000000,
	MCP_AREA_SETTINGS = 0x20000000,
};

static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual);

static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
				      enum hal_atl_utils_fw_state_e state);

static u32 hw_atl_utils_get_mpi_mbox_tid(struct aq_hw_s *self);
static u32 hw_atl_utils_mpi_get_state(struct aq_hw_s *self);
static u32 hw_atl_utils_mif_cmd_get(struct aq_hw_s *self);
static u32 hw_atl_utils_mif_addr_get(struct aq_hw_s *self);
static u32 hw_atl_utils_rpc_state_get(struct aq_hw_s *self);

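/* Reset the MAC, read back the firmware version reported by the MCP and bind
 * the matching ops table (FW 3.x reuses aq_fw_2x_ops, presumably because the
 * two interfaces are compatible). A minimal usage sketch of the call from the
 * NIC layer (illustrative, not the exact caller):
 *
 *	const struct aq_fw_ops *fw_ops = NULL;
 *
 *	err = hw_atl_utils_initfw(self, &fw_ops);
 *	if (err < 0)
 *		goto err_exit;
 */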
int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops)
{
	int err = 0;

	err = hw_atl_utils_soft_reset(self);
	if (err)
		return err;

	hw_atl_utils_hw_chip_features_init(self,
					   &self->chip_features);

	hw_atl_utils_get_fw_version(self, &self->fw_ver_actual);

	if (hw_atl_utils_ver_match(HW_ATL_FW_VER_1X,
				   self->fw_ver_actual) == 0) {
		*fw_ops = &aq_fw_1x_ops;
	} else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_2X,
					  self->fw_ver_actual) == 0) {
		*fw_ops = &aq_fw_2x_ops;
	} else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_3X,
					  self->fw_ver_actual) == 0) {
		*fw_ops = &aq_fw_2x_ops;
	} else {
		aq_pr_err("Bad FW version detected: %x\n",
			  self->fw_ver_actual);
		return -EOPNOTSUPP;
	}
	self->aq_fw_ops = *fw_ops;
	err = self->aq_fw_ops->init(self);

	return err;
}

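/* Flashless-boot (FLB) kickstart path. The raw register pokes below (0x404,
 * 0x53C, 0x520, ...) mirror the vendor MAC kickstart sequence and are not
 * otherwise documented here; the two wait loops poll the daisy chain status
 * and the FW version scratchpad until the MCP reports it is up.
 */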
static int hw_atl_utils_soft_reset_flb(struct aq_hw_s *self)
{
	u32 gsr, val;
	int k = 0;

	aq_hw_write_reg(self, 0x404, 0x40e1);
	AQ_HW_SLEEP(50);

	/* Cleanup SPI */
	val = aq_hw_read_reg(self, 0x53C);
	aq_hw_write_reg(self, 0x53C, val | 0x10);

	gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR);
	aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR, (gsr & 0xBFFF) | 0x8000);

	/* Kickstart MAC */
	aq_hw_write_reg(self, 0x404, 0x80e0);
	aq_hw_write_reg(self, 0x32a8, 0x0);
	aq_hw_write_reg(self, 0x520, 0x1);

	/* Reset SPI again because of possible interrupted SPI burst */
	val = aq_hw_read_reg(self, 0x53C);
	aq_hw_write_reg(self, 0x53C, val | 0x10);
	AQ_HW_SLEEP(10);
	/* Clear SPI reset state */
	aq_hw_write_reg(self, 0x53C, val & ~0x10);

	aq_hw_write_reg(self, 0x404, 0x180e0);

	for (k = 0; k < 1000; k++) {
		u32 flb_status = aq_hw_read_reg(self,
						HW_ATL_MPI_DAISY_CHAIN_STATUS);

		flb_status = flb_status & 0x10;
		if (flb_status)
			break;
		AQ_HW_SLEEP(10);
	}
	if (k == 1000) {
		aq_pr_err("MAC kickstart failed\n");
		return -EIO;
	}

	/* FW reset */
	aq_hw_write_reg(self, 0x404, 0x80e0);
	AQ_HW_SLEEP(50);
	aq_hw_write_reg(self, 0x3a0, 0x1);

	/* Kickstart PHY - skipped */

	/* Global software reset */
	hw_atl_rx_rx_reg_res_dis_set(self, 0U);
	hw_atl_tx_tx_reg_res_dis_set(self, 0U);
	aq_hw_write_reg_bit(self, HW_ATL_MAC_PHY_CONTROL,
			    BIT(HW_ATL_MAC_PHY_MPI_RESET_BIT),
			    HW_ATL_MAC_PHY_MPI_RESET_BIT, 0x0);
	gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR);
	aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR, (gsr & 0xBFFF) | 0x8000);

	for (k = 0; k < 1000; k++) {
		u32 fw_state = aq_hw_read_reg(self, HW_ATL_MPI_FW_VERSION);

		if (fw_state)
			break;
		AQ_HW_SLEEP(10);
	}
	if (k == 1000) {
		aq_pr_err("FW kickstart failed\n");
		return -EIO;
	}
	/* Old FW requires fixed delay after init */
	AQ_HW_SLEEP(15);

	return 0;
}

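/* ROM boot-loader (RBL) kickstart path: plant a 0xDEAD marker in the boot
 * exit code register, request a restart, wait for the RBL to replace the
 * marker with a real status, then wait for the firmware version register to
 * become non-zero.
 */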
static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self)
{
	u32 gsr, val, rbl_status;
	int k;

	aq_hw_write_reg(self, 0x404, 0x40e1);
	aq_hw_write_reg(self, 0x3a0, 0x1);
	aq_hw_write_reg(self, 0x32a8, 0x0);

	/* Alter RBL status */
	aq_hw_write_reg(self, 0x388, 0xDEAD);

	/* Cleanup SPI */
	val = aq_hw_read_reg(self, 0x53C);
	aq_hw_write_reg(self, 0x53C, val | 0x10);

	/* Global software reset */
	hw_atl_rx_rx_reg_res_dis_set(self, 0U);
	hw_atl_tx_tx_reg_res_dis_set(self, 0U);
	aq_hw_write_reg_bit(self, HW_ATL_MAC_PHY_CONTROL,
			    BIT(HW_ATL_MAC_PHY_MPI_RESET_BIT),
			    HW_ATL_MAC_PHY_MPI_RESET_BIT, 0x0);
	gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR);
	aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR,
			(gsr & 0xFFFFBFFF) | 0x8000);

	if (FORCE_FLASHLESS)
		aq_hw_write_reg(self, 0x534, 0x0);

	aq_hw_write_reg(self, 0x404, 0x40e0);

	/* Wait for RBL boot */
	for (k = 0; k < 1000; k++) {
		rbl_status = aq_hw_read_reg(self, 0x388) & 0xFFFF;
		if (rbl_status && rbl_status != 0xDEAD)
			break;
		AQ_HW_SLEEP(10);
	}
	if (!rbl_status || rbl_status == 0xDEAD) {
		aq_pr_err("RBL Restart failed\n");
		return -EIO;
	}

	/* Restore NVR */
	if (FORCE_FLASHLESS)
		aq_hw_write_reg(self, 0x534, 0xA0);

	if (rbl_status == 0xF1A7) {
		aq_pr_err("No FW detected. Dynamic FW load not implemented\n");
		return -EOPNOTSUPP;
	}

	for (k = 0; k < 1000; k++) {
		u32 fw_state = aq_hw_read_reg(self, HW_ATL_MPI_FW_VERSION);

		if (fw_state)
			break;
		AQ_HW_SLEEP(10);
	}
	if (k == 1000) {
		aq_pr_err("FW kickstart failed\n");
		return -EIO;
	}
	/* Old FW requires fixed delay after init */
	AQ_HW_SLEEP(15);

	return 0;
}

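/* Detect whether the chip booted via the ROM boot loader (non-zero boot exit
 * code) or via flashless boot, force FW 1.x out of a possible POWER state,
 * and dispatch to the matching kickstart routine above.
 */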
int hw_atl_utils_soft_reset(struct aq_hw_s *self)
{
	u32 boot_exit_code = 0;
	u32 val;
	int k;

	for (k = 0; k < 1000; ++k) {
		u32 flb_status = aq_hw_read_reg(self,
						HW_ATL_MPI_DAISY_CHAIN_STATUS);

		boot_exit_code = aq_hw_read_reg(self,
						HW_ATL_MPI_BOOT_EXIT_CODE);
		if (flb_status != 0x06000000 || boot_exit_code != 0)
			break;
	}

	if (k == 1000) {
		aq_pr_err("Neither RBL nor FLB firmware started\n");
		return -EOPNOTSUPP;
	}

	self->rbl_enabled = (boot_exit_code != 0);

	/* FW 1.x may boot up in an invalid POWER state (WOL feature).
	 * We should work around this by forcing its state back to DEINIT.
	 */
	if (!hw_atl_utils_ver_match(HW_ATL_FW_VER_1X,
				    aq_hw_read_reg(self,
						   HW_ATL_MPI_FW_VERSION))) {
		int err = 0;

		hw_atl_utils_mpi_set_state(self, MPI_DEINIT);
		err = readx_poll_timeout_atomic(hw_atl_utils_mpi_get_state,
						self, val,
						(val & HW_ATL_MPI_STATE_MSK) ==
						 MPI_DEINIT,
						10, 10000U);
		if (err)
			return err;
	}

	if (self->rbl_enabled)
		return hw_atl_utils_soft_reset_rbl(self);
	else
		return hw_atl_utils_soft_reset_flb(self);
}

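/* Read @cnt dwords of MCP memory at address @a into @p through the MIF
 * window, guarded by the RAM semaphore. On B1 silicon completion is detected
 * by the address register auto-advancing past @a; on older revisions by bit
 * 0x100 of the command register (presumably a busy flag) clearing.
 */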
int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
				  u32 *p, u32 cnt)
{
	int err = 0;
	u32 val;

	err = readx_poll_timeout_atomic(hw_atl_sem_ram_get,
					self, val, val == 1U,
					1U, 10000U);

	if (err < 0) {
		bool is_locked;

		hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
		is_locked = hw_atl_sem_ram_get(self);
		if (!is_locked) {
			err = -ETIME;
			goto err_exit;
		}
	}

	aq_hw_write_reg(self, HW_ATL_MIF_ADDR, a);

	for (++cnt; --cnt && !err;) {
		aq_hw_write_reg(self, HW_ATL_MIF_CMD, 0x00008000U);

		if (IS_CHIP_FEATURE(REVISION_B1))
			err = readx_poll_timeout_atomic(hw_atl_utils_mif_addr_get,
							self, val, val != a,
							1U, 1000U);
		else
			err = readx_poll_timeout_atomic(hw_atl_utils_mif_cmd_get,
							self, val,
							!(val & 0x100),
							1U, 1000U);

		*(p++) = aq_hw_read_reg(self, HW_ATL_MIF_VAL);
		a += 4;
	}

	hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);

err_exit:
	return err;
}

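/* B1-specific upload path: each dword is written through the 0x328/0x32C
 * scratch registers together with its dword offset inside the selected MCP
 * area, the MCP is interrupted, and scratchpad 12 is polled until the area
 * marker in the top nibble changes, which appears to be the acknowledge.
 */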
static int hw_atl_utils_write_b1_mbox(struct aq_hw_s *self, u32 addr,
				      u32 *p, u32 cnt, enum mcp_area area)
{
	u32 data_offset = 0;
	u32 offset = addr;
	int err = 0;
	u32 val;

	switch (area) {
	case MCP_AREA_CONFIG:
		offset -= self->rpc_addr;
		break;

	case MCP_AREA_SETTINGS:
		offset -= self->settings_addr;
		break;
	}

	offset = offset / sizeof(u32);

	for (; data_offset < cnt; ++data_offset, ++offset) {
		aq_hw_write_reg(self, 0x328, p[data_offset]);
		aq_hw_write_reg(self, 0x32C,
				(area | (0xFFFF & (offset * 4))));
		hw_atl_mcp_up_force_intr_set(self, 1);
		/* 1000 times by 10us = 10ms */
		err = readx_poll_timeout_atomic(hw_atl_scrpad12_get,
						self, val,
						(val & 0xF0000000) !=
						area,
						10U, 10000U);

		if (err < 0)
			break;
	}

	return err;
}

static int hw_atl_utils_write_b0_mbox(struct aq_hw_s *self, u32 addr,
				      u32 *p, u32 cnt)
{
	u32 offset = 0;
	int err = 0;
	u32 val;

	aq_hw_write_reg(self, 0x208, addr);

	for (; offset < cnt; ++offset) {
		aq_hw_write_reg(self, 0x20C, p[offset]);
		aq_hw_write_reg(self, 0x200, 0xC000);

		err = readx_poll_timeout_atomic(hw_atl_utils_mif_cmd_get,
						self, val,
						(val & 0x100) == 0U,
						10U, 10000U);

		if (err < 0)
			break;
	}

	return err;
}

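/* Common wrapper for uploads to MCP memory: take the RAM semaphore, pick the
 * B1 or pre-B1 mailbox flavour, then release the semaphore and fold in any
 * pending hardware error flags.
 */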
static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 addr, u32 *p,
					 u32 cnt, enum mcp_area area)
{
	int err = 0;
	u32 val;

	err = readx_poll_timeout_atomic(hw_atl_sem_ram_get, self,
					val, val == 1U,
					10U, 100000U);
	if (err < 0)
		goto err_exit;

	if (IS_CHIP_FEATURE(REVISION_B1))
		err = hw_atl_utils_write_b1_mbox(self, addr, p, cnt, area);
	else
		err = hw_atl_utils_write_b0_mbox(self, addr, p, cnt);

	hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);

	if (err < 0)
		goto err_exit;

	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}

int hw_atl_write_fwcfg_dwords(struct aq_hw_s *self, u32 *p, u32 cnt)
{
	return hw_atl_utils_fw_upload_dwords(self, self->rpc_addr, p,
					     cnt, MCP_AREA_CONFIG);
}

int hw_atl_write_fwsettings_dwords(struct aq_hw_s *self, u32 offset, u32 *p,
				   u32 cnt)
{
	return hw_atl_utils_fw_upload_dwords(self, self->settings_addr + offset,
					     p, cnt, MCP_AREA_SETTINGS);
}

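/* A firmware image matches when its major byte equals the expected one and
 * its minor/build part is not older than expected; returns 0 on match,
 * -EOPNOTSUPP otherwise.
 */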
static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual)
{
	const u32 dw_major_mask = 0xff000000U;
	const u32 dw_minor_mask = 0x00ffffffU;
	int err = 0;

	err = (dw_major_mask & (ver_expected ^ ver_actual)) ? -EOPNOTSUPP : 0;
	if (err < 0)
		goto err_exit;
	err = ((dw_minor_mask & ver_expected) > (dw_minor_mask & ver_actual)) ?
		-EOPNOTSUPP : 0;

err_exit:
	return err;
}

static int hw_atl_utils_init_ucp(struct aq_hw_s *self,
				 const struct aq_hw_caps_s *aq_hw_caps)
{
	int err = 0;

	if (!aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) {
		unsigned int rnd = 0U;
		unsigned int ucp_0x370 = 0U;

		get_random_bytes(&rnd, sizeof(unsigned int));

		ucp_0x370 = 0x02020202U | (0xFEFEFEFEU & rnd);
		aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370);
	}

	hw_atl_reg_glb_cpu_scratch_scp_set(self, 0x00000000U, 25U);

	/* check 10 times by 1ms */
	err = readx_poll_timeout_atomic(hw_atl_scrpad25_get,
					self, self->mbox_addr,
					self->mbox_addr != 0U,
					1000U, 10000U);

	return err;
}

struct aq_hw_atl_utils_fw_rpc_tid_s {
	union {
		u32 val;
		struct {
			u16 tid;
			u16 len;
		};
	};
};

#define hw_atl_utils_fw_rpc_init(_H_) hw_atl_utils_fw_rpc_wait(_H_, NULL)

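/* Push the RPC request body prepared in self->rpc up to the MCP and ring the
 * doorbell: the control register carries a fresh transaction id and the
 * request length packed as two 16-bit fields.
 */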
int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size)
{
	struct aq_hw_atl_utils_fw_rpc_tid_s sw;
	int err = 0;

	if (!IS_CHIP_FEATURE(MIPS)) {
		err = -EOPNOTSUPP;
		goto err_exit;
	}
	err = hw_atl_write_fwcfg_dwords(self, (u32 *)(void *)&self->rpc,
					(rpc_size + sizeof(u32) -
					 sizeof(u8)) / sizeof(u32));
	if (err < 0)
		goto err_exit;

	sw.tid = 0xFFFFU & (++self->rpc_tid);
	sw.len = (u16)rpc_size;
	aq_hw_write_reg(self, HW_ATL_RPC_CONTROL_ADR, sw.val);

err_exit:
	return err;
}

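/* Wait until the firmware has acknowledged the current RPC transaction (the
 * tid echoed in the state register matches the control register), re-issuing
 * the request when the FW reports a length of 0xFFFF, then optionally pull
 * the response back into self->rpc. Typical pairing with the call above
 * (illustrative sketch):
 *
 *	err = hw_atl_utils_fw_rpc_wait(self, &prpc);
 *	if (err < 0)
 *		goto err_exit;
 *	... fill *prpc and compute rpc_size ...
 *	err = hw_atl_utils_fw_rpc_call(self, rpc_size);
 */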
int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
			     struct hw_atl_utils_fw_rpc **rpc)
{
	struct aq_hw_atl_utils_fw_rpc_tid_s sw;
	struct aq_hw_atl_utils_fw_rpc_tid_s fw;
	int err = 0;

	do {
		sw.val = aq_hw_read_reg(self, HW_ATL_RPC_CONTROL_ADR);

		self->rpc_tid = sw.tid;

		err = readx_poll_timeout_atomic(hw_atl_utils_rpc_state_get,
						self, fw.val,
						sw.tid == fw.tid,
						1000U, 100000U);
		if (err < 0)
			goto err_exit;

		if (fw.len == 0xFFFFU) {
			err = hw_atl_utils_fw_rpc_call(self, sw.len);
			if (err < 0)
				goto err_exit;
		}
	} while (sw.tid != fw.tid || 0xFFFFU == fw.len);

	if (rpc) {
		if (fw.len) {
			err =
			hw_atl_utils_fw_downld_dwords(self,
						      self->rpc_addr,
						      (u32 *)(void *)
						      &self->rpc,
						      (fw.len + sizeof(u32) -
						       sizeof(u8)) /
						      sizeof(u32));
			if (err < 0)
				goto err_exit;
		}

		*rpc = &self->rpc;
	}

err_exit:
	return err;
}

static int hw_atl_utils_mpi_create(struct aq_hw_s *self)
{
	int err = 0;

	err = hw_atl_utils_init_ucp(self, self->aq_nic_cfg->aq_hw_caps);
	if (err < 0)
		goto err_exit;

	err = hw_atl_utils_fw_rpc_init(self);
	if (err < 0)
		goto err_exit;

err_exit:
	return err;
}

int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self,
			       struct hw_atl_utils_mbox_header *pmbox)
{
	return hw_atl_utils_fw_downld_dwords(self,
					     self->mbox_addr,
					     (u32 *)(void *)pmbox,
					     sizeof(*pmbox) / sizeof(u32));
}

void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
				 struct hw_atl_utils_mbox *pmbox)
{
	int err = 0;

	err = hw_atl_utils_fw_downld_dwords(self,
					    self->mbox_addr,
					    (u32 *)(void *)pmbox,
					    sizeof(*pmbox) / sizeof(u32));
	if (err < 0)
		goto err_exit;

	if (IS_CHIP_FEATURE(REVISION_A0)) {
		unsigned int mtu = self->aq_nic_cfg ?
					self->aq_nic_cfg->mtu : 1514U;

		pmbox->stats.ubrc = pmbox->stats.uprc * mtu;
		pmbox->stats.ubtc = pmbox->stats.uptc * mtu;
		pmbox->stats.dpc = atomic_read(&self->dpc);
	} else {
		pmbox->stats.dpc = hw_atl_rpb_rx_dma_drop_pkt_cnt_get(self);
	}

err_exit:;
}

static int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed)
{
	u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR);

	val = val & ~HW_ATL_MPI_SPEED_MSK;
	val |= speed << HW_ATL_MPI_SPEED_SHIFT;
	aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, val);

	return 0;
}

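/* Drive the MPI state machine via the control register. For MPI_RESET the
 * mailbox transaction id is polled until it changes, presumably to make sure
 * the firmware is alive before the reset is requested; DEINIT and POWER
 * additionally set the dirty-wake bit.
 */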
static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
				      enum hal_atl_utils_fw_state_e state)
{
	u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR);
	struct hw_atl_utils_mbox_header mbox;
	u32 transaction_id = 0;
	int err = 0;

	if (state == MPI_RESET) {
		hw_atl_utils_mpi_read_mbox(self, &mbox);

		transaction_id = mbox.transaction_id;

		err = readx_poll_timeout_atomic(hw_atl_utils_get_mpi_mbox_tid,
						self, mbox.transaction_id,
						transaction_id !=
						mbox.transaction_id,
						1000U, 100000U);
		if (err < 0)
			goto err_exit;
	}
	/* On interface DEINIT we disable DW (dirty wake) by raising the bit,
	 * otherwise we enable DW by clearing it.
	 */
	if (state == MPI_DEINIT || state == MPI_POWER)
		val |= HW_ATL_MPI_DIRTY_WAKE_MSK;
	else
		val &= ~HW_ATL_MPI_DIRTY_WAKE_MSK;

	/* Set new state bits */
	val = val & ~HW_ATL_MPI_STATE_MSK;
	val |= state & HW_ATL_MPI_STATE_MSK;

	aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, val);

err_exit:
	return err;
}

int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
{
	struct aq_hw_link_status_s *link_status = &self->aq_link_status;
	u32 mpi_state;
	u32 speed;

	mpi_state = hw_atl_utils_mpi_get_state(self);
	speed = mpi_state >> HW_ATL_MPI_SPEED_SHIFT;

	if (!speed) {
		link_status->mbps = 0U;
	} else {
		switch (speed) {
		case HAL_ATLANTIC_RATE_10G:
			link_status->mbps = 10000U;
			break;

		case HAL_ATLANTIC_RATE_5G:
		case HAL_ATLANTIC_RATE_5GSR:
			link_status->mbps = 5000U;
			break;

		case HAL_ATLANTIC_RATE_2GS:
			link_status->mbps = 2500U;
			break;

		case HAL_ATLANTIC_RATE_1G:
			link_status->mbps = 1000U;
			break;

		case HAL_ATLANTIC_RATE_100M:
			link_status->mbps = 100U;
			break;

		default:
			return -EBUSY;
		}
	}

	return 0;
}

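/* Fetch the permanent MAC address from the eFuse shadow in MCP memory. If the
 * eFuse content is unreadable, multicast or starts with three zero bytes, a
 * fallback address is synthesized from a fixed prefix plus the scratch value
 * held in register 0x370.
 */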
int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
				   u8 *mac)
{
	u32 mac_addr[2];
	u32 efuse_addr;
	int err = 0;
	u32 h = 0U;
	u32 l = 0U;

	if (!aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) {
		unsigned int ucp_0x370 = 0;
		unsigned int rnd = 0;

		get_random_bytes(&rnd, sizeof(unsigned int));

		ucp_0x370 = 0x02020202 | (0xFEFEFEFE & rnd);
		aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370);
	}

	efuse_addr = aq_hw_read_reg(self, 0x00000374U);

	err = hw_atl_utils_fw_downld_dwords(self, efuse_addr + (40U * 4U),
					    mac_addr, ARRAY_SIZE(mac_addr));
	if (err < 0) {
		mac_addr[0] = 0U;
		mac_addr[1] = 0U;
		err = 0;
	} else {
		mac_addr[0] = __swab32(mac_addr[0]);
		mac_addr[1] = __swab32(mac_addr[1]);
	}

	ether_addr_copy(mac, (u8 *)mac_addr);

	if ((mac[0] & 0x01U) || ((mac[0] | mac[1] | mac[2]) == 0x00U)) {
		/* chip revision */
		l = 0xE3000000U |
		    (0xFFFFU & aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) |
		    (0x00 << 16);
		h = 0x8001300EU;

		mac[5] = (u8)(0xFFU & l);
		l >>= 8;
		mac[4] = (u8)(0xFFU & l);
		l >>= 8;
		mac[3] = (u8)(0xFFU & l);
		l >>= 8;
		mac[2] = (u8)(0xFFU & l);
		mac[1] = (u8)(0xFFU & h);
		h >>= 8;
		mac[0] = (u8)(0xFFU & h);
	}

	return err;
}

unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps)
{
	unsigned int ret = 0U;

	switch (mbps) {
	case 100U:
		ret = 5U;
		break;

	case 1000U:
		ret = 4U;
		break;

	case 2500U:
		ret = 3U;
		break;

	case 5000U:
		ret = 1U;
		break;

	case 10000U:
		ret = 0U;
		break;

	default:
		break;
	}

	return ret;
}

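/* Translate the MIF revision field of the global chip id register into the
 * driver feature bitmap: revision 1 maps to A0, 2 to B0, 0xA to B1; B0/B1
 * additionally advertise the TPO2 and RPF2 blocks.
 */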
void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p)
{
	u32 val = hw_atl_reg_glb_mif_id_get(self);
	u32 mif_rev = val & 0xFFU;
	u32 chip_features = 0U;

	if ((0xFU & mif_rev) == 1U) {
		chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 |
			HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |
			HAL_ATLANTIC_UTILS_CHIP_MIPS;
	} else if ((0xFU & mif_rev) == 2U) {
		chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 |
			HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |
			HAL_ATLANTIC_UTILS_CHIP_MIPS |
			HAL_ATLANTIC_UTILS_CHIP_TPO2 |
			HAL_ATLANTIC_UTILS_CHIP_RPF2;
	} else if ((0xFU & mif_rev) == 0xAU) {
		chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_B1 |
			HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |
			HAL_ATLANTIC_UTILS_CHIP_MIPS |
			HAL_ATLANTIC_UTILS_CHIP_TPO2 |
			HAL_ATLANTIC_UTILS_CHIP_RPF2;
	}

	*p = chip_features;
}

static int hw_atl_fw1x_deinit(struct aq_hw_s *self)
{
	hw_atl_utils_mpi_set_speed(self, 0);
	hw_atl_utils_mpi_set_state(self, MPI_DEINIT);

	return 0;
}

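/* Accumulate firmware-reported counters into curr_stats. The mailbox counters
 * are free-running, so only the delta against the snapshot taken on the
 * previous call is added, and deltas are applied only while the link is up.
 */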
int hw_atl_utils_update_stats(struct aq_hw_s *self)
{
	struct aq_stats_s *cs = &self->curr_stats;
	struct hw_atl_utils_mbox mbox;

	hw_atl_utils_mpi_read_stats(self, &mbox);

#define AQ_SDELTA(_N_) (self->curr_stats._N_ += \
			mbox.stats._N_ - self->last_stats._N_)

	if (self->aq_link_status.mbps) {
		AQ_SDELTA(uprc);
		AQ_SDELTA(mprc);
		AQ_SDELTA(bprc);
		AQ_SDELTA(erpt);

		AQ_SDELTA(uptc);
		AQ_SDELTA(mptc);
		AQ_SDELTA(bptc);
		AQ_SDELTA(erpr);

		AQ_SDELTA(ubrc);
		AQ_SDELTA(ubtc);
		AQ_SDELTA(mbrc);
		AQ_SDELTA(mbtc);
		AQ_SDELTA(bbrc);
		AQ_SDELTA(bbtc);
		AQ_SDELTA(dpc);
	}
#undef AQ_SDELTA

	cs->dma_pkt_rc = hw_atl_stats_rx_dma_good_pkt_counter_get(self);
	cs->dma_pkt_tc = hw_atl_stats_tx_dma_good_pkt_counter_get(self);
	cs->dma_oct_rc = hw_atl_stats_rx_dma_good_octet_counter_get(self);
	cs->dma_oct_tc = hw_atl_stats_tx_dma_good_octet_counter_get(self);

	memcpy(&self->last_stats, &mbox.stats, sizeof(mbox.stats));

	return 0;
}

struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self)
{
	return &self->curr_stats;
}

static const u32 hw_atl_utils_hw_mac_regs[] = {
	0x00005580U, 0x00005590U, 0x000055B0U, 0x000055B4U,
	0x000055C0U, 0x00005B00U, 0x00005B04U, 0x00005B08U,
	0x00005B0CU, 0x00005B10U, 0x00005B14U, 0x00005B18U,
	0x00005B1CU, 0x00005B20U, 0x00005B24U, 0x00005B28U,
	0x00005B2CU, 0x00005B30U, 0x00005B34U, 0x00005B38U,
	0x00005B3CU, 0x00005B40U, 0x00005B44U, 0x00005B48U,
	0x00005B4CU, 0x00005B50U, 0x00005B54U, 0x00005B58U,
	0x00005B5CU, 0x00005B60U, 0x00005B64U, 0x00005B68U,
	0x00005B6CU, 0x00005B70U, 0x00005B74U, 0x00005B78U,
	0x00005B7CU, 0x00007C00U, 0x00007C04U, 0x00007C08U,
	0x00007C0CU, 0x00007C10U, 0x00007C14U, 0x00007C18U,
	0x00007C1CU, 0x00007C20U, 0x00007C40U, 0x00007C44U,
	0x00007C48U, 0x00007C4CU, 0x00007C50U, 0x00007C54U,
	0x00007C58U, 0x00007C5CU, 0x00007C60U, 0x00007C80U,
	0x00007C84U, 0x00007C88U, 0x00007C8CU, 0x00007C90U,
	0x00007C94U, 0x00007C98U, 0x00007C9CU, 0x00007CA0U,
	0x00007CC0U, 0x00007CC4U, 0x00007CC8U, 0x00007CCCU,
	0x00007CD0U, 0x00007CD4U, 0x00007CD8U, 0x00007CDCU,
	0x00007CE0U, 0x00000300U, 0x00000304U, 0x00000308U,
	0x0000030cU, 0x00000310U, 0x00000314U, 0x00000318U,
	0x0000031cU, 0x00000360U, 0x00000364U, 0x00000368U,
	0x0000036cU, 0x00000370U, 0x00000374U, 0x00006900U,
};

int hw_atl_utils_hw_get_regs(struct aq_hw_s *self,
			     const struct aq_hw_caps_s *aq_hw_caps,
			     u32 *regs_buff)
{
	unsigned int i = 0U;

	for (i = 0; i < aq_hw_caps->mac_regs_count; i++)
		regs_buff[i] = aq_hw_read_reg(self,
					      hw_atl_utils_hw_mac_regs[i]);

	return 0;
}

int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version)
{
	*fw_version = aq_hw_read_reg(self, HW_ATL_MPI_FW_VERSION);

	return 0;
}

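/* Build and submit the FW 1.x wake-on-magic-packet RPC: either install a
 * magic-packet pattern for @mac or remove the previously installed pattern,
 * depending on @wol_enabled.
 */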
static int aq_fw1x_set_wake_magic(struct aq_hw_s *self, bool wol_enabled,
				  u8 *mac)
{
	struct hw_atl_utils_fw_rpc *prpc = NULL;
	unsigned int rpc_size = 0U;
	int err = 0;

	err = hw_atl_utils_fw_rpc_wait(self, &prpc);
	if (err < 0)
		goto err_exit;

	memset(prpc, 0, sizeof(*prpc));

	if (wol_enabled) {
		rpc_size = offsetof(struct hw_atl_utils_fw_rpc, msg_wol_add) +
			   sizeof(prpc->msg_wol_add);

		prpc->msg_id = HAL_ATLANTIC_UTILS_FW_MSG_WOL_ADD;
		prpc->msg_wol_add.priority =
				HAL_ATLANTIC_UTILS_FW_MSG_WOL_PRIOR;
		prpc->msg_wol_add.pattern_id =
				HAL_ATLANTIC_UTILS_FW_MSG_WOL_PATTERN;
		prpc->msg_wol_add.packet_type =
				HAL_ATLANTIC_UTILS_FW_MSG_WOL_MAG_PKT;

		ether_addr_copy((u8 *)&prpc->msg_wol_add.magic_packet_pattern,
				mac);
	} else {
		rpc_size = sizeof(prpc->msg_wol_remove) +
			   offsetof(struct hw_atl_utils_fw_rpc, msg_wol_remove);

		prpc->msg_id = HAL_ATLANTIC_UTILS_FW_MSG_WOL_DEL;
		prpc->msg_wol_add.pattern_id =
				HAL_ATLANTIC_UTILS_FW_MSG_WOL_PATTERN;
	}

	err = hw_atl_utils_fw_rpc_call(self, rpc_size);

err_exit:
	return err;
}

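/* FW 1.x power-down entry point: when WAKE_MAGIC is requested, arm the magic
 * packet filter and the wakeup pattern mask via RPC first, then drop the link
 * speed to zero and move the MPI state machine to MPI_POWER.
 */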
static int aq_fw1x_set_power(struct aq_hw_s *self, unsigned int power_state,
			     u8 *mac)
{
	struct hw_atl_utils_fw_rpc *prpc = NULL;
	unsigned int rpc_size = 0U;
	int err = 0;

	if (self->aq_nic_cfg->wol & WAKE_MAGIC) {
		err = aq_fw1x_set_wake_magic(self, 1, mac);

		if (err < 0)
			goto err_exit;

		rpc_size = sizeof(prpc->msg_id) +
			   sizeof(prpc->msg_enable_wakeup);

		err = hw_atl_utils_fw_rpc_wait(self, &prpc);

		if (err < 0)
			goto err_exit;

		memset(prpc, 0, rpc_size);

		prpc->msg_id = HAL_ATLANTIC_UTILS_FW_MSG_ENABLE_WAKEUP;
		prpc->msg_enable_wakeup.pattern_mask = 0x00000002;

		err = hw_atl_utils_fw_rpc_call(self, rpc_size);
		if (err < 0)
			goto err_exit;
	}
	hw_atl_utils_mpi_set_speed(self, 0);
	hw_atl_utils_mpi_set_state(self, MPI_POWER);

err_exit:
	return err;
}

static u32 hw_atl_utils_get_mpi_mbox_tid(struct aq_hw_s *self)
{
	struct hw_atl_utils_mbox_header mbox;

	hw_atl_utils_mpi_read_mbox(self, &mbox);

	return mbox.transaction_id;
}

static u32 hw_atl_utils_mpi_get_state(struct aq_hw_s *self)
{
	return aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR);
}

static u32 hw_atl_utils_mif_cmd_get(struct aq_hw_s *self)
{
	return aq_hw_read_reg(self, HW_ATL_MIF_CMD);
}

static u32 hw_atl_utils_mif_addr_get(struct aq_hw_s *self)
{
	return aq_hw_read_reg(self, HW_ATL_MIF_ADDR);
}

static u32 hw_atl_utils_rpc_state_get(struct aq_hw_s *self)
{
	return aq_hw_read_reg(self, HW_ATL_RPC_STATE_ADR);
}

const struct aq_fw_ops aq_fw_1x_ops = {
	.init = hw_atl_utils_mpi_create,
	.deinit = hw_atl_fw1x_deinit,
	.reset = NULL,
	.get_mac_permanent = hw_atl_utils_get_mac_permanent,
	.set_link_speed = hw_atl_utils_mpi_set_speed,
	.set_state = hw_atl_utils_mpi_set_state,
	.update_link_status = hw_atl_utils_mpi_get_link_status,
	.update_stats = hw_atl_utils_update_stats,
	.get_phy_temp = NULL,
	.set_power = aq_fw1x_set_power,
	.set_eee_rate = NULL,
	.get_eee_rate = NULL,
	.set_flow_control = NULL,
	.send_fw_request = NULL,
	.enable_ptp = NULL,
	.led_control = NULL,
};