1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Atlantic Network Driver
3  *
4  * Copyright (C) 2014-2019 aQuantia Corporation
5  * Copyright (C) 2019-2020 Marvell International Ltd.
6  */
7 
8 /* File hw_atl_utils.c: Definition of common functions for Atlantic hardware
9  * abstraction layer.
10  */
11 
12 #include "../aq_nic.h"
13 #include "../aq_hw_utils.h"
14 #include "hw_atl_utils.h"
15 #include "hw_atl_llh.h"
16 #include "hw_atl_llh_internal.h"
17 
18 #include <linux/random.h>
19 
/* Scratchpad register seeded with a random pattern when FW leaves it zero */
#define HW_ATL_UCP_0X370_REG    0x0370U

/* Memory Interface (MIF) mailbox: command, address and data registers */
#define HW_ATL_MIF_CMD          0x0200U
#define HW_ATL_MIF_ADDR         0x0208U
#define HW_ATL_MIF_VAL          0x020CU

/* FW 1.x RPC mailbox: buffer address, request doorbell, response state */
#define HW_ATL_MPI_RPC_ADDR     0x0334U
#define HW_ATL_RPC_CONTROL_ADR  0x0338U
#define HW_ATL_RPC_STATE_ADR    0x033CU

#define HW_ATL_MPI_FW_VERSION	0x18
#define HW_ATL_MPI_CONTROL_ADR  0x0368U
#define HW_ATL_MPI_STATE_ADR    0x036CU

/* Field layout shared by the MPI control and state registers */
#define HW_ATL_MPI_STATE_MSK      0x00FFU
#define HW_ATL_MPI_STATE_SHIFT    0U
#define HW_ATL_MPI_SPEED_MSK      0x00FF0000U
#define HW_ATL_MPI_SPEED_SHIFT    16U
#define HW_ATL_MPI_DIRTY_WAKE_MSK 0x02000000U

#define HW_ATL_MPI_DAISY_CHAIN_STATUS	0x704
#define HW_ATL_MPI_BOOT_EXIT_CODE	0x388

#define HW_ATL_MAC_PHY_CONTROL	0x4000
#define HW_ATL_MAC_PHY_MPI_RESET_BIT 0x1D

/* Base versions of the supported FW branches (major byte + 24-bit minor) */
#define HW_ATL_FW_VER_1X 0x01050006U
#define HW_ATL_FW_VER_2X 0x02000000U
#define HW_ATL_FW_VER_3X 0x03000000U
#define HW_ATL_FW_VER_4X 0x04000000U

#define FORCE_FLASHLESS 0

/* High bits tagging which MCP memory window an upload targets */
enum mcp_area {
	MCP_AREA_CONFIG = 0x80000000,
	MCP_AREA_SETTINGS = 0x20000000,
};

static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
				      enum hal_atl_utils_fw_state_e state);
static u32 hw_atl_utils_get_mpi_mbox_tid(struct aq_hw_s *self);
static u32 hw_atl_utils_mpi_get_state(struct aq_hw_s *self);
static u32 hw_atl_utils_mif_cmd_get(struct aq_hw_s *self);
static u32 hw_atl_utils_mif_addr_get(struct aq_hw_s *self);
static u32 hw_atl_utils_rpc_state_get(struct aq_hw_s *self);
static u32 aq_fw1x_rpc_get(struct aq_hw_s *self);
66 
hw_atl_utils_initfw(struct aq_hw_s * self,const struct aq_fw_ops ** fw_ops)67 int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops)
68 {
69 	int err = 0;
70 
71 	hw_atl_utils_hw_chip_features_init(self,
72 					   &self->chip_features);
73 
74 	self->fw_ver_actual = hw_atl_utils_get_fw_version(self);
75 
76 	if (hw_atl_utils_ver_match(HW_ATL_FW_VER_1X, self->fw_ver_actual)) {
77 		*fw_ops = &aq_fw_1x_ops;
78 	} else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_2X, self->fw_ver_actual)) {
79 		*fw_ops = &aq_fw_2x_ops;
80 	} else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_3X, self->fw_ver_actual)) {
81 		*fw_ops = &aq_fw_2x_ops;
82 	} else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_4X, self->fw_ver_actual)) {
83 		*fw_ops = &aq_fw_2x_ops;
84 	} else {
85 		aq_pr_err("Bad FW version detected: %x\n",
86 			  self->fw_ver_actual);
87 		return -EOPNOTSUPP;
88 	}
89 	self->aq_fw_ops = *fw_ops;
90 	err = self->aq_fw_ops->init(self);
91 
92 	return err;
93 }
94 
/* Soft-reset sequence for the FLB boot path: restart SPI access, kick the
 * MAC daisy chain, then issue a global soft reset and wait for the FW to
 * report a version.  Returns 0 on success or -EIO on kickstart timeout.
 * The magic register addresses/values are vendor-defined.
 */
static int hw_atl_utils_soft_reset_flb(struct aq_hw_s *self)
{
	u32 gsr, val;
	int k = 0;

	aq_hw_write_reg(self, 0x404, 0x40e1);
	AQ_HW_SLEEP(50);

	/* Cleanup SPI */
	val = aq_hw_read_reg(self, 0x53C);
	aq_hw_write_reg(self, 0x53C, val | 0x10);

	gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR);
	aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR, (gsr & 0xBFFF) | 0x8000);

	/* Kickstart MAC */
	aq_hw_write_reg(self, 0x404, 0x80e0);
	aq_hw_write_reg(self, 0x32a8, 0x0);
	aq_hw_write_reg(self, 0x520, 0x1);

	/* Reset SPI again because of possible interrupted SPI burst */
	val = aq_hw_read_reg(self, 0x53C);
	aq_hw_write_reg(self, 0x53C, val | 0x10);
	AQ_HW_SLEEP(10);
	/* Clear SPI reset state */
	aq_hw_write_reg(self, 0x53C, val & ~0x10);

	aq_hw_write_reg(self, 0x404, 0x180e0);

	/* Poll (up to 1000 * 10ms) for the daisy-chain status bit 0x10 */
	for (k = 0; k < 1000; k++) {
		u32 flb_status = aq_hw_read_reg(self,
						HW_ATL_MPI_DAISY_CHAIN_STATUS);

		flb_status = flb_status & 0x10;
		if (flb_status)
			break;
		AQ_HW_SLEEP(10);
	}
	if (k == 1000) {
		aq_pr_err("MAC kickstart failed\n");
		return -EIO;
	}

	/* FW reset */
	aq_hw_write_reg(self, 0x404, 0x80e0);
	AQ_HW_SLEEP(50);
	aq_hw_write_reg(self, 0x3a0, 0x1);

	/* Kickstart PHY - skipped */

	/* Global software reset*/
	hw_atl_rx_rx_reg_res_dis_set(self, 0U);
	hw_atl_tx_tx_reg_res_dis_set(self, 0U);
	aq_hw_write_reg_bit(self, HW_ATL_MAC_PHY_CONTROL,
			    BIT(HW_ATL_MAC_PHY_MPI_RESET_BIT),
			    HW_ATL_MAC_PHY_MPI_RESET_BIT, 0x0);
	gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR);
	aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR, (gsr & 0xBFFF) | 0x8000);

	/* Wait for a non-zero FW version register as the "FW alive" signal */
	for (k = 0; k < 1000; k++) {
		u32 fw_state = aq_hw_read_reg(self, HW_ATL_MPI_FW_VERSION);

		if (fw_state)
			break;
		AQ_HW_SLEEP(10);
	}
	if (k == 1000) {
		aq_pr_err("FW kickstart failed\n");
		return -EIO;
	}
	/* Old FW requires fixed delay after init */
	AQ_HW_SLEEP(15);

	return 0;
}
170 
/* Soft-reset sequence for the RBL boot path: mark the boot-exit register,
 * issue a global soft reset, wait for the RBL to report a fresh status and
 * then for the FW to come up.  Returns 0, -EIO on timeout, or -EOPNOTSUPP
 * when the RBL reports no FW image present (status 0xF1A7).
 */
static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self)
{
	u32 gsr, val, rbl_status;
	int k;

	aq_hw_write_reg(self, 0x404, 0x40e1);
	aq_hw_write_reg(self, 0x3a0, 0x1);
	aq_hw_write_reg(self, 0x32a8, 0x0);

	/* Alter RBL status: write a marker so a later change is detectable */
	aq_hw_write_reg(self, 0x388, 0xDEAD);

	/* Cleanup SPI */
	val = aq_hw_read_reg(self, 0x53C);
	aq_hw_write_reg(self, 0x53C, val | 0x10);

	/* Global software reset*/
	hw_atl_rx_rx_reg_res_dis_set(self, 0U);
	hw_atl_tx_tx_reg_res_dis_set(self, 0U);
	aq_hw_write_reg_bit(self, HW_ATL_MAC_PHY_CONTROL,
			    BIT(HW_ATL_MAC_PHY_MPI_RESET_BIT),
			    HW_ATL_MAC_PHY_MPI_RESET_BIT, 0x0);
	gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR);
	aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR,
			(gsr & 0xFFFFBFFF) | 0x8000);

	if (FORCE_FLASHLESS)
		aq_hw_write_reg(self, 0x534, 0x0);

	aq_hw_write_reg(self, 0x404, 0x40e0);

	/* Wait for RBL boot: the status register leaving our 0xDEAD marker
	 * (non-zero, up to 1000 * 10ms) indicates the loader is running.
	 */
	for (k = 0; k < 1000; k++) {
		rbl_status = aq_hw_read_reg(self, 0x388) & 0xFFFF;
		if (rbl_status && rbl_status != 0xDEAD)
			break;
		AQ_HW_SLEEP(10);
	}
	if (!rbl_status || rbl_status == 0xDEAD) {
		aq_pr_err("RBL Restart failed");
		return -EIO;
	}

	/* Restore NVR */
	if (FORCE_FLASHLESS)
		aq_hw_write_reg(self, 0x534, 0xA0);

	/* 0xF1A7 is reported when the RBL found no FW image to boot */
	if (rbl_status == 0xF1A7) {
		aq_pr_err("No FW detected. Dynamic FW load not implemented\n");
		return -EOPNOTSUPP;
	}

	/* Wait for a non-zero FW version register as the "FW alive" signal */
	for (k = 0; k < 1000; k++) {
		u32 fw_state = aq_hw_read_reg(self, HW_ATL_MPI_FW_VERSION);

		if (fw_state)
			break;
		AQ_HW_SLEEP(10);
	}
	if (k == 1000) {
		aq_pr_err("FW kickstart failed\n");
		return -EIO;
	}
	/* Old FW requires fixed delay after init */
	AQ_HW_SLEEP(15);

	return 0;
}
239 
/* Top-level soft-reset entry point.  Determines whether the chip booted
 * through RBL or FLB, applies FW-version-specific pre-reset workarounds,
 * then dispatches to the matching reset sequence.
 */
int hw_atl_utils_soft_reset(struct aq_hw_s *self)
{
	int ver = hw_atl_utils_get_fw_version(self);
	u32 boot_exit_code = 0;
	u32 val;
	int k;

	/* A non-zero boot exit code identifies an RBL boot; FLB keeps the
	 * daisy-chain status at 0x06000000 with exit code 0 while booting.
	 */
	for (k = 0; k < 1000; ++k) {
		u32 flb_status = aq_hw_read_reg(self,
						HW_ATL_MPI_DAISY_CHAIN_STATUS);
		boot_exit_code = aq_hw_read_reg(self,
						HW_ATL_MPI_BOOT_EXIT_CODE);
		if (flb_status != 0x06000000 || boot_exit_code != 0)
			break;
	}

	if (k == 1000) {
		aq_pr_err("Neither RBL nor FLB firmware started\n");
		return -EOPNOTSUPP;
	}

	self->rbl_enabled = (boot_exit_code != 0);

	if (hw_atl_utils_ver_match(HW_ATL_FW_VER_1X, ver)) {
		int err = 0;

		/* FW 1.x may bootup in an invalid POWER state (WOL feature).
		 * We should work around this by forcing its state back to DEINIT
		 */
		hw_atl_utils_mpi_set_state(self, MPI_DEINIT);
		err = readx_poll_timeout_atomic(hw_atl_utils_mpi_get_state,
						self, val,
						(val & HW_ATL_MPI_STATE_MSK) ==
						 MPI_DEINIT,
						10, 10000U);
		if (err)
			return err;
	} else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_4X, ver)) {
		u64 sem_timeout = aq_hw_read_reg(self, HW_ATL_MIF_RESET_TIMEOUT_ADR);

		/* Acquire 2 semaphores before issuing reset for FW 4.x */
		if (sem_timeout > 3000)
			sem_timeout = 3000;
		/* Scale for the poll below (presumably ms -> us; confirm) */
		sem_timeout = sem_timeout * 1000;

		if (sem_timeout != 0) {
			int err;

			/* Timeouts are logged but not fatal: the reset
			 * proceeds regardless.
			 */
			err = readx_poll_timeout_atomic(hw_atl_sem_reset1_get, self, val,
							val == 1U, 1U, sem_timeout);
			if (err)
				aq_pr_err("reset sema1 timeout");

			err = readx_poll_timeout_atomic(hw_atl_sem_reset2_get, self, val,
							val == 1U, 1U, sem_timeout);
			if (err)
				aq_pr_err("reset sema2 timeout");
		}
	}

	if (self->rbl_enabled)
		return hw_atl_utils_soft_reset_rbl(self);
	else
		return hw_atl_utils_soft_reset_flb(self);
}
305 
/* Read @cnt dwords of MCP memory starting at address @a into @p through
 * the MIF mailbox, under the shared RAM semaphore.
 * Returns 0 on success or a negative errno (-ETIME when the semaphore
 * cannot be reclaimed after a timed-out acquisition).
 */
int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
				  u32 *p, u32 cnt)
{
	int err = 0;
	u32 val;

	err = readx_poll_timeout_atomic(hw_atl_sem_ram_get,
					self, val, val == 1U,
					1U, 10000U);

	if (err < 0) {
		bool is_locked;

		/* Acquisition timed out: force-release and try once more */
		hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
		is_locked = hw_atl_sem_ram_get(self);
		if (!is_locked) {
			err = -ETIME;
			goto err_exit;
		}
	}

	aq_hw_write_reg(self, HW_ATL_MIF_ADDR, a);

	/* "++cnt; --cnt" iterates exactly cnt times while stopping early if
	 * the previous iteration's completion poll failed.
	 */
	for (++cnt; --cnt && !err;) {
		aq_hw_write_reg(self, HW_ATL_MIF_CMD, 0x00008000U);

		/* B1 silicon signals completion by advancing MIF_ADDR past
		 * the requested address; earlier revisions clear bit 0x100
		 * in MIF_CMD instead.
		 */
		if (ATL_HW_IS_CHIP_FEATURE(self, REVISION_B1))
			err = readx_poll_timeout_atomic(hw_atl_utils_mif_addr_get,
							self, val, val != a,
							1U, 1000U);
		else
			err = readx_poll_timeout_atomic(hw_atl_utils_mif_cmd_get,
							self, val,
							!(val & 0x100),
							1U, 1000U);

		*(p++) = aq_hw_read_reg(self, HW_ATL_MIF_VAL);
		a += 4;
	}

	hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);

err_exit:
	return err;
}
351 
/* Write @cnt dwords from @p into the MCP @area at absolute address @addr
 * on B1 silicon, one dword per mailbox transaction through registers
 * 0x328 (data) and 0x32C (area-tagged offset).
 * Returns 0 or a negative errno from the completion poll.
 */
static int hw_atl_utils_write_b1_mbox(struct aq_hw_s *self, u32 addr,
				      u32 *p, u32 cnt, enum mcp_area area)
{
	u32 data_offset = 0;
	u32 offset = addr;
	int err = 0;
	u32 val;

	/* Convert the absolute address into an offset inside the area */
	switch (area) {
	case MCP_AREA_CONFIG:
		offset -= self->rpc_addr;
		break;

	case MCP_AREA_SETTINGS:
		offset -= self->settings_addr;
		break;
	}

	offset = offset / sizeof(u32);

	for (; data_offset < cnt; ++data_offset, ++offset) {
		/* Stage the data word, then the tagged byte offset, and
		 * interrupt the MCP to process it.
		 */
		aq_hw_write_reg(self, 0x328, p[data_offset]);
		aq_hw_write_reg(self, 0x32C,
				(area | (0xFFFF & (offset * 4))));
		hw_atl_mcp_up_force_intr_set(self, 1);
		/* 1000 times by 10us = 10ms */
		err = readx_poll_timeout_atomic(hw_atl_scrpad12_get,
						self, val,
						(val & 0xF0000000) !=
						area,
						10U, 10000U);

		if (err < 0)
			break;
	}

	return err;
}
390 
/* Write @cnt dwords from @p to MCP address @addr on pre-B1 silicon using
 * the MIF registers directly (0x208 address, 0x20C data, 0x200 command).
 * Returns 0 or a negative errno from the completion poll.
 */
static int hw_atl_utils_write_b0_mbox(struct aq_hw_s *self, u32 addr,
				      u32 *p, u32 cnt)
{
	int err = 0;
	u32 val;
	u32 i;

	/* Latch the target address once before streaming the data words */
	aq_hw_write_reg(self, 0x208, addr);

	for (i = 0U; i < cnt; ++i) {
		/* Stage the data word, then trigger the write command */
		aq_hw_write_reg(self, 0x20C, p[i]);
		aq_hw_write_reg(self, 0x200, 0xC000);

		/* Poll until bit 0x100 in MIF_CMD clears (10us * 1000) */
		err = readx_poll_timeout_atomic(hw_atl_utils_mif_cmd_get,
						self, val,
						(val & 0x100) == 0U,
						10U, 10000U);
		if (err < 0)
			break;
	}

	return err;
}
415 
/* Upload @cnt dwords from @p to MCP address @addr in the given @area,
 * holding the shared RAM semaphore for the duration of the transfer and
 * dispatching to the revision-specific mailbox writer.
 * Returns 0 or a negative errno.
 */
static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 addr, u32 *p,
					 u32 cnt, enum mcp_area area)
{
	u32 val;
	int err;

	/* Take the RAM semaphore before touching the mailbox */
	err = readx_poll_timeout_atomic(hw_atl_sem_ram_get, self,
					val, val == 1U,
					10U, 100000U);
	if (err < 0)
		return err;

	if (ATL_HW_IS_CHIP_FEATURE(self, REVISION_B1))
		err = hw_atl_utils_write_b1_mbox(self, addr, p, cnt, area);
	else
		err = hw_atl_utils_write_b0_mbox(self, addr, p, cnt);

	/* Always release the semaphore, even when the upload failed */
	hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);

	if (err < 0)
		return err;

	return aq_hw_err_from_flags(self);
}
443 
/* Upload @cnt dwords from @p to the FW config (RPC) area. */
int hw_atl_write_fwcfg_dwords(struct aq_hw_s *self, u32 *p, u32 cnt)
{
	return hw_atl_utils_fw_upload_dwords(self, self->rpc_addr, p,
					     cnt, MCP_AREA_CONFIG);
}
449 
/* Upload @cnt dwords from @p at byte @offset into the FW settings area. */
int hw_atl_write_fwsettings_dwords(struct aq_hw_s *self, u32 offset, u32 *p,
				   u32 cnt)
{
	return hw_atl_utils_fw_upload_dwords(self, self->settings_addr + offset,
					     p, cnt, MCP_AREA_SETTINGS);
}
456 
hw_atl_utils_ver_match(u32 ver_expected,u32 ver_actual)457 bool hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual)
458 {
459 	const u32 dw_major_mask = 0xff000000U;
460 	const u32 dw_minor_mask = 0x00ffffffU;
461 	bool ver_match;
462 
463 	ver_match = (dw_major_mask & (ver_expected ^ ver_actual)) ? false : true;
464 	if (!ver_match)
465 		goto err_exit;
466 	ver_match = ((dw_minor_mask & ver_expected) > (dw_minor_mask & ver_actual)) ?
467 		false : true;
468 
469 err_exit:
470 	return ver_match;
471 }
472 
hw_atl_utils_init_ucp(struct aq_hw_s * self,const struct aq_hw_caps_s * aq_hw_caps)473 static int hw_atl_utils_init_ucp(struct aq_hw_s *self,
474 				 const struct aq_hw_caps_s *aq_hw_caps)
475 {
476 	int err = 0;
477 
478 	if (!aq_hw_read_reg(self, 0x370U)) {
479 		unsigned int rnd = 0U;
480 		unsigned int ucp_0x370 = 0U;
481 
482 		get_random_bytes(&rnd, sizeof(unsigned int));
483 
484 		ucp_0x370 = 0x02020202U | (0xFEFEFEFEU & rnd);
485 		aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370);
486 	}
487 
488 	hw_atl_reg_glb_cpu_scratch_scp_set(self, 0x00000000U, 25U);
489 
490 	/* check 10 times by 1ms */
491 	err = readx_poll_timeout_atomic(hw_atl_scrpad25_get,
492 					self, self->mbox_addr,
493 					self->mbox_addr != 0U,
494 					1000U, 10000U);
495 	err = readx_poll_timeout_atomic(aq_fw1x_rpc_get, self,
496 					self->rpc_addr,
497 					self->rpc_addr != 0U,
498 					1000U, 100000U);
499 
500 	return err;
501 }
502 
/* Overlay of the RPC control/state register value: on little-endian hosts
 * the low 16 bits carry the transaction id and the high 16 bits the
 * request/response length.
 */
struct aq_hw_atl_utils_fw_rpc_tid_s {
	union {
		u32 val;
		struct {
			u16 tid;	/* transaction id */
			u16 len;	/* payload length in bytes */
		};
	};
};

/* RPC "init" is a wait with no response buffer requested */
#define hw_atl_utils_fw_rpc_init(_H_) hw_atl_utils_fw_rpc_wait(_H_, NULL)
514 
/* Send the RPC request already prepared in self->rpc (@rpc_size bytes):
 * upload the buffer to the config area, then write a fresh transaction id
 * and the request length to the RPC doorbell register.
 * Returns 0, -1 when the chip has no MIPS MCP, or a negative errno from
 * the upload.
 */
int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size)
{
	struct aq_hw_atl_utils_fw_rpc_tid_s sw;
	int err = 0;

	if (!ATL_HW_IS_CHIP_FEATURE(self, MIPS)) {
		err = -1;
		goto err_exit;
	}
	/* Round the byte count up to whole dwords for the upload */
	err = hw_atl_write_fwcfg_dwords(self, (u32 *)(void *)&self->rpc,
					(rpc_size + sizeof(u32) -
					 sizeof(u8)) / sizeof(u32));
	if (err < 0)
		goto err_exit;

	sw.tid = 0xFFFFU & (++self->rpc_tid);
	sw.len = (u16)rpc_size;
	aq_hw_write_reg(self, HW_ATL_RPC_CONTROL_ADR, sw.val);

err_exit:
	return err;
}
537 
/* Wait until the FW has acknowledged the current RPC transaction and,
 * when @rpc is non-NULL, download the FW response into self->rpc and hand
 * back a pointer to it.  A FW-reported length of 0xFFFF means the FW
 * requests a retransmission of the request.
 * Returns 0 or a negative errno.
 */
int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
			     struct hw_atl_utils_fw_rpc **rpc)
{
	struct aq_hw_atl_utils_fw_rpc_tid_s sw;
	struct aq_hw_atl_utils_fw_rpc_tid_s fw;
	int err = 0;

	do {
		/* Driver-side view of the current transaction */
		sw.val = aq_hw_read_reg(self, HW_ATL_RPC_CONTROL_ADR);

		self->rpc_tid = sw.tid;

		/* Wait for the FW to echo the same transaction id back */
		err = readx_poll_timeout_atomic(hw_atl_utils_rpc_state_get,
						self, fw.val,
						sw.tid == fw.tid,
						1000U, 100000U);
		if (err < 0)
			goto err_exit;

		err = aq_hw_err_from_flags(self);
		if (err < 0)
			goto err_exit;

		/* fw.len == 0xFFFF: FW asks us to re-send the request */
		if (fw.len == 0xFFFFU) {
			if (sw.len > sizeof(self->rpc)) {
				/* NOTE(review): logged at KERN_INFO although
				 * this is an error path; KERN_ERR may be more
				 * appropriate.
				 */
				printk(KERN_INFO "Invalid sw len: %x\n", sw.len);
				err = -EINVAL;
				goto err_exit;
			}
			err = hw_atl_utils_fw_rpc_call(self, sw.len);
			if (err < 0)
				goto err_exit;
		}
	} while (sw.tid != fw.tid || 0xFFFFU == fw.len);

	if (rpc) {
		if (fw.len) {
			if (fw.len > sizeof(self->rpc)) {
				printk(KERN_INFO "Invalid fw len: %x\n", fw.len);
				err = -EINVAL;
				goto err_exit;
			}
			/* Download fw.len bytes, rounded up to dwords */
			err =
			hw_atl_utils_fw_downld_dwords(self,
						      self->rpc_addr,
						      (u32 *)(void *)
						      &self->rpc,
						      (fw.len + sizeof(u32) -
						       sizeof(u8)) /
						      sizeof(u32));
			if (err < 0)
				goto err_exit;
		}

		*rpc = &self->rpc;
	}

err_exit:
	return err;
}
598 
hw_atl_utils_mpi_create(struct aq_hw_s * self)599 static int hw_atl_utils_mpi_create(struct aq_hw_s *self)
600 {
601 	int err = 0;
602 
603 	err = hw_atl_utils_init_ucp(self, self->aq_nic_cfg->aq_hw_caps);
604 	if (err < 0)
605 		goto err_exit;
606 
607 	err = hw_atl_utils_fw_rpc_init(self);
608 	if (err < 0)
609 		goto err_exit;
610 
611 err_exit:
612 	return err;
613 }
614 
/* Fetch just the FW mailbox header (transaction id etc.) from MCP memory
 * into @pmbox.  Returns 0 or a negative errno from the download.
 */
int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self,
			       struct hw_atl_utils_mbox_header *pmbox)
{
	return hw_atl_utils_fw_downld_dwords(self,
					     self->mbox_addr,
					     (u32 *)(void *)pmbox,
					     sizeof(*pmbox) / sizeof(u32));
}
623 
/* Read the full FW mailbox (header + stats) into @pmbox.  On A0 silicon
 * the byte counters are approximated from packet counters and the current
 * MTU, and the drop counter comes from the driver-side atomic; on later
 * revisions it is read from the RPB drop counter.  Download errors are
 * silently ignored (void return).
 */
void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
				 struct hw_atl_utils_mbox *pmbox)
{
	int err = 0;

	err = hw_atl_utils_fw_downld_dwords(self,
					    self->mbox_addr,
					    (u32 *)(void *)pmbox,
					    sizeof(*pmbox) / sizeof(u32));
	if (err < 0)
		goto err_exit;

	if (ATL_HW_IS_CHIP_FEATURE(self, REVISION_A0)) {
		unsigned int mtu = self->aq_nic_cfg ?
					self->aq_nic_cfg->mtu : 1514U;
		/* Approximate unicast byte counters as packets * MTU */
		pmbox->stats.ubrc = pmbox->stats.uprc * mtu;
		pmbox->stats.ubtc = pmbox->stats.uptc * mtu;
		pmbox->stats.dpc = atomic_read(&self->dpc);
	} else {
		pmbox->stats.dpc = hw_atl_rpb_rx_dma_drop_pkt_cnt_get(self);
	}

err_exit:;
}
648 
/* Program the requested link speed into the speed field (bits 16-23) of
 * the MPI control register via read-modify-write.  Always returns 0.
 */
static int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed)
{
	u32 ctrl;

	ctrl = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR);
	ctrl &= ~HW_ATL_MPI_SPEED_MSK;
	ctrl |= speed << HW_ATL_MPI_SPEED_SHIFT;
	aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, ctrl);

	return 0;
}
659 
/* Move the FW MPI state machine to @state through the MPI control
 * register.  For MPI_RESET, first wait for the mailbox transaction id to
 * advance, proving the FW is alive and servicing the mailbox.  DEINIT and
 * POWER transitions additionally raise the dirty-wake flag.
 * Returns 0 or a negative errno from the tid poll.
 */
static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
				      enum hal_atl_utils_fw_state_e state)
{
	u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR);
	struct hw_atl_utils_mbox_header mbox;
	u32 transaction_id = 0;
	int err = 0;

	if (state == MPI_RESET) {
		hw_atl_utils_mpi_read_mbox(self, &mbox);

		transaction_id = mbox.transaction_id;

		/* Wait until the FW bumps the tid past our snapshot */
		err = readx_poll_timeout_atomic(hw_atl_utils_get_mpi_mbox_tid,
						self, mbox.transaction_id,
						transaction_id !=
						mbox.transaction_id,
						1000U, 100000U);
		if (err < 0)
			goto err_exit;
	}
	/* On interface DEINIT we disable DW (raise bit)
	 * Otherwise enable DW (clear bit)
	 */
	if (state == MPI_DEINIT || state == MPI_POWER)
		val |= HW_ATL_MPI_DIRTY_WAKE_MSK;
	else
		val &= ~HW_ATL_MPI_DIRTY_WAKE_MSK;

	/* Set new state bits */
	val = val & ~HW_ATL_MPI_STATE_MSK;
	val |= state & HW_ATL_MPI_STATE_MSK;

	aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, val);

err_exit:
	return err;
}
698 
hw_atl_utils_mpi_get_link_status(struct aq_hw_s * self)699 int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
700 {
701 	struct aq_hw_link_status_s *link_status = &self->aq_link_status;
702 	u32 mpi_state;
703 	u32 speed;
704 
705 	mpi_state = hw_atl_utils_mpi_get_state(self);
706 	speed = mpi_state >> HW_ATL_MPI_SPEED_SHIFT;
707 
708 	if (!speed) {
709 		link_status->mbps = 0U;
710 	} else {
711 		switch (speed) {
712 		case HAL_ATLANTIC_RATE_10G:
713 			link_status->mbps = 10000U;
714 			break;
715 
716 		case HAL_ATLANTIC_RATE_5G:
717 		case HAL_ATLANTIC_RATE_5GSR:
718 			link_status->mbps = 5000U;
719 			break;
720 
721 		case HAL_ATLANTIC_RATE_2G5:
722 			link_status->mbps = 2500U;
723 			break;
724 
725 		case HAL_ATLANTIC_RATE_1G:
726 			link_status->mbps = 1000U;
727 			break;
728 
729 		case HAL_ATLANTIC_RATE_100M:
730 			link_status->mbps = 100U;
731 			break;
732 
733 		default:
734 			return -EBUSY;
735 		}
736 	}
737 	link_status->full_duplex = true;
738 
739 	return 0;
740 }
741 
/* Read the permanent MAC address from efuse into @mac.  If the efuse read
 * fails or yields a multicast/zero-OUI address, synthesize a fallback
 * address derived from the 0x370 scratchpad value.
 * Always returns 0 (efuse errors are absorbed by the fallback path).
 */
int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
				   u8 *mac)
{
	u32 mac_addr[2];
	u32 efuse_addr;
	int err = 0;
	u32 h = 0U;
	u32 l = 0U;

	/* Seed the scratchpad if the FW has not populated it yet (same
	 * pattern as hw_atl_utils_init_ucp())
	 */
	if (!aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) {
		unsigned int ucp_0x370 = 0;
		unsigned int rnd = 0;

		get_random_bytes(&rnd, sizeof(unsigned int));

		ucp_0x370 = 0x02020202 | (0xFEFEFEFE & rnd);
		aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370);
	}

	efuse_addr = aq_hw_read_reg(self, 0x00000374U);

	/* MAC lives 40 dwords into the efuse shadow */
	err = hw_atl_utils_fw_downld_dwords(self, efuse_addr + (40U * 4U),
					    mac_addr, ARRAY_SIZE(mac_addr));
	if (err < 0) {
		mac_addr[0] = 0U;
		mac_addr[1] = 0U;
		err = 0;
	} else {
		/* Efuse stores the address byte-swapped */
		mac_addr[0] = __swab32(mac_addr[0]);
		mac_addr[1] = __swab32(mac_addr[1]);
	}

	ether_addr_copy(mac, (u8 *)mac_addr);

	/* Multicast bit set or empty OUI: build a fallback address */
	if ((mac[0] & 0x01U) || ((mac[0] | mac[1] | mac[2]) == 0x00U)) {
		/* chip revision */
		l = 0xE3000000U |
		    (0xFFFFU & aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) |
		    (0x00 << 16);
		h = 0x8001300EU;

		mac[5] = (u8)(0xFFU & l);
		l >>= 8;
		mac[4] = (u8)(0xFFU & l);
		l >>= 8;
		mac[3] = (u8)(0xFFU & l);
		l >>= 8;
		mac[2] = (u8)(0xFFU & l);
		mac[1] = (u8)(0xFFU & h);
		h >>= 8;
		mac[0] = (u8)(0xFFU & h);
	}

	return err;
}
797 
/* Map a link rate in Mbps onto the FW speed-index encoding.
 * Unknown rates map to index 0, matching the previous behavior.
 */
unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps)
{
	static const struct {
		unsigned int mbps;
		unsigned int index;
	} rate_map[] = {
		{ 100U,   5U },
		{ 1000U,  4U },
		{ 2500U,  3U },
		{ 5000U,  1U },
		{ 10000U, 0U },
	};
	unsigned int i;

	for (i = 0U; i < sizeof(rate_map) / sizeof(rate_map[0]); i++) {
		if (rate_map[i].mbps == mbps)
			return rate_map[i].index;
	}

	return 0U;
}
829 
/* Derive the chip feature bitmap from the low nibble of the MIF revision
 * id and store it in *@p: 1 = A0, 2 = B0, 0xA = B1; other revisions get
 * only the base ATLANTIC flag.
 */
void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p)
{
	u32 mif_rev = hw_atl_reg_glb_mif_id_get(self) & 0xFFU;
	u32 features = ATL_HW_CHIP_ATLANTIC;

	switch (mif_rev & 0xFU) {
	case 1U:
		features |= ATL_HW_CHIP_REVISION_A0 |
			    ATL_HW_CHIP_MPI_AQ |
			    ATL_HW_CHIP_MIPS;
		break;
	case 2U:
		features |= ATL_HW_CHIP_REVISION_B0 |
			    ATL_HW_CHIP_MPI_AQ |
			    ATL_HW_CHIP_MIPS |
			    ATL_HW_CHIP_TPO2 |
			    ATL_HW_CHIP_RPF2;
		break;
	case 0xAU:
		features |= ATL_HW_CHIP_REVISION_B1 |
			    ATL_HW_CHIP_MPI_AQ |
			    ATL_HW_CHIP_MIPS |
			    ATL_HW_CHIP_TPO2 |
			    ATL_HW_CHIP_RPF2;
		break;
	default:
		break;
	}

	*p = features;
}
858 
/* FW 1.x deinit: drop the link speed to 0 and park the MPI state machine
 * in DEINIT.  Always returns 0.
 */
static int hw_atl_fw1x_deinit(struct aq_hw_s *self)
{
	hw_atl_utils_mpi_set_speed(self, 0);
	hw_atl_utils_mpi_set_state(self, MPI_DEINIT);

	return 0;
}
866 
/* Accumulate FW counter deltas into the driver's running statistics.
 * A negative delta (counter went backwards, e.g. across a FW restart)
 * marks the whole sample as corrupted, and the sample is discarded.
 * The DMA good-packet/octet counters are read from HW directly.
 * Always returns 0.
 */
int hw_atl_utils_update_stats(struct aq_hw_s *self)
{
	struct aq_stats_s *cs = &self->curr_stats;
	struct aq_stats_s curr_stats = *cs;
	struct hw_atl_utils_mbox mbox;
	bool corrupted_stats = false;

	hw_atl_utils_mpi_read_stats(self, &mbox);

/* Add (current - last) for counter _N_ unless any delta so far was
 * negative, which flags the whole sample as corrupted.
 */
#define AQ_SDELTA(_N_)  \
do { \
	if (!corrupted_stats && \
	    ((s64)(mbox.stats._N_ - self->last_stats._N_)) >= 0) \
		curr_stats._N_ += mbox.stats._N_ - self->last_stats._N_; \
	else \
		corrupted_stats = true; \
} while (0)

	/* Only fold in FW counters while the link is up */
	if (self->aq_link_status.mbps) {
		AQ_SDELTA(uprc);
		AQ_SDELTA(mprc);
		AQ_SDELTA(bprc);
		AQ_SDELTA(erpt);

		AQ_SDELTA(uptc);
		AQ_SDELTA(mptc);
		AQ_SDELTA(bptc);
		AQ_SDELTA(erpr);

		AQ_SDELTA(ubrc);
		AQ_SDELTA(ubtc);
		AQ_SDELTA(mbrc);
		AQ_SDELTA(mbtc);
		AQ_SDELTA(bbrc);
		AQ_SDELTA(bbtc);
		AQ_SDELTA(dpc);

		if (!corrupted_stats)
			*cs = curr_stats;
	}
#undef AQ_SDELTA

	/* DMA good counters come straight from HW, not from the FW mbox */
	cs->dma_pkt_rc = hw_atl_stats_rx_dma_good_pkt_counter_get(self);
	cs->dma_pkt_tc = hw_atl_stats_tx_dma_good_pkt_counter_get(self);
	cs->dma_oct_rc = hw_atl_stats_rx_dma_good_octet_counter_get(self);
	cs->dma_oct_tc = hw_atl_stats_tx_dma_good_octet_counter_get(self);

	/* Remember this sample as the baseline for the next delta */
	memcpy(&self->last_stats, &mbox.stats, sizeof(mbox.stats));

	return 0;
}
918 
/* Return a pointer to the driver-owned running statistics. */
struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self)
{
	return &self->curr_stats;
}
923 
/* Register addresses read back by hw_atl_utils_hw_get_regs() for the
 * register-dump facility.
 */
static const u32 hw_atl_utils_hw_mac_regs[] = {
	0x00005580U, 0x00005590U, 0x000055B0U, 0x000055B4U,
	0x000055C0U, 0x00005B00U, 0x00005B04U, 0x00005B08U,
	0x00005B0CU, 0x00005B10U, 0x00005B14U, 0x00005B18U,
	0x00005B1CU, 0x00005B20U, 0x00005B24U, 0x00005B28U,
	0x00005B2CU, 0x00005B30U, 0x00005B34U, 0x00005B38U,
	0x00005B3CU, 0x00005B40U, 0x00005B44U, 0x00005B48U,
	0x00005B4CU, 0x00005B50U, 0x00005B54U, 0x00005B58U,
	0x00005B5CU, 0x00005B60U, 0x00005B64U, 0x00005B68U,
	0x00005B6CU, 0x00005B70U, 0x00005B74U, 0x00005B78U,
	0x00005B7CU, 0x00007C00U, 0x00007C04U, 0x00007C08U,
	0x00007C0CU, 0x00007C10U, 0x00007C14U, 0x00007C18U,
	0x00007C1CU, 0x00007C20U, 0x00007C40U, 0x00007C44U,
	0x00007C48U, 0x00007C4CU, 0x00007C50U, 0x00007C54U,
	0x00007C58U, 0x00007C5CU, 0x00007C60U, 0x00007C80U,
	0x00007C84U, 0x00007C88U, 0x00007C8CU, 0x00007C90U,
	0x00007C94U, 0x00007C98U, 0x00007C9CU, 0x00007CA0U,
	0x00007CC0U, 0x00007CC4U, 0x00007CC8U, 0x00007CCCU,
	0x00007CD0U, 0x00007CD4U, 0x00007CD8U, 0x00007CDCU,
	0x00007CE0U, 0x00000300U, 0x00000304U, 0x00000308U,
	0x0000030cU, 0x00000310U, 0x00000314U, 0x00000318U,
	0x0000031cU, 0x00000360U, 0x00000364U, 0x00000368U,
	0x0000036cU, 0x00000370U, 0x00000374U, 0x00006900U,
};
948 
/* Snapshot the diagnostic MAC registers listed in hw_atl_utils_hw_mac_regs
 * into @regs_buff; @aq_hw_caps->mac_regs_count entries are read.
 * Always returns 0.
 */
int hw_atl_utils_hw_get_regs(struct aq_hw_s *self,
			     const struct aq_hw_caps_s *aq_hw_caps,
			     u32 *regs_buff)
{
	unsigned int i;

	for (i = 0U; i < aq_hw_caps->mac_regs_count; i++) {
		u32 addr = hw_atl_utils_hw_mac_regs[i];

		regs_buff[i] = aq_hw_read_reg(self, addr);
	}

	return 0;
}
961 
/* Read the FW version register (reads 0 while the FW is still booting). */
u32 hw_atl_utils_get_fw_version(struct aq_hw_s *self)
{
	return aq_hw_read_reg(self, HW_ATL_MPI_FW_VERSION);
}
966 
/* Build and send a FW 1.x RPC that adds (@wol_enabled true) or removes
 * the magic-packet wake-on-LAN filter for @mac.
 * Returns 0 or a negative errno.
 */
static int aq_fw1x_set_wake_magic(struct aq_hw_s *self, bool wol_enabled,
				  const u8 *mac)
{
	struct hw_atl_utils_fw_rpc *prpc = NULL;
	unsigned int rpc_size = 0U;
	int err = 0;

	/* Wait for any in-flight RPC and get the shared request buffer */
	err = hw_atl_utils_fw_rpc_wait(self, &prpc);
	if (err < 0)
		goto err_exit;

	memset(prpc, 0, sizeof(*prpc));

	if (wol_enabled) {
		rpc_size = offsetof(struct hw_atl_utils_fw_rpc, msg_wol_add) +
			   sizeof(prpc->msg_wol_add);


		prpc->msg_id = HAL_ATLANTIC_UTILS_FW_MSG_WOL_ADD;
		prpc->msg_wol_add.priority =
				HAL_ATLANTIC_UTILS_FW_MSG_WOL_PRIOR;
		prpc->msg_wol_add.pattern_id =
				HAL_ATLANTIC_UTILS_FW_MSG_WOL_PATTERN;
		prpc->msg_wol_add.packet_type =
				HAL_ATLANTIC_UTILS_FW_MSG_WOL_MAG_PKT;

		/* The magic-packet pattern payload is the MAC address */
		ether_addr_copy((u8 *)&prpc->msg_wol_add.magic_packet_pattern,
				mac);
	} else {
		rpc_size = sizeof(prpc->msg_wol_remove) +
			   offsetof(struct hw_atl_utils_fw_rpc, msg_wol_remove);

		prpc->msg_id = HAL_ATLANTIC_UTILS_FW_MSG_WOL_DEL;
		/* NOTE(review): pattern_id is written via msg_wol_add even in
		 * the remove path — presumably the union members overlap at
		 * this field; confirm against the struct layout.
		 */
		prpc->msg_wol_add.pattern_id =
				HAL_ATLANTIC_UTILS_FW_MSG_WOL_PATTERN;
	}

	err = hw_atl_utils_fw_rpc_call(self, rpc_size);

err_exit:
	return err;
}
1009 
/* FW 1.x power transition: if magic-packet WOL is configured, install the
 * filter and enable wakeup via RPC, then drop the link speed and move the
 * MPI state machine to MPI_POWER.
 * @power_state is accepted for ops-interface compatibility but not used
 * in this implementation.  Returns 0 or a negative errno.
 */
static int aq_fw1x_set_power(struct aq_hw_s *self, unsigned int power_state,
			     const u8 *mac)
{
	struct hw_atl_utils_fw_rpc *prpc = NULL;
	unsigned int rpc_size = 0U;
	int err = 0;

	if (self->aq_nic_cfg->wol & WAKE_MAGIC) {
		err = aq_fw1x_set_wake_magic(self, 1, mac);

		if (err < 0)
			goto err_exit;

		rpc_size = sizeof(prpc->msg_id) +
			   sizeof(prpc->msg_enable_wakeup);

		err = hw_atl_utils_fw_rpc_wait(self, &prpc);

		if (err < 0)
			goto err_exit;

		memset(prpc, 0, rpc_size);

		prpc->msg_id = HAL_ATLANTIC_UTILS_FW_MSG_ENABLE_WAKEUP;
		/* Enable pattern #1 (the magic-packet filter added above) */
		prpc->msg_enable_wakeup.pattern_mask = 0x00000002;

		err = hw_atl_utils_fw_rpc_call(self, rpc_size);
		if (err < 0)
			goto err_exit;
	}
	hw_atl_utils_mpi_set_speed(self, 0);
	hw_atl_utils_mpi_set_state(self, MPI_POWER);

err_exit:
	return err;
}
1046 
/* Read the current FW mailbox transaction id (readx_poll helper). */
static u32 hw_atl_utils_get_mpi_mbox_tid(struct aq_hw_s *self)
{
	struct hw_atl_utils_mbox_header mbox;

	hw_atl_utils_mpi_read_mbox(self, &mbox);

	return mbox.transaction_id;
}

/* Raw MPI state register (state in low byte, speed in bits 16-23). */
static u32 hw_atl_utils_mpi_get_state(struct aq_hw_s *self)
{
	return aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR);
}

/* MIF command register (readx_poll helper). */
static u32 hw_atl_utils_mif_cmd_get(struct aq_hw_s *self)
{
	return aq_hw_read_reg(self, HW_ATL_MIF_CMD);
}

/* MIF address register (readx_poll helper for B1 completion detection). */
static u32 hw_atl_utils_mif_addr_get(struct aq_hw_s *self)
{
	return aq_hw_read_reg(self, HW_ATL_MIF_ADDR);
}

/* RPC state register: FW-side transaction id and response length. */
static u32 hw_atl_utils_rpc_state_get(struct aq_hw_s *self)
{
	return aq_hw_read_reg(self, HW_ATL_RPC_STATE_ADR);
}

/* RPC buffer address as published by the FW 1.x firmware. */
static u32 aq_fw1x_rpc_get(struct aq_hw_s *self)
{
	return aq_hw_read_reg(self, HW_ATL_MPI_RPC_ADDR);
}
1080 
/* Operations table for FW 1.x; NULL entries are features this FW branch
 * does not support.
 */
const struct aq_fw_ops aq_fw_1x_ops = {
	.init = hw_atl_utils_mpi_create,
	.deinit = hw_atl_fw1x_deinit,
	.reset = NULL,
	.get_mac_permanent = hw_atl_utils_get_mac_permanent,
	.set_link_speed = hw_atl_utils_mpi_set_speed,
	.set_state = hw_atl_utils_mpi_set_state,
	.update_link_status = hw_atl_utils_mpi_get_link_status,
	.update_stats = hw_atl_utils_update_stats,
	.get_mac_temp = NULL,
	.get_phy_temp = NULL,
	.set_power = aq_fw1x_set_power,
	.set_eee_rate = NULL,
	.get_eee_rate = NULL,
	.set_flow_control = NULL,
	.send_fw_request = NULL,
	.enable_ptp = NULL,
	.led_control = NULL,
};
1100