1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright(c) 2019-2020  Realtek Corporation
3  */
4 
5 #include "coex.h"
6 #include "debug.h"
7 #include "mac.h"
8 #include "phy.h"
9 #include "reg.h"
10 #include "rtw8852a.h"
11 #include "rtw8852a_rfk.h"
12 #include "rtw8852a_rfk_table.h"
13 #include "rtw8852a_table.h"
14 
15 static void
16 _rfk_write_rf(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
17 {
18 	rtw89_write_rf(rtwdev, def->path, def->addr, def->mask, def->data);
19 }
20 
21 static void
22 _rfk_write32_mask(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
23 {
24 	rtw89_phy_write32_mask(rtwdev, def->addr, def->mask, def->data);
25 }
26 
27 static void
28 _rfk_write32_set(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
29 {
30 	rtw89_phy_write32_set(rtwdev, def->addr, def->mask);
31 }
32 
33 static void
34 _rfk_write32_clr(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
35 {
36 	rtw89_phy_write32_clr(rtwdev, def->addr, def->mask);
37 }
38 
39 static void
40 _rfk_delay(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
41 {
42 	udelay(def->data);
43 }
44 
/* Dispatch table mapping each RTW89_RFK_F_* flag to its handler; indexed by
 * the ->flag field of a table entry in rtw89_rfk_parser().
 */
static void
(*_rfk_handler[])(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def) = {
	[RTW89_RFK_F_WRF] = _rfk_write_rf,
	[RTW89_RFK_F_WM] = _rfk_write32_mask,
	[RTW89_RFK_F_WS] = _rfk_write32_set,
	[RTW89_RFK_F_WC] = _rfk_write32_clr,
	[RTW89_RFK_F_DELAY] = _rfk_delay,
};

/* Catch at build time any flag added without a matching handler. */
static_assert(ARRAY_SIZE(_rfk_handler) == RTW89_RFK_F_NUM);
55 
56 static void
57 rtw89_rfk_parser(struct rtw89_dev *rtwdev, const struct rtw89_rfk_tbl *tbl)
58 {
59 	const struct rtw89_reg5_def *p = tbl->defs;
60 	const struct rtw89_reg5_def *end = tbl->defs + tbl->size;
61 
62 	for (; p < end; p++)
63 		_rfk_handler[p->flag](rtwdev, p);
64 }
65 
/* Parse @tbl_t when @cond is true, @tbl_f otherwise; @rtwdev is evaluated
 * exactly once.
 */
#define rtw89_rfk_parser_by_cond(rtwdev, cond, tbl_t, tbl_f)	\
	do {							\
		typeof(rtwdev) _dev = (rtwdev);			\
		if (cond)					\
			rtw89_rfk_parser(_dev, (tbl_t));	\
		else						\
			rtw89_rfk_parser(_dev, (tbl_f));	\
	} while (0)
74 
75 static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
76 {
77 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]dbcc_en: %x,  PHY%d\n",
78 		    rtwdev->dbcc_en, phy_idx);
79 
80 	if (!rtwdev->dbcc_en)
81 		return RF_AB;
82 
83 	if (phy_idx == RTW89_PHY_0)
84 		return RF_A;
85 	else
86 		return RF_B;
87 }
88 
/* BB and RF registers saved before calibration and restored afterwards, so
 * the kick sequences do not disturb the operating configuration.
 */
static const u32 rtw8852a_backup_bb_regs[] = {0x2344, 0x58f0, 0x78f0};
static const u32 rtw8852a_backup_rf_regs[] = {0xef, 0xde, 0x0, 0x1e, 0x2, 0x85, 0x90, 0x5};
#define BACKUP_BB_REGS_NR ARRAY_SIZE(rtw8852a_backup_bb_regs)
#define BACKUP_RF_REGS_NR ARRAY_SIZE(rtw8852a_backup_rf_regs)
93 
94 static void _rfk_backup_bb_reg(struct rtw89_dev *rtwdev, u32 backup_bb_reg_val[])
95 {
96 	u32 i;
97 
98 	for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
99 		backup_bb_reg_val[i] =
100 			rtw89_phy_read32_mask(rtwdev, rtw8852a_backup_bb_regs[i],
101 					      MASKDWORD);
102 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
103 			    "[IQK]backup bb reg : %x, value =%x\n",
104 			    rtw8852a_backup_bb_regs[i], backup_bb_reg_val[i]);
105 	}
106 }
107 
108 static void _rfk_backup_rf_reg(struct rtw89_dev *rtwdev, u32 backup_rf_reg_val[],
109 			       u8 rf_path)
110 {
111 	u32 i;
112 
113 	for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
114 		backup_rf_reg_val[i] =
115 			rtw89_read_rf(rtwdev, rf_path,
116 				      rtw8852a_backup_rf_regs[i], RFREG_MASK);
117 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
118 			    "[IQK]backup rf S%d reg : %x, value =%x\n", rf_path,
119 			    rtw8852a_backup_rf_regs[i], backup_rf_reg_val[i]);
120 	}
121 }
122 
123 static void _rfk_restore_bb_reg(struct rtw89_dev *rtwdev,
124 				u32 backup_bb_reg_val[])
125 {
126 	u32 i;
127 
128 	for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
129 		rtw89_phy_write32_mask(rtwdev, rtw8852a_backup_bb_regs[i],
130 				       MASKDWORD, backup_bb_reg_val[i]);
131 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
132 			    "[IQK]restore bb reg : %x, value =%x\n",
133 			    rtw8852a_backup_bb_regs[i], backup_bb_reg_val[i]);
134 	}
135 }
136 
137 static void _rfk_restore_rf_reg(struct rtw89_dev *rtwdev,
138 				u32 backup_rf_reg_val[], u8 rf_path)
139 {
140 	u32 i;
141 
142 	for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
143 		rtw89_write_rf(rtwdev, rf_path, rtw8852a_backup_rf_regs[i],
144 			       RFREG_MASK, backup_rf_reg_val[i]);
145 
146 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
147 			    "[IQK]restore rf S%d reg: %x, value =%x\n", rf_path,
148 			    rtw8852a_backup_rf_regs[i], backup_rf_reg_val[i]);
149 	}
150 }
151 
/* For each path selected in @kpath, poll RR_MOD until it reads something
 * other than 2 (presumably the Tx state - TODO confirm against RR_MOD
 * encoding) so the path has settled before calibration proceeds.
 * Timeouts are only logged; the caller is not informed of failure.
 */
static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath)
{
	u8 path;
	u32 rf_mode;
	int ret;

	for (path = 0; path < RF_PATH_MAX; path++) {
		if (!(kpath & BIT(path)))
			continue;

		/* poll every 2us, give up after 5ms */
		ret = read_poll_timeout_atomic(rtw89_read_rf, rf_mode, rf_mode != 2,
					       2, 5000, false, rtwdev, path, 0x00,
					       RR_MOD_MASK);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RFK] Wait S%d to Rx mode!! (ret = %d)\n",
			    path, ret);
	}
}
170 
171 static void _dack_dump(struct rtw89_dev *rtwdev)
172 {
173 	struct rtw89_dack_info *dack = &rtwdev->dack;
174 	u8 i;
175 	u8 t;
176 
177 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
178 		    "[DACK]S0 ADC_DCK ic = 0x%x, qc = 0x%x\n",
179 		    dack->addck_d[0][0], dack->addck_d[0][1]);
180 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
181 		    "[DACK]S1 ADC_DCK ic = 0x%x, qc = 0x%x\n",
182 		    dack->addck_d[1][0], dack->addck_d[1][1]);
183 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
184 		    "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n",
185 		    dack->dadck_d[0][0], dack->dadck_d[0][1]);
186 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
187 		    "[DACK]S1 DAC_DCK ic = 0x%x, qc = 0x%x\n",
188 		    dack->dadck_d[1][0], dack->dadck_d[1][1]);
189 
190 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
191 		    "[DACK]S0 biask ic = 0x%x, qc = 0x%x\n",
192 		    dack->biask_d[0][0], dack->biask_d[0][1]);
193 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
194 		    "[DACK]S1 biask ic = 0x%x, qc = 0x%x\n",
195 		    dack->biask_d[1][0], dack->biask_d[1][1]);
196 
197 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK ic:\n");
198 	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
199 		t = dack->msbk_d[0][0][i];
200 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
201 	}
202 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK qc:\n");
203 	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
204 		t = dack->msbk_d[0][1][i];
205 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
206 	}
207 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK ic:\n");
208 	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
209 		t = dack->msbk_d[1][0][i];
210 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
211 	}
212 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK qc:\n");
213 	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
214 		t = dack->msbk_d[1][1][i];
215 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
216 	}
217 }
218 
/* Apply the table-driven analog front-end init sequence. */
static void _afe_init(struct rtw89_dev *rtwdev)
{
	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_afe_init_defs_tbl);
}
223 
/* Latch the ADDCK (ADC DC offset calibration) results of both paths into
 * dack->addck_d[path][0/1] for later reload by _addck_reload().
 * NOTE(review): slot [0] is read from the ..._Q field and [1] from ..._I,
 * while _dack_dump() labels [0] "ic" - confirm the field naming.
 */
static void _addck_backup(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;

	/* deselect debug readout before sampling the S0 result regs */
	rtw89_phy_write32_clr(rtwdev, R_S0_RXDC2, B_S0_RXDC2_SEL);
	dack->addck_d[0][0] = (u16)rtw89_phy_read32_mask(rtwdev, R_S0_ADDCK,
							 B_S0_ADDCK_Q);
	dack->addck_d[0][1] = (u16)rtw89_phy_read32_mask(rtwdev, R_S0_ADDCK,
							 B_S0_ADDCK_I);

	rtw89_phy_write32_clr(rtwdev, R_S1_RXDC2, B_S1_RXDC2_SEL);
	dack->addck_d[1][0] = (u16)rtw89_phy_read32_mask(rtwdev, R_S1_ADDCK,
							 B_S1_ADDCK_Q);
	dack->addck_d[1][1] = (u16)rtw89_phy_read32_mask(rtwdev, R_S1_ADDCK,
							 B_S1_ADDCK_I);
}
240 
/* Write the saved ADDCK results back into the Rx DC compensation regs.
 * Slot [1] is split: its upper bits go to the ..._Q2 field of RXDC2 and
 * its low 6 bits to the ..._Q field of RXDC; slot [0] goes to ..._I whole.
 * Finally enable manual-compensation mode on each path.
 */
static void _addck_reload(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;

	rtw89_phy_write32_mask(rtwdev, R_S0_RXDC, B_S0_RXDC_I, dack->addck_d[0][0]);
	rtw89_phy_write32_mask(rtwdev, R_S0_RXDC2, B_S0_RXDC2_Q2,
			       (dack->addck_d[0][1] >> 6));
	rtw89_phy_write32_mask(rtwdev, R_S0_RXDC, B_S0_RXDC_Q,
			       (dack->addck_d[0][1] & 0x3f));
	rtw89_phy_write32_set(rtwdev, R_S0_RXDC2, B_S0_RXDC2_MEN);
	rtw89_phy_write32_mask(rtwdev, R_S1_RXDC, B_S1_RXDC_I, dack->addck_d[1][0]);
	rtw89_phy_write32_mask(rtwdev, R_S1_RXDC2, B_S1_RXDC2_Q2,
			       (dack->addck_d[1][1] >> 6));
	rtw89_phy_write32_mask(rtwdev, R_S1_RXDC, B_S1_RXDC_Q,
			       (dack->addck_d[1][1] & 0x3f));
	rtw89_phy_write32_set(rtwdev, R_S1_RXDC2, B_S1_RXDC2_EN);
}
258 
/* Read back the S0 DACK results (per-index MSBK codes, bias, DA DCK) into
 * the dack state.  Leaves B_P0_NRBW_DBG set; _dack_s0() clears it after
 * the reload.
 */
static void _dack_backup_s0(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u8 i;

	rtw89_phy_write32_set(rtwdev, R_S0_DACKI, B_S0_DACKI_EN);
	rtw89_phy_write32_set(rtwdev, R_S0_DACKQ, B_S0_DACKQ_EN);
	rtw89_phy_write32_set(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG);

	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
		/* select entry i, then read that entry's MSBK code */
		rtw89_phy_write32_mask(rtwdev, R_S0_DACKI, B_S0_DACKI_AR, i);
		dack->msbk_d[0][0][i] =
			(u8)rtw89_phy_read32_mask(rtwdev, R_S0_DACKI7, B_S0_DACKI7_K);
		rtw89_phy_write32_mask(rtwdev, R_S0_DACKQ, B_S0_DACKQ_AR, i);
		dack->msbk_d[0][1][i] =
			(u8)rtw89_phy_read32_mask(rtwdev, R_S0_DACKQ7, B_S0_DACKQ7_K);
	}
	dack->biask_d[0][0] = (u16)rtw89_phy_read32_mask(rtwdev, R_S0_DACKI2,
							 B_S0_DACKI2_K);
	dack->biask_d[0][1] = (u16)rtw89_phy_read32_mask(rtwdev, R_S0_DACKQ2,
							 B_S0_DACKQ2_K);
	/* -8 recenters the raw code - presumably offset-binary; confirm */
	dack->dadck_d[0][0] = (u8)rtw89_phy_read32_mask(rtwdev, R_S0_DACKI8,
							B_S0_DACKI8_K) - 8;
	dack->dadck_d[0][1] = (u8)rtw89_phy_read32_mask(rtwdev, R_S0_DACKQ8,
							B_S0_DACKQ8_K) - 8;
}
285 
/* S1 counterpart of _dack_backup_s0().  Leaves B_P1_DBGMOD_ON set;
 * _dack_s1() clears it after the reload.
 */
static void _dack_backup_s1(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u8 i;

	rtw89_phy_write32_set(rtwdev, R_S1_DACKI, B_S1_DACKI_EN);
	rtw89_phy_write32_set(rtwdev, R_S1_DACKQ, B_S1_DACKQ_EN);
	rtw89_phy_write32_set(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON);

	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
		/* select entry i, then read that entry's MSBK code */
		rtw89_phy_write32_mask(rtwdev, R_S1_DACKI, B_S1_DACKI_AR, i);
		dack->msbk_d[1][0][i] =
			(u8)rtw89_phy_read32_mask(rtwdev, R_S1_DACKI7, B_S1_DACKI_K);
		rtw89_phy_write32_mask(rtwdev, R_S1_DACKQ, B_S1_DACKQ_AR, i);
		dack->msbk_d[1][1][i] =
			(u8)rtw89_phy_read32_mask(rtwdev, R_S1_DACKQ7, B_S1_DACKQ7_K);
	}
	dack->biask_d[1][0] =
		(u16)rtw89_phy_read32_mask(rtwdev, R_S1_DACKI2, B_S1_DACKI2_K);
	dack->biask_d[1][1] =
		(u16)rtw89_phy_read32_mask(rtwdev, R_S1_DACKQ2, B_S1_DACKQ2_K);
	/* -8 recenters the raw code - presumably offset-binary; confirm */
	dack->dadck_d[1][0] =
		(u8)rtw89_phy_read32_mask(rtwdev, R_S1_DACKI8, B_S1_DACKI8_K) - 8;
	dack->dadck_d[1][1] =
		(u8)rtw89_phy_read32_mask(rtwdev, R_S1_DACKQ8, B_S1_DACKQ8_K) - 8;
}
312 
313 static void _dack_reload_by_path(struct rtw89_dev *rtwdev,
314 				 enum rtw89_rf_path path, u8 index)
315 {
316 	struct rtw89_dack_info *dack = &rtwdev->dack;
317 	u32 tmp = 0, tmp_offset, tmp_reg;
318 	u8 i;
319 	u32 idx_offset, path_offset;
320 
321 	if (index == 0)
322 		idx_offset = 0;
323 	else
324 		idx_offset = 0x50;
325 
326 	if (path == RF_PATH_A)
327 		path_offset = 0;
328 	else
329 		path_offset = 0x2000;
330 
331 	tmp_offset = idx_offset + path_offset;
332 	/* msbk_d: 15/14/13/12 */
333 	tmp = 0x0;
334 	for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
335 		tmp |= dack->msbk_d[path][index][i + 12] << (i * 8);
336 	tmp_reg = 0x5e14 + tmp_offset;
337 	rtw89_phy_write32(rtwdev, tmp_reg, tmp);
338 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
339 		    rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
340 	/* msbk_d: 11/10/9/8 */
341 	tmp = 0x0;
342 	for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
343 		tmp |= dack->msbk_d[path][index][i + 8] << (i * 8);
344 	tmp_reg = 0x5e18 + tmp_offset;
345 	rtw89_phy_write32(rtwdev, tmp_reg, tmp);
346 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
347 		    rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
348 	/* msbk_d: 7/6/5/4 */
349 	tmp = 0x0;
350 	for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
351 		tmp |= dack->msbk_d[path][index][i + 4] << (i * 8);
352 	tmp_reg = 0x5e1c + tmp_offset;
353 	rtw89_phy_write32(rtwdev, tmp_reg, tmp);
354 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
355 		    rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
356 	/* msbk_d: 3/2/1/0 */
357 	tmp = 0x0;
358 	for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
359 		tmp |= dack->msbk_d[path][index][i] << (i * 8);
360 	tmp_reg = 0x5e20 + tmp_offset;
361 	rtw89_phy_write32(rtwdev, tmp_reg, tmp);
362 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
363 		    rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
364 	/* dadak_d/biask_d */
365 	tmp = 0x0;
366 	tmp = (dack->biask_d[path][index] << 22) |
367 	       (dack->dadck_d[path][index] << 14);
368 	tmp_reg = 0x5e24 + tmp_offset;
369 	rtw89_phy_write32(rtwdev, tmp_reg, tmp);
370 }
371 
372 static void _dack_reload(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
373 {
374 	u8 i;
375 
376 	for (i = 0; i < 2; i++)
377 		_dack_reload_by_path(rtwdev, path, i);
378 
379 	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
380 				 &rtw8852a_rfk_dack_reload_defs_a_tbl,
381 				 &rtw8852a_rfk_dack_reload_defs_b_tbl);
382 }
383 
#define ADDC_T_AVG 100
/* Log the averaged Rx DC offset of @path: sample the debug-report word
 * ADDC_T_AVG times, sign-extend the two 12-bit fields and average them.
 * Diagnostic only - the result is just printed.
 */
static void _check_addc(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
	s32 dc_re = 0, dc_im = 0;
	u32 tmp;
	u32 i;

	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_rfk_check_addc_defs_a_tbl,
				 &rtw8852a_rfk_check_addc_defs_b_tbl);

	for (i = 0; i < ADDC_T_AVG; i++) {
		tmp = rtw89_phy_read32_mask(rtwdev, R_DBG32_D, MASKDWORD);
		/* bits [23:12] -> re, bits [11:0] -> im, both signed 12-bit */
		dc_re += sign_extend32(FIELD_GET(0xfff000, tmp), 11);
		dc_im += sign_extend32(FIELD_GET(0xfff, tmp), 11);
	}

	dc_re /= ADDC_T_AVG;
	dc_im /= ADDC_T_AVG;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DACK]S%d,dc_re = 0x%x,dc_im =0x%x\n", path, dc_re, dc_im);
}
407 
/* Run ADC DC offset calibration (ADDCK) on both paths: reset, dump the DC
 * before, trigger, poll the done bit (0x1e00/0x3e00 bit 0, up to 10ms),
 * dump the DC after, then restore.  Timeouts are recorded in
 * dack->addck_timeout[] but do not abort the sequence.
 */
static void _addck(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u32 val;
	int ret;

	/* S0 */
	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_reset_defs_a_tbl);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]before S0 ADDCK\n");
	_check_addc(rtwdev, RF_PATH_A);

	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_trigger_defs_a_tbl);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
				       false, rtwdev, 0x1e00, BIT(0));
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADDCK timeout\n");
		dack->addck_timeout[0] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]ADDCK ret = %d\n", ret);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S0 ADDCK\n");
	_check_addc(rtwdev, RF_PATH_A);

	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_restore_defs_a_tbl);

	/* S1 */
	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_reset_defs_b_tbl);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]before S1 ADDCK\n");
	_check_addc(rtwdev, RF_PATH_B);

	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_trigger_defs_b_tbl);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
				       false, rtwdev, 0x3e00, BIT(0));
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADDCK timeout\n");
		dack->addck_timeout[1] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]ADDCK ret = %d\n", ret);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S1 ADDCK\n");
	_check_addc(rtwdev, RF_PATH_B);

	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_restore_defs_b_tbl);
}
454 
/* Dump the averaged DC with the DADC path selected: apply the per-path
 * "f" (forward/setup) table, reuse _check_addc() for the measurement, then
 * apply the "r" (restore) table.  Diagnostic only.
 */
static void _check_dadc(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_rfk_check_dadc_defs_f_a_tbl,
				 &rtw8852a_rfk_check_dadc_defs_f_b_tbl);

	_check_addc(rtwdev, path);

	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_rfk_check_dadc_defs_r_a_tbl,
				 &rtw8852a_rfk_check_dadc_defs_r_b_tbl);
}
467 
/* S0 DAC calibration: trigger MSBK and poll I/Q done (0x5e28/0x5e78 bit 15,
 * 10ms each), trigger DA DCK and poll (0x5e48/0x5e98 bit 17), restore, then
 * back up and reload the results.  Timeouts are recorded but not fatal.
 */
static void _dack_s0(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u32 val;
	int ret;

	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_f_a_tbl);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
				       false, rtwdev, 0x5e28, BIT(15));
	ret |= read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
					false, rtwdev, 0x5e78, BIT(15));
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK timeout\n");
		dack->msbk_timeout[0] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);

	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_m_a_tbl);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
				       false, rtwdev, 0x5e48, BIT(17));
	ret |= read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
					false, rtwdev, 0x5e98, BIT(17));
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DADACK timeout\n");
		dack->dadck_timeout[0] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);

	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_r_a_tbl);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S0 DADCK\n");
	_check_dadc(rtwdev, RF_PATH_A);

	_dack_backup_s0(rtwdev);
	_dack_reload(rtwdev, RF_PATH_A);

	/* _dack_backup_s0() left P0 debug mode on; turn it back off */
	rtw89_phy_write32_clr(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG);
}
508 
/* S1 counterpart of _dack_s0(): same sequence on the 0x7exx register
 * bank, with results backed up and reloaded for RF_PATH_B.
 */
static void _dack_s1(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u32 val;
	int ret;

	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_f_b_tbl);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
				       false, rtwdev, 0x7e28, BIT(15));
	ret |= read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
					false, rtwdev, 0x7e78, BIT(15));
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK timeout\n");
		dack->msbk_timeout[1] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);

	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_m_b_tbl);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
				       false, rtwdev, 0x7e48, BIT(17));
	ret |= read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
					false, rtwdev, 0x7e98, BIT(17));
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 DADCK timeout\n");
		dack->dadck_timeout[1] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);

	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_r_b_tbl);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S1 DADCK\n");
	_check_dadc(rtwdev, RF_PATH_B);

	_dack_backup_s1(rtwdev);
	_dack_reload(rtwdev, RF_PATH_B);

	/* _dack_backup_s1() left P1 debug mode on; turn it back off */
	rtw89_phy_write32_clr(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON);
}
549 
/* Run the full DAC calibration on both paths in turn. */
static void _dack(struct rtw89_dev *rtwdev)
{
	_dack_s0(rtwdev);
	_dack_s1(rtwdev);
}
555 
/* DAC calibration entry point: save the RF mode of both paths, init the
 * AFE, run ADDCK then DACK (each bracketed by BTC one-shot notifications),
 * dump the results and restore the original RF state.
 * NOTE(review): @force is currently unused - calibration always runs in
 * full; confirm whether a skip-if-done path was intended.
 */
static void _dac_cal(struct rtw89_dev *rtwdev, bool force)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u32 rf0_0, rf1_0;
	u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, RF_AB);

	dack->dack_done = false;
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK b\n");
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK start!!!\n");
	/* remember the current RF mode so it can be restored at the end */
	rf0_0 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK);
	rf1_0 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK);
	_afe_init(rtwdev);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x0);
	/* RF mode 0x30001 for the ADDCK phase */
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x30001);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, 0x30001);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_START);
	_addck(rtwdev);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_STOP);
	_addck_backup(rtwdev);
	_addck_reload(rtwdev);
	/* RF mode 0x40001 for the DACK phase */
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x40001);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, 0x40001);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MODOPT, RFREG_MASK, 0x0);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_MODOPT, RFREG_MASK, 0x0);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_START);
	_dack(rtwdev);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_STOP);
	_dack_dump(rtwdev);
	dack->dack_done = true;
	/* restore the saved RF mode and release the reset override */
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, rf0_0);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, rf1_0);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x1);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x1);
	dack->dack_cnt++;
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK finish!!!\n");
}
593 
#define RTW8852A_NCTL_VER 0xd
#define RTW8852A_IQK_VER 0x2a
#define RTW8852A_IQK_SS 2		/* number of RF paths covered by IQK */
#define RTW8852A_IQK_THR_REK 8
#define RTW8852A_IQK_CFIR_GROUP_NR 4	/* CFIR coefficient groups per path */

/* One-shot IQK sub-command types dispatched by _iqk_one_shot(). */
enum rtw8852a_iqk_type {
	ID_TXAGC,
	ID_FLOK_COARSE,
	ID_FLOK_FINE,
	ID_TXK,
	ID_RXAGC,
	ID_RXK,
	ID_NBTXK,
	ID_NBRXK,
};
610 
611 static void _iqk_read_fft_dbcc0(struct rtw89_dev *rtwdev, u8 path)
612 {
613 	u8 i = 0x0;
614 	u32 fft[6] = {0x0};
615 
616 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
617 	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00160000);
618 	fft[0] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
619 	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00170000);
620 	fft[1] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
621 	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00180000);
622 	fft[2] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
623 	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00190000);
624 	fft[3] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
625 	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x001a0000);
626 	fft[4] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
627 	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x001b0000);
628 	fft[5] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
629 	for (i = 0; i < 6; i++)
630 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x,fft[%x]= %x\n",
631 			    path, i, fft[i]);
632 }
633 
/* Dump 0x18 X/Y correction report entries for @path (DBCC0 only): select
 * each entry via R_NCTL_N2 and read it back from the per-path TXIQC
 * register, then restore readout mode.  Diagnostic only.
 */
static void _iqk_read_xym_dbcc0(struct rtw89_dev *rtwdev, u8 path)
{
	u8 i = 0x0;
	u32 tmp = 0x0;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, B_NCTL_CFG_SPAGE, path);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF, B_IQK_DIF_TRX, 0x1);

	for (i = 0x0; i < 0x18; i++) {
		/* NOTE(review): write of 0xc0+i immediately followed by a
		 * full clear of the same register looks like a latch strobe;
		 * confirm against the vendor reference.
		 */
		rtw89_phy_write32_mask(rtwdev, R_NCTL_N2, MASKDWORD, 0x000000c0 + i);
		rtw89_phy_write32_clr(rtwdev, R_NCTL_N2, MASKDWORD);
		tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x8%lx38 = %x\n",
			    path, BIT(path), tmp);
		udelay(1);
	}
	rtw89_phy_write32_clr(rtwdev, R_IQK_DIF, B_IQK_DIF_TRX);
	rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD, 0x40000000);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_N2, MASKDWORD, 0x80010100);
	udelay(1);
}
656 
/* Dump Tx CFIR coefficient group @group of @path (DBCC0 only): bounds-check
 * the indices, enable coefficient readout, print the 13 coefficient words
 * of the group, the four per-group tail registers, then restore readout
 * mode and print the KIP report word.  Diagnostic only.
 */
static void _iqk_read_txcfir_dbcc0(struct rtw89_dev *rtwdev, u8 path,
				   u8 group)
{
	static const u32 base_addrs[RTW8852A_IQK_SS][RTW8852A_IQK_CFIR_GROUP_NR] = {
		{0x8f20, 0x8f54, 0x8f88, 0x8fbc},
		{0x9320, 0x9354, 0x9388, 0x93bc},
	};
	u8 idx = 0x0;
	u32 tmp = 0x0;
	u32 base_addr;

	if (path >= RTW8852A_IQK_SS) {
		rtw89_warn(rtwdev, "cfir path %d out of range\n", path);
		return;
	}
	if (group >= RTW8852A_IQK_CFIR_GROUP_NR) {
		rtw89_warn(rtwdev, "cfir group %d out of range\n", group);
		return;
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
	rtw89_phy_write32_mask(rtwdev, R_W_COEF + (path << 8), MASKDWORD, 0x00000001);

	base_addr = base_addrs[path][group];

	/* 13 coefficient words, 4 bytes apart */
	for (idx = 0; idx < 0x0d; idx++) {
		tmp = rtw89_phy_read32_mask(rtwdev, base_addr + (idx << 2), MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] %x = %x\n",
			    base_addr + (idx << 2), tmp);
	}

	if (path == 0x0) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]\n");
		tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P0C0, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8f50 = %x\n", tmp);
		tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P0C1, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8f84 = %x\n", tmp);
		tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P0C2, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8fb8 = %x\n", tmp);
		tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P0C3, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8fec = %x\n", tmp);
	} else {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]\n");
		tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P1C0, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x9350 = %x\n", tmp);
		tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P1C1, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x9384 = %x\n", tmp);
		tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P1C2, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x93b8 = %x\n", tmp);
		tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P1C3, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x93ec = %x\n", tmp);
	}
	rtw89_phy_write32_clr(rtwdev, R_W_COEF + (path << 8), MASKDWORD);
	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT + (path << 8), B_KIP_RPT_SEL, 0xc);
	udelay(1);
	tmp = rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), MASKDWORD);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x8%lxfc = %x\n", path,
		    BIT(path), tmp);
}
717 
/* Rx counterpart of _iqk_read_txcfir_dbcc0(): dump the 16 Rx CFIR
 * coefficient words of group @group on @path plus the per-group tail
 * registers, then restore readout mode and print the KIP report word.
 * Diagnostic only.
 */
static void _iqk_read_rxcfir_dbcc0(struct rtw89_dev *rtwdev, u8 path,
				   u8 group)
{
	static const u32 base_addrs[RTW8852A_IQK_SS][RTW8852A_IQK_CFIR_GROUP_NR] = {
		{0x8d00, 0x8d44, 0x8d88, 0x8dcc},
		{0x9100, 0x9144, 0x9188, 0x91cc},
	};
	u8 idx = 0x0;
	u32 tmp = 0x0;
	u32 base_addr;

	if (path >= RTW8852A_IQK_SS) {
		rtw89_warn(rtwdev, "cfir path %d out of range\n", path);
		return;
	}
	if (group >= RTW8852A_IQK_CFIR_GROUP_NR) {
		rtw89_warn(rtwdev, "cfir group %d out of range\n", group);
		return;
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
	rtw89_phy_write32_mask(rtwdev, R_W_COEF + (path << 8), MASKDWORD, 0x00000001);

	base_addr = base_addrs[path][group];
	/* 16 coefficient words, 4 bytes apart */
	for (idx = 0; idx < 0x10; idx++) {
		tmp = rtw89_phy_read32_mask(rtwdev, base_addr + (idx << 2), MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK]%x = %x\n",
			    base_addr + (idx << 2), tmp);
	}

	if (path == 0x0) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]\n");
		tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P0C0, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8d40 = %x\n", tmp);
		tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P0C1, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8d84 = %x\n", tmp);
		tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P0C2, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8dc8 = %x\n", tmp);
		tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P0C3, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8e0c = %x\n", tmp);
	} else {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]\n");
		tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P1C0, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x9140 = %x\n", tmp);
		tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P1C1, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x9184 = %x\n", tmp);
		tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P1C2, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x91c8 = %x\n", tmp);
		tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P1C3, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x920c = %x\n", tmp);
	}
	rtw89_phy_write32_clr(rtwdev, R_W_COEF + (path << 8), MASKDWORD);
	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT + (path << 8), B_KIP_RPT_SEL, 0xd);
	tmp = rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), MASKDWORD);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x8%lxfc = %x\n", path,
		    BIT(path), tmp);
}
776 
/* Dump the IQK SRAM contents: step through entries 0x00..0x9f reading the
 * DC-I field, then again reading the DC-Q field, and clear the readout
 * selects afterwards.  Diagnostic only.
 * NOTE(review): @path is unused in the register sequence - it appears to
 * be informational only; confirm.
 */
static void _iqk_sram(struct rtw89_dev *rtwdev, u8 path)
{
	u32 tmp = 0x0;
	u32 i = 0x0;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00020000);
	rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX2, MASKDWORD, 0x00000080);
	rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 0x00010000);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x009);

	for (i = 0; i <= 0x9f; i++) {
		rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 0x00010000 + i);
		tmp = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]0x%x\n", tmp);
	}

	for (i = 0; i <= 0x9f; i++) {
		rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 0x00010000 + i);
		tmp = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCQ);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]0x%x\n", tmp);
	}
	rtw89_phy_write32_clr(rtwdev, R_SRAM_IQRX2, MASKDWORD);
	rtw89_phy_write32_clr(rtwdev, R_SRAM_IQRX, MASKDWORD);
}
802 
/* Prepare @path for Rx IQK: pulse the analog front-end and ADC FIFO reset
 * sequence, switch the RF mode to the band-specific RXK setting, mirror
 * the channel config into RR_RSV4, and power-cycle the RXK PLL (then wait
 * 128us for it to settle).
 */
static void _iqk_rxk_setting(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u32 tmp = 0x0;

	/* per-path register banks are (path << 13) apart */
	rtw89_phy_write32_set(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x3);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa041);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H2, 0x3);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x0);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H2, 0x0);
	udelay(1);
	/* pulse the ADC FIFO reset */
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST, 0x0303);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST, 0x0000);

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RXK2);
		rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x1);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RXK2);
		rtw89_write_rf(rtwdev, path, RR_WLSEL, RR_WLSEL_AG, 0x5);
		rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x1);
		break;
	default:
		break;
	}
	/* mirror the channel configuration into the reserved shadow reg */
	tmp = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
	rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, tmp);
	rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_OFF, 0x13);
	/* power-cycle the RXK PLL and allow it to settle */
	rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
	rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x1);
	fsleep(128);
}
841 
/* Wait up to ~8.2ms for the NCTL one-shot done handshake (low byte of
 * 0xbff8 == 0x55), clear the ack byte and log the NCTL report word.
 * NOTE(review): always returns false ("no fail") even on timeout - the
 * poll result is only logged; confirm this is intentional.
 */
static bool _iqk_check_cal(struct rtw89_dev *rtwdev, u8 path, u8 ktype)
{
	u32 tmp;
	u32 val;
	int ret;

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55, 1, 8200,
				       false, rtwdev, 0xbff8, MASKBYTE0);
	if (ret)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]IQK timeout!!!\n");
	rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, MASKBYTE0);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, ret=%d\n", path, ret);
	tmp = rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]S%x, type= %x, 0x8008 = 0x%x\n", path, ktype, tmp);

	return false;
}
860 
/* Fire a single NCTL calibration command (@ktype) on @path and wait for it
 * to complete.  The operation is bracketed with BTC one-shot notifications
 * so the BT-coexistence core knows a WLAN RFK is in flight.  Returns the
 * result of _iqk_check_cal() (currently always false; see that function).
 */
static bool _iqk_one_shot(struct rtw89_dev *rtwdev,
			  enum rtw89_phy_idx phy_idx, u8 path, u8 ktype)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool fail = false;
	u32 iqk_cmd = 0x0;
	u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy_idx, path);
	u32 addr_rfc_ctl = 0x0;

	/* Per-path RFC control register: 0x5864 for path A, 0x7864 for B. */
	if (path == RF_PATH_A)
		addr_rfc_ctl = 0x5864;
	else
		addr_rfc_ctl = 0x7864;

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);
	/* Build the NCTL command word: bit (4 + path) selects the path, and
	 * bits [11:8] carry the sub-command (1=FLOK coarse, 2=FLOK fine,
	 * 3=NBTXK, 5=RXAGC, 6=NBRXK).  Wideband TXK/RXK instead encode the
	 * bandwidth into [11:8] as 0x8+bw / 0xb+bw.  Most types also set or
	 * clear bit 29 of the RFC control register and pick a DIF4 pattern.
	 */
	switch (ktype) {
	case ID_TXAGC:
		iqk_cmd = 0x008 | (1 << (4 + path)) | (path << 1);
		break;
	case ID_FLOK_COARSE:
		rtw89_phy_write32_set(rtwdev, addr_rfc_ctl, 0x20000000);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x009);
		iqk_cmd = 0x108 | (1 << (4 + path));
		break;
	case ID_FLOK_FINE:
		rtw89_phy_write32_set(rtwdev, addr_rfc_ctl, 0x20000000);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x009);
		iqk_cmd = 0x208 | (1 << (4 + path));
		break;
	case ID_TXK:
		rtw89_phy_write32_clr(rtwdev, addr_rfc_ctl, 0x20000000);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x025);
		iqk_cmd = 0x008 | (1 << (path + 4)) |
			  (((0x8 + iqk_info->iqk_bw[path]) & 0xf) << 8);
		break;
	case ID_RXAGC:
		iqk_cmd = 0x508 | (1 << (4 + path)) | (path << 1);
		break;
	case ID_RXK:
		rtw89_phy_write32_set(rtwdev, addr_rfc_ctl, 0x20000000);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011);
		iqk_cmd = 0x008 | (1 << (path + 4)) |
			  (((0xb + iqk_info->iqk_bw[path]) & 0xf) << 8);
		break;
	case ID_NBTXK:
		rtw89_phy_write32_clr(rtwdev, addr_rfc_ctl, 0x20000000);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x025);
		iqk_cmd = 0x308 | (1 << (4 + path));
		break;
	case ID_NBRXK:
		rtw89_phy_write32_set(rtwdev, addr_rfc_ctl, 0x20000000);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011);
		iqk_cmd = 0x608 | (1 << (4 + path));
		break;
	default:
		return false;
	}

	/* Kick the command (iqk_cmd + 1: bit 0 presumably the "go" bit --
	 * not derivable here) and trigger via the DPK control enable.
	 */
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, iqk_cmd + 1);
	rtw89_phy_write32_set(rtwdev, R_DPK_CTL, B_DPK_CTL_EN);
	udelay(1);
	fail = _iqk_check_cal(rtwdev, path, ktype);
	/* Optional debug dumps, gated by the iqk_info debug switches. */
	if (iqk_info->iqk_xym_en)
		_iqk_read_xym_dbcc0(rtwdev, path);
	if (iqk_info->iqk_fft_en)
		_iqk_read_fft_dbcc0(rtwdev, path);
	if (iqk_info->iqk_sram_en)
		_iqk_sram(rtwdev, path);
	if (iqk_info->iqk_cfir_en) {
		if (ktype == ID_TXK) {
			_iqk_read_txcfir_dbcc0(rtwdev, path, 0x0);
			_iqk_read_txcfir_dbcc0(rtwdev, path, 0x1);
			_iqk_read_txcfir_dbcc0(rtwdev, path, 0x2);
			_iqk_read_txcfir_dbcc0(rtwdev, path, 0x3);
		} else {
			_iqk_read_rxcfir_dbcc0(rtwdev, path, 0x0);
			_iqk_read_rxcfir_dbcc0(rtwdev, path, 0x1);
			_iqk_read_rxcfir_dbcc0(rtwdev, path, 0x2);
			_iqk_read_rxcfir_dbcc0(rtwdev, path, 0x3);
		}
	}

	/* Release the RFC control bit regardless of which ktype set it. */
	rtw89_phy_write32_clr(rtwdev, addr_rfc_ctl, 0x20000000);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);

	return fail;
}
949 
/* Wideband RX IQK: iterate the four RX gain groups, program the band
 * specific RF gain/attenuation for each, and fire one ID_RXK one-shot per
 * group.  Each group's fail flag is latched into R_IQKINF bit
 * (16 + gp + path * 4).
 *
 * NOTE(review): the function unconditionally returns false (success) and
 * marks the path wideband-calibrated; the per-group flags are only
 * reported through R_IQKINF.
 */
static bool _rxk_group_sel(struct rtw89_dev *rtwdev,
			   enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	static const u32 rxgn_a[4] = {0x18C, 0x1A0, 0x28C, 0x2A0};
	static const u32 attc2_a[4] = {0x0, 0x0, 0x07, 0x30};
	static const u32 attc1_a[4] = {0x7, 0x5, 0x1, 0x1};
	static const u32 rxgn_g[4] = {0x1CC, 0x1E0, 0x2CC, 0x2E0};
	static const u32 attc2_g[4] = {0x0, 0x15, 0x3, 0x1a};
	static const u32 attc1_g[4] = {0x1, 0x0, 0x1, 0x0};
	u8 gp = 0x0;
	bool fail = false;
	u32 rf0 = 0x0;

	for (gp = 0; gp < 0x4; gp++) {
		/* Band-specific RX gain and attenuator codes per group. */
		switch (iqk_info->iqk_band[path]) {
		case RTW89_BAND_2G:
			rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, rxgn_g[gp]);
			rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C2G, attc2_g[gp]);
			rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C1G, attc1_g[gp]);
			break;
		case RTW89_BAND_5G:
			rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, rxgn_a[gp]);
			rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_C2, attc2_a[gp]);
			rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_C1, attc1_a[gp]);
			break;
		default:
			break;
		}
		/* Feed the current RF mode (plus the SYN1-to-2 flag) to the
		 * IQK engine and select CFIR LUT group @gp before firing.
		 */
		rtw89_phy_write32_set(rtwdev, R_IQK_CFG, B_IQK_CFG_SET);
		rf0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI,
				       rf0 | iqk_info->syn1to2);
		rtw89_phy_write32_mask(rtwdev, R_IQK_COM, MASKDWORD, 0x40010100);
		rtw89_phy_write32_clr(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_RXCFIR);
		rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL);
		rtw89_phy_write32_clr(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, gp);
		rtw89_phy_write32_mask(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_EN, 0x1);
		rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
		fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK);
		rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(16 + gp + path * 4), fail);
	}

	/* Undo the RXK routing set up by _iqk_rxk_setting(). */
	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x0);
		rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x0);
		rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
		rtw89_write_rf(rtwdev, path, RR_WLSEL, RR_WLSEL_AG, 0x0);
		break;
	default:
		break;
	}
	/* Wideband result: keep a default NB CFIR and enable the RX CFIR. */
	iqk_info->nb_rxcfir[path] = 0x40000000;
	rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
			       B_IQK_RES_RXCFIR, 0x5);
	iqk_info->is_wb_rxiqk[path] = true;
	return false;
}
1013 
/* Narrowband RX IQK using a single fixed gain group (0).  On success the
 * RX CFIR readback (with bit 1 set) is stored as nb_rxcfir; on failure a
 * default of 0x40000002 is stored.  Returns the one-shot result.
 */
static bool _iqk_nbrxk(struct rtw89_dev *rtwdev,
		       enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u8 group = 0x2;
	u32 rf0 = 0x0, tmp = 0x0;
	u32 idxrxgain_a = 0x1a0;
	u32 idxattc2_a = 0x00;
	u32 idxattc1_a = 0x5;
	u32 idxrxgain_g = 0x1E0;
	u32 idxattc2_g = 0x15;
	u32 idxattc1_g = 0x0;
	bool fail = false;

	/* Band-specific fixed RX gain / attenuator codes (these match
	 * group 1 of the wideband tables in _rxk_group_sel()).
	 */
	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, idxrxgain_g);
		rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C2G, idxattc2_g);
		rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C1G, idxattc1_g);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, idxrxgain_a);
		rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_C2, idxattc2_a);
		rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_C1, idxattc1_a);
		break;
	default:
		break;
	}
	/* IQK engine setup mirrors the per-group body of _rxk_group_sel(). */
	rtw89_phy_write32_set(rtwdev, R_IQK_CFG, B_IQK_CFG_SET);
	rf0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI,
			       rf0 | iqk_info->syn1to2);
	rtw89_phy_write32_mask(rtwdev, R_IQK_COM, MASKDWORD, 0x40010100);
	rtw89_phy_write32_clr(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_RXCFIR);
	rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL);
	rtw89_phy_write32_clr(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
			       B_CFIR_LUT_GP, group);
	rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_EN);
	rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
	fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK);

	/* Undo the RXK routing set up by _iqk_rxk_setting(). */
	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x0);
		rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x0);
		rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
		rtw89_write_rf(rtwdev, path, RR_WLSEL, RR_WLSEL_AG, 0x0);
		break;
	default:
		break;
	}
	if (!fail) {
		tmp = rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD);
		iqk_info->nb_rxcfir[path] = tmp | 0x2;
	} else {
		iqk_info->nb_rxcfir[path] = 0x40000002;
	}
	return fail;
}
1077 
/* Select the CFIR system / ADC clocking used during RX IQK, depending on
 * the calibrated bandwidth: 80 MHz uses RXCK divider value 0x2 and ADC
 * clock value 0x1, narrower bandwidths use 0x1 and 0x0.
 */
static void _iqk_rxclk_setting(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

	if (iqk_info->iqk_bw[path] == RTW89_CHANNEL_WIDTH_80) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8),
				       MASKDWORD, 0x4d000a08);
		rtw89_phy_write32_mask(rtwdev, R_P0_RXCK + (path << 13),
				       B_P0_RXCK_VAL, 0x2);
		rtw89_phy_write32_set(rtwdev, R_P0_RXCK + (path << 13), B_P0_RXCK_ON);
		rtw89_phy_write32_set(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_ON);
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_VAL, 0x1);
	} else {
		rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8),
				       MASKDWORD, 0x44000a08);
		rtw89_phy_write32_mask(rtwdev, R_P0_RXCK + (path << 13),
				       B_P0_RXCK_VAL, 0x1);
		rtw89_phy_write32_set(rtwdev, R_P0_RXCK + (path << 13), B_P0_RXCK_ON);
		rtw89_phy_write32_set(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_ON);
		rtw89_phy_write32_clr(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_VAL);
	}
}
1101 
/* Wideband TX IQK: walk the four TX gain groups, apply the band-specific
 * TX gain / attenuator / IQ-swap (itqt) settings, and run one ID_TXK
 * one-shot per group.  Each group's fail flag is latched into R_IQKINF
 * bit (8 + gp + path * 4).
 *
 * NOTE(review): the function unconditionally returns false (success) and
 * stores a default narrowband TX CFIR; the per-group flags are only
 * reported through R_IQKINF.
 */
static bool _txk_group_sel(struct rtw89_dev *rtwdev,
			   enum rtw89_phy_idx phy_idx, u8 path)
{
	static const u32 a_txgain[4] = {0xE466, 0x646D, 0xE4E2, 0x64ED};
	static const u32 g_txgain[4] = {0x60e8, 0x60f0, 0x61e8, 0x61ED};
	static const u32 a_itqt[4] = {0x12, 0x12, 0x12, 0x1b};
	static const u32 g_itqt[4] = {0x09, 0x12, 0x12, 0x12};
	static const u32 g_attsmxr[4] = {0x0, 0x1, 0x1, 0x1};
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool fail = false;
	u8 gp = 0x0;
	u32 tmp = 0x0;

	for (gp = 0x0; gp < 0x4; gp++) {
		/* Band-specific TX gain and (2G only) attenuator codes. */
		switch (iqk_info->iqk_band[path]) {
		case RTW89_BAND_2G:
			rtw89_phy_write32_mask(rtwdev, R_RFGAIN_BND + (path << 8),
					       B_RFGAIN_BND, 0x08);
			rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL,
				       g_txgain[gp]);
			rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1,
				       g_attsmxr[gp]);
			rtw89_write_rf(rtwdev, path, RR_TXG2, RR_TXG2_ATT0,
				       g_attsmxr[gp]);
			rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
					       MASKDWORD, g_itqt[gp]);
			break;
		case RTW89_BAND_5G:
			rtw89_phy_write32_mask(rtwdev, R_RFGAIN_BND + (path << 8),
					       B_RFGAIN_BND, 0x04);
			rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL,
				       a_txgain[gp]);
			rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
					       MASKDWORD, a_itqt[gp]);
			break;
		default:
			break;
		}
		/* Select CFIR LUT group @gp (G3 set for TX) and fire. */
		rtw89_phy_write32_clr(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_TXCFIR);
		rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL);
		rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_GP, gp);
		rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
		fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK);
		rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(8 + gp + path * 4), fail);
	}

	/* Wideband result: keep a default NB CFIR and enable the TX CFIR. */
	iqk_info->nb_txcfir[path] = 0x40000000;
	rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
			       B_IQK_RES_TXCFIR, 0x5);
	iqk_info->is_wb_txiqk[path] = true;
	tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x8%lx38 = 0x%x\n", path,
		    BIT(path), tmp);
	return false;
}
1159 
/* Narrowband TX IQK using a single fixed gain group (2).  On success the
 * TX CFIR readback (with bit 1 set) is stored as nb_txcfir; on failure a
 * default of 0x40000002 is stored.  Returns the one-shot result.
 */
static bool _iqk_nbtxk(struct rtw89_dev *rtwdev,
		       enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u8 group = 0x2;
	u32 a_mode_txgain = 0x64e2;
	u32 g_mode_txgain = 0x61e8;
	u32 attsmxr = 0x1;
	u32 itqt = 0x12;
	u32 tmp = 0x0;
	bool fail = false;

	/* Band-specific fixed TX gain (and 2G attenuator) codes; these
	 * match group 2 of the wideband tables in _txk_group_sel().
	 */
	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_phy_write32_mask(rtwdev, R_RFGAIN_BND + (path << 8),
				       B_RFGAIN_BND, 0x08);
		rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL, g_mode_txgain);
		rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1, attsmxr);
		rtw89_write_rf(rtwdev, path, RR_TXG2, RR_TXG2_ATT0, attsmxr);
		break;
	case RTW89_BAND_5G:
		rtw89_phy_write32_mask(rtwdev, R_RFGAIN_BND + (path << 8),
				       B_RFGAIN_BND, 0x04);
		rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL, a_mode_txgain);
		break;
	default:
		break;
	}
	/* Select CFIR LUT group and IQ-swap setting, then fire ID_NBTXK. */
	rtw89_phy_write32_clr(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_TXCFIR);
	rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL);
	rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, group);
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, itqt);
	rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
	fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);
	if (!fail) {
		tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
		iqk_info->nb_txcfir[path] = tmp | 0x2;
	} else {
		iqk_info->nb_txcfir[path] = 0x40000002;
	}
	tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x8%lx38 = 0x%x\n", path,
		    BIT(path), tmp);
	return fail;
}
1206 
1207 static void _lok_res_table(struct rtw89_dev *rtwdev, u8 path, u8 ibias)
1208 {
1209 	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1210 
1211 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, ibias = %x\n", path, ibias);
1212 	rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x2);
1213 	if (iqk_info->iqk_band[path] == RTW89_BAND_2G)
1214 		rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, 0x0);
1215 	else
1216 		rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, 0x1);
1217 	rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, ibias);
1218 	rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x0);
1219 }
1220 
1221 static bool _lok_finetune_check(struct rtw89_dev *rtwdev, u8 path)
1222 {
1223 	bool is_fail = false;
1224 	u32 tmp = 0x0;
1225 	u32 core_i = 0x0;
1226 	u32 core_q = 0x0;
1227 
1228 	tmp = rtw89_read_rf(rtwdev, path, RR_TXMO, RFREG_MASK);
1229 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK][FineLOK] S%x, 0x58 = 0x%x\n",
1230 		    path, tmp);
1231 	core_i = FIELD_GET(RR_TXMO_COI, tmp);
1232 	core_q = FIELD_GET(RR_TXMO_COQ, tmp);
1233 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, i = 0x%x\n", path, core_i);
1234 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, q = 0x%x\n", path, core_q);
1235 
1236 	if (core_i < 0x2 || core_i > 0x1d || core_q < 0x2 || core_q > 0x1d)
1237 		is_fail = true;
1238 	return is_fail;
1239 }
1240 
/* TX LO-leakage calibration (LOK) on @path: run the coarse then the fine
 * FLOK one-shot with a band-specific TX gain and IQ-swap (itqt) setting.
 * Per-stage fail flags are stored in lok_cor_fail / lok_fin_fail; the
 * returned result comes from _lok_finetune_check() on the TXMO codes.
 */
static bool _iqk_lok(struct rtw89_dev *rtwdev,
		     enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u32 rf0 = 0x0;
	u8 itqt = 0x12;
	bool fail = false;
	bool tmp = false;

	/* Band-specific TX gain and IQ-swap value for the LOK one-shots. */
	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL, 0xe5e0);
		itqt = 0x09;
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL, 0xe4e0);
		itqt = 0x12;
		break;
	default:
		break;
	}
	/* Feed the current RF mode (plus SYN1-to-2) to the IQK engine and
	 * select CFIR LUT group 0 before triggering.
	 */
	rtw89_phy_write32_set(rtwdev, R_IQK_CFG, B_IQK_CFG_SET);
	rf0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF1, B_IQK_DIF1_TXPI,
			       rf0 | iqk_info->syn1to2);
	rtw89_phy_write32_clr(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_TXCFIR);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, 0x0);
	rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_EN);
	rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
	/* Coarse stage, short settle, then fine stage. */
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, itqt);
	tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_COARSE);
	iqk_info->lok_cor_fail[0][path] = tmp;
	fsleep(10);
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, itqt);
	tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_FINE);
	iqk_info->lok_fin_fail[0][path] = tmp;
	fail = _lok_finetune_check(rtwdev, path);
	return fail;
}
1282 
/* Configure the AFE and RF registers of @path for TX IQK / LOK.  The
 * ANAPAR power sequence is stepped with settle delays, the ADC FIFO is
 * pulsed, then a band-specific RF setup (LNA switch, gain, LOK LUT, mode)
 * is applied.
 */
static void _iqk_txk_setting(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

	rtw89_phy_write32_set(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG);
	/* ANAPAR power-up steps with settle delays (vendor sequence). */
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x1f);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x13);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0001);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0041);
	udelay(1);
	/* Pulse the ADC FIFO reset (assert 0x0303, release 0x0000). */
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST, 0x0303);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST, 0x0000);
	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_XALNA2, RR_XALNA2_SW, 0x00);
		rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_POW, 0x3f);
		rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT2, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TXG2, RR_TXG2_ATT0, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EN, 0x0);
		rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
		rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_LOK, 0x0);
		rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_MASK, 0x000);
		rtw89_write_rf(rtwdev, path, RR_RSV2, RFREG_MASK, 0x80200);
		rtw89_write_rf(rtwdev, path, RR_DTXLOK, RFREG_MASK, 0x80200);
		/* Enter TX-calibration RF mode; fold in the SYN1-to-2 flag. */
		rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
			       0x403e0 | iqk_info->syn1to2);
		udelay(1);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_XGLNA2, RR_XGLNA2_SW, 0x00);
		rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_POW, 0x3f);
		rtw89_write_rf(rtwdev, path, RR_BIASA, RR_BIASA_A, 0x7);
		rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EN, 0x0);
		rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
		rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_LOK, 0x0);
		rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_MASK, 0x100);
		rtw89_write_rf(rtwdev, path, RR_RSV2, RFREG_MASK, 0x80200);
		rtw89_write_rf(rtwdev, path, RR_DTXLOK, RFREG_MASK, 0x80200);
		/* Toggle LUT data word (purpose not derivable here). */
		rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, 0x1);
		rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, 0x0);
		rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
			       0x403e0 | iqk_info->syn1to2);
		udelay(1);
		break;
	default:
		break;
	}
}
1334 
/* Select the CFIR system clocking used during TX IQK (0xce000a08 is a
 * vendor-defined per-path configuration value).
 */
static void _iqk_txclk_setting(struct rtw89_dev *rtwdev, u8 path)
{
	rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD, 0xce000a08);
}
1339 
/* Record the outcome of a per-path IQK run: snapshot the current thermal
 * reading, mirror the four per-stage fail flags into R_IQKINF (one nibble
 * per path), back up the enable/TX/RX result registers, and update the
 * run and fail counters exposed through R_IQKINF2.
 */
static void _iqk_info_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
			  u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u32 tmp = 0x0;
	bool flag = 0x0;

	iqk_info->thermal[path] =
		ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
	iqk_info->thermal_rek_en = false;
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_thermal = %d\n", path,
		    iqk_info->thermal[path]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_LOK_COR_fail= %d\n", path,
		    iqk_info->lok_cor_fail[0][path]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_LOK_FIN_fail= %d\n", path,
		    iqk_info->lok_fin_fail[0][path]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_TXIQK_fail = %d\n", path,
		    iqk_info->iqk_tx_fail[0][path]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_RXIQK_fail= %d,\n", path,
		    iqk_info->iqk_rx_fail[0][path]);
	/* R_IQKINF nibble per path: bit0=LOK coarse, bit1=LOK fine,
	 * bit2=TX IQK, bit3=RX IQK.
	 */
	flag = iqk_info->lok_cor_fail[0][path];
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(0) << (path * 4), flag);
	flag = iqk_info->lok_fin_fail[0][path];
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(1) << (path * 4), flag);
	flag = iqk_info->iqk_tx_fail[0][path];
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(2) << (path * 4), flag);
	flag = iqk_info->iqk_rx_fail[0][path];
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(3) << (path * 4), flag);

	/* Back up the result registers so they can be restored later. */
	tmp = rtw89_phy_read32_mask(rtwdev, R_IQK_RES + (path << 8), MASKDWORD);
	iqk_info->bp_iqkenable[path] = tmp;
	tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
	iqk_info->bp_txkresult[path] = tmp;
	tmp = rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD);
	iqk_info->bp_rxkresult[path] = tmp;

	rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_KCNT,
			       (u8)iqk_info->iqk_times);

	/* Any set bit in this path's nibble counts as a failed run. */
	tmp = rtw89_phy_read32_mask(rtwdev, R_IQKINF, 0x0000000f << (path * 4));
	if (tmp != 0x0)
		iqk_info->iqk_fail_cnt++;
	rtw89_phy_write32_mask(rtwdev, R_IQKINF2, 0x00ff0000 << (path * 4),
			       iqk_info->iqk_fail_cnt);
}
1385 
1386 static
1387 void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
1388 {
1389 	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1390 	bool lok_is_fail = false;
1391 	u8 ibias = 0x1;
1392 	u8 i = 0;
1393 
1394 	_iqk_txclk_setting(rtwdev, path);
1395 
1396 	for (i = 0; i < 3; i++) {
1397 		_lok_res_table(rtwdev, path, ibias++);
1398 		_iqk_txk_setting(rtwdev, path);
1399 		lok_is_fail = _iqk_lok(rtwdev, phy_idx, path);
1400 		if (!lok_is_fail)
1401 			break;
1402 	}
1403 	if (iqk_info->is_nbiqk)
1404 		iqk_info->iqk_tx_fail[0][path] = _iqk_nbtxk(rtwdev, phy_idx, path);
1405 	else
1406 		iqk_info->iqk_tx_fail[0][path] = _txk_group_sel(rtwdev, phy_idx, path);
1407 
1408 	_iqk_rxclk_setting(rtwdev, path);
1409 	_iqk_rxk_setting(rtwdev, path);
1410 	if (iqk_info->is_nbiqk || rtwdev->dbcc_en || iqk_info->iqk_band[path] == RTW89_BAND_2G)
1411 		iqk_info->iqk_rx_fail[0][path] = _iqk_nbrxk(rtwdev, phy_idx, path);
1412 	else
1413 		iqk_info->iqk_rx_fail[0][path] = _rxk_group_sel(rtwdev, phy_idx, path);
1414 
1415 	_iqk_info_iqk(rtwdev, phy_idx, path);
1416 }
1417 
1418 static void _iqk_get_ch_info(struct rtw89_dev *rtwdev,
1419 			     enum rtw89_phy_idx phy, u8 path)
1420 {
1421 	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1422 	struct rtw89_hal *hal = &rtwdev->hal;
1423 	u32 reg_rf18 = 0x0, reg_35c = 0x0;
1424 	u8 idx = 0;
1425 	u8 get_empty_table = false;
1426 
1427 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
1428 	for  (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) {
1429 		if (iqk_info->iqk_mcc_ch[idx][path] == 0) {
1430 			get_empty_table = true;
1431 			break;
1432 		}
1433 	}
1434 	if (!get_empty_table) {
1435 		idx = iqk_info->iqk_table_idx[path] + 1;
1436 		if (idx > RTW89_IQK_CHS_NR - 1)
1437 			idx = 0;
1438 	}
1439 	reg_rf18 = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
1440 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]cfg ch = %d\n", reg_rf18);
1441 	reg_35c = rtw89_phy_read32_mask(rtwdev, 0x35c, 0x00000c00);
1442 
1443 	iqk_info->iqk_band[path] = hal->current_band_type;
1444 	iqk_info->iqk_bw[path] = hal->current_band_width;
1445 	iqk_info->iqk_ch[path] = hal->current_channel;
1446 
1447 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
1448 		    "[IQK]iqk_info->iqk_band[%x] = 0x%x\n", path,
1449 		    iqk_info->iqk_band[path]);
1450 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_info->iqk_bw[%x] = 0x%x\n",
1451 		    path, iqk_info->iqk_bw[path]);
1452 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_info->iqk_ch[%x] = 0x%x\n",
1453 		    path, iqk_info->iqk_ch[path]);
1454 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
1455 		    "[IQK]S%d (PHY%d): / DBCC %s/ %s/ CH%d/ %s\n", path, phy,
1456 		    rtwdev->dbcc_en ? "on" : "off",
1457 		    iqk_info->iqk_band[path] == 0 ? "2G" :
1458 		    iqk_info->iqk_band[path] == 1 ? "5G" : "6G",
1459 		    iqk_info->iqk_ch[path],
1460 		    iqk_info->iqk_bw[path] == 0 ? "20M" :
1461 		    iqk_info->iqk_bw[path] == 1 ? "40M" : "80M");
1462 	if (reg_35c == 0x01)
1463 		iqk_info->syn1to2 = 0x1;
1464 	else
1465 		iqk_info->syn1to2 = 0x0;
1466 
1467 	rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_VER, RTW8852A_IQK_VER);
1468 	rtw89_phy_write32_mask(rtwdev, R_IQKCH, 0x000f << (path * 16),
1469 			       (u8)iqk_info->iqk_band[path]);
1470 	rtw89_phy_write32_mask(rtwdev, R_IQKCH, 0x00f0 << (path * 16),
1471 			       (u8)iqk_info->iqk_bw[path]);
1472 	rtw89_phy_write32_mask(rtwdev, R_IQKCH, 0xff00 << (path * 16),
1473 			       (u8)iqk_info->iqk_ch[path]);
1474 
1475 	rtw89_phy_write32_mask(rtwdev, R_IQKINF2, 0x000000ff, RTW8852A_NCTL_VER);
1476 }
1477 
/* Thin wrapper: kick off the full per-path IQK flow. */
static void _iqk_start_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
			   u8 path)
{
	_iqk_by_path(rtwdev, phy_idx, path);
}
1483 
/* Restore BB/RF state after IQK on @path: write back the narrowband CFIR
 * results and return the NCTL/KIP blocks and RF registers to their normal
 * operating configuration.
 */
static void _iqk_restore(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

	/* Commit the narrowband TX/RX CFIR results chosen during IQK. */
	rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD,
			       iqk_info->nb_txcfir[path]);
	rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD,
			       iqk_info->nb_rxcfir[path]);
	rtw89_phy_write32_clr(rtwdev, R_NCTL_RPT, MASKDWORD);
	rtw89_phy_write32_clr(rtwdev, R_MDPK_RX_DCK, MASKDWORD);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000);
	rtw89_phy_write32_clr(rtwdev, R_KPATH_CFG, MASKDWORD);
	rtw89_phy_write32_clr(rtwdev, R_GAPK, B_GAPK_ADR);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD, 0x10010000);
	rtw89_phy_write32_clr(rtwdev, R_KIP + (path << 8), B_KIP_RFGAIN);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_MAP + (path << 8), MASKDWORD, 0xe4e4e4e4);
	rtw89_phy_write32_clr(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL);
	rtw89_phy_write32_clr(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW);
	rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), MASKDWORD, 0x00000002);
	rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0);
	rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_POW, 0x0);
	/* NOTE(review): RR_LUTWE_LOK is cleared a second time here --
	 * appears redundant, kept to preserve the vendor sequence.
	 */
	rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0);
	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
	rtw89_write_rf(rtwdev, path, RR_TXRSV, RR_TXRSV_GAPK, 0x0);
	rtw89_write_rf(rtwdev, path, RR_BIAS, RR_BIAS_GAPK, 0x0);
	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1);
}
1511 
1512 static void _iqk_afebb_restore(struct rtw89_dev *rtwdev,
1513 			       enum rtw89_phy_idx phy_idx, u8 path)
1514 {
1515 	const struct rtw89_rfk_tbl *tbl;
1516 
1517 	switch (_kpath(rtwdev, phy_idx)) {
1518 	case RF_A:
1519 		tbl = &rtw8852a_rfk_iqk_restore_defs_dbcc_path0_tbl;
1520 		break;
1521 	case RF_B:
1522 		tbl = &rtw8852a_rfk_iqk_restore_defs_dbcc_path1_tbl;
1523 		break;
1524 	default:
1525 		tbl = &rtw8852a_rfk_iqk_restore_defs_nondbcc_path01_tbl;
1526 		break;
1527 	}
1528 
1529 	rtw89_rfk_parser(rtwdev, tbl);
1530 }
1531 
1532 static void _iqk_preset(struct rtw89_dev *rtwdev, u8 path)
1533 {
1534 	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1535 	u8 idx = iqk_info->iqk_table_idx[path];
1536 
1537 	if (rtwdev->dbcc_en) {
1538 		rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8),
1539 				       B_COEF_SEL_IQC, path & 0x1);
1540 		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
1541 				       B_CFIR_LUT_G2, path & 0x1);
1542 	} else {
1543 		rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8),
1544 				       B_COEF_SEL_IQC, idx);
1545 		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
1546 				       B_CFIR_LUT_G2, idx);
1547 	}
1548 	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
1549 	rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080);
1550 	rtw89_phy_write32_clr(rtwdev, R_NCTL_RW, MASKDWORD);
1551 	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x81ff010a);
1552 	rtw89_phy_write32_mask(rtwdev, R_KPATH_CFG, MASKDWORD, 0x00200000);
1553 	rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, MASKDWORD, 0x80000000);
1554 	rtw89_phy_write32_clr(rtwdev, R_LOAD_COEF + (path << 8), MASKDWORD);
1555 }
1556 
1557 static void _iqk_macbb_setting(struct rtw89_dev *rtwdev,
1558 			       enum rtw89_phy_idx phy_idx, u8 path)
1559 {
1560 	const struct rtw89_rfk_tbl *tbl;
1561 
1562 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===> %s\n", __func__);
1563 
1564 	switch (_kpath(rtwdev, phy_idx)) {
1565 	case RF_A:
1566 		tbl = &rtw8852a_rfk_iqk_set_defs_dbcc_path0_tbl;
1567 		break;
1568 	case RF_B:
1569 		tbl = &rtw8852a_rfk_iqk_set_defs_dbcc_path1_tbl;
1570 		break;
1571 	default:
1572 		tbl = &rtw8852a_rfk_iqk_set_defs_nondbcc_path01_tbl;
1573 		break;
1574 	}
1575 
1576 	rtw89_rfk_parser(rtwdev, tbl);
1577 }
1578 
1579 static void _iqk_dbcc(struct rtw89_dev *rtwdev, u8 path)
1580 {
1581 	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1582 	u8 phy_idx = 0x0;
1583 
1584 	iqk_info->iqk_times++;
1585 
1586 	if (path == 0x0)
1587 		phy_idx = RTW89_PHY_0;
1588 	else
1589 		phy_idx = RTW89_PHY_1;
1590 
1591 	_iqk_get_ch_info(rtwdev, phy_idx, path);
1592 	_iqk_macbb_setting(rtwdev, phy_idx, path);
1593 	_iqk_preset(rtwdev, path);
1594 	_iqk_start_iqk(rtwdev, phy_idx, path);
1595 	_iqk_restore(rtwdev, path);
1596 	_iqk_afebb_restore(rtwdev, phy_idx, path);
1597 }
1598 
1599 static void _iqk_track(struct rtw89_dev *rtwdev)
1600 {
1601 	struct rtw89_iqk_info *iqk = &rtwdev->iqk;
1602 	u8 path = 0x0;
1603 	u8 cur_ther;
1604 
1605 	if (iqk->iqk_band[0] == RTW89_BAND_2G)
1606 		return;
1607 	if (iqk->iqk_bw[0] < RTW89_CHANNEL_WIDTH_80)
1608 		return;
1609 
1610 	/* only check path 0 */
1611 	for (path = 0; path < 1; path++) {
1612 		cur_ther = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
1613 
1614 		if (abs(cur_ther - iqk->thermal[path]) > RTW8852A_IQK_THR_REK)
1615 			iqk->thermal_rek_en = true;
1616 		else
1617 			iqk->thermal_rek_en = false;
1618 	}
1619 }
1620 
/* RC calibration (RCK) for @path: put the RF in RX mode, trigger RCK via
 * RR_RCKC = 0x00240, poll RF register 0x1c bit 3 for completion (2 us
 * interval, 20 us timeout), then commit the returned CA code and program
 * the RCK ADC offset.  RR_RSV1 is saved and restored around the sequence.
 */
static void _rck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
	u32 rf_reg5, rck_val = 0;
	u32 val;
	int ret;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] ====== S%d RCK ======\n", path);

	/* Save RR_RSV1 so it can be restored after calibration. */
	rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);

	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF0x00 = 0x%x\n",
		    rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK));

	/* RCK trigger */
	rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, 0x00240);

	ret = read_poll_timeout_atomic(rtw89_read_rf, val, val, 2, 20,
				       false, rtwdev, path, 0x1c, BIT(3));
	if (ret)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RCK timeout\n");

	/* Commit the calibrated CA code back into RR_RCKC. */
	rck_val = rtw89_read_rf(rtwdev, path, RR_RCKC, RR_RCKC_CA);
	rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, rck_val);

	/* RCK_ADC_OFFSET */
	rtw89_write_rf(rtwdev, path, RR_RCKO, RR_RCKO_OFF, 0x4);

	/* Pulse the RF clock enable to latch the new settings. */
	rtw89_write_rf(rtwdev, path, RR_RFC, RR_RFC_CKEN, 0x1);
	rtw89_write_rf(rtwdev, path, RR_RFC, RR_RFC_CKEN, 0x0);

	rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[RCK] RF 0x1b / 0x1c / 0x1d = 0x%x / 0x%x / 0x%x\n",
		    rtw89_read_rf(rtwdev, path, RR_RCKC, RFREG_MASK),
		    rtw89_read_rf(rtwdev, path, RR_RCKS, RFREG_MASK),
		    rtw89_read_rf(rtwdev, path, RR_RCKO, RFREG_MASK));
}
1662 
1663 static void _iqk_init(struct rtw89_dev *rtwdev)
1664 {
1665 	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1666 	u8 ch, path;
1667 
1668 	rtw89_phy_write32_clr(rtwdev, R_IQKINF, MASKDWORD);
1669 	if (iqk_info->is_iqk_init)
1670 		return;
1671 
1672 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
1673 	iqk_info->is_iqk_init = true;
1674 	iqk_info->is_nbiqk = false;
1675 	iqk_info->iqk_fft_en = false;
1676 	iqk_info->iqk_sram_en = false;
1677 	iqk_info->iqk_cfir_en = false;
1678 	iqk_info->iqk_xym_en = false;
1679 	iqk_info->thermal_rek_en = false;
1680 	iqk_info->iqk_times = 0x0;
1681 
1682 	for (ch = 0; ch < RTW89_IQK_CHS_NR; ch++) {
1683 		iqk_info->iqk_channel[ch] = 0x0;
1684 		for (path = 0; path < RTW8852A_IQK_SS; path++) {
1685 			iqk_info->lok_cor_fail[ch][path] = false;
1686 			iqk_info->lok_fin_fail[ch][path] = false;
1687 			iqk_info->iqk_tx_fail[ch][path] = false;
1688 			iqk_info->iqk_rx_fail[ch][path] = false;
1689 			iqk_info->iqk_mcc_ch[ch][path] = 0x0;
1690 			iqk_info->iqk_table_idx[path] = 0x0;
1691 		}
1692 	}
1693 }
1694 
1695 static void _doiqk(struct rtw89_dev *rtwdev, bool force,
1696 		   enum rtw89_phy_idx phy_idx, u8 path)
1697 {
1698 	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1699 	u32 backup_bb_val[BACKUP_BB_REGS_NR];
1700 	u32 backup_rf_val[RTW8852A_IQK_SS][BACKUP_RF_REGS_NR];
1701 	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB);
1702 
1703 	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);
1704 
1705 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
1706 		    "[IQK]==========IQK strat!!!!!==========\n");
1707 	iqk_info->iqk_times++;
1708 	iqk_info->kcount = 0;
1709 	iqk_info->version = RTW8852A_IQK_VER;
1710 
1711 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);
1712 	_iqk_get_ch_info(rtwdev, phy_idx, path);
1713 	_rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]);
1714 	_rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
1715 	_iqk_macbb_setting(rtwdev, phy_idx, path);
1716 	_iqk_preset(rtwdev, path);
1717 	_iqk_start_iqk(rtwdev, phy_idx, path);
1718 	_iqk_restore(rtwdev, path);
1719 	_iqk_afebb_restore(rtwdev, phy_idx, path);
1720 	_rfk_restore_bb_reg(rtwdev, &backup_bb_val[0]);
1721 	_rfk_restore_rf_reg(rtwdev, &backup_rf_val[path][0], path);
1722 	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
1723 }
1724 
1725 static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force)
1726 {
1727 	switch (_kpath(rtwdev, phy_idx)) {
1728 	case RF_A:
1729 		_doiqk(rtwdev, force, phy_idx, RF_PATH_A);
1730 		break;
1731 	case RF_B:
1732 		_doiqk(rtwdev, force, phy_idx, RF_PATH_B);
1733 		break;
1734 	case RF_AB:
1735 		_doiqk(rtwdev, force, phy_idx, RF_PATH_A);
1736 		_doiqk(rtwdev, force, phy_idx, RF_PATH_B);
1737 		break;
1738 	default:
1739 		break;
1740 	}
1741 }
1742 
1743 #define RXDCK_VER_8852A 0xe
1744 
/* Perform RX DC offset calibration (DCK) on one path, driven either by
 * the AFE or by the RFC depending on @is_afe. For the AFE flavour the
 * RX clock and DC-cancellation blocks are configured first and restored
 * afterwards.
 */
static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			enum rtw89_rf_path path, bool is_afe)
{
	u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy, path);
	u32 ori_val;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[RX_DCK] ==== S%d RX DCK (by %s)====\n",
		    path, is_afe ? "AFE" : "RFC");

	/* save the per-path RX clock config; (path << 13) selects the path */
	ori_val = rtw89_phy_read32_mask(rtwdev, R_P0_RXCK + (path << 13), MASKDWORD);

	if (is_afe) {
		rtw89_phy_write32_set(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG);
		rtw89_phy_write32_set(rtwdev, R_P0_RXCK + (path << 13), B_P0_RXCK_ON);
		rtw89_phy_write32_mask(rtwdev, R_P0_RXCK + (path << 13),
				       B_P0_RXCK_VAL, 0x3);
		rtw89_phy_write32_set(rtwdev, R_S0_RXDC2 + (path << 13), B_S0_RXDC2_MEN);
		rtw89_phy_write32_mask(rtwdev, R_S0_RXDC2 + (path << 13),
				       B_S0_RXDC2_AVG, 0x3);
		rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0x3);
		rtw89_phy_write32_clr(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK);
		/* toggle the filter reset bit */
		rtw89_phy_write32_clr(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST);
		rtw89_phy_write32_set(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST);
		rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_CRXBB, 0x1);
	}

	rtw89_write_rf(rtwdev, path, RR_DCK2, RR_DCK2_CYCLE, 0x3f);
	rtw89_write_rf(rtwdev, path, RR_DCK1, RR_DCK1_SEL, is_afe);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_ONESHOT_START);

	/* 0 -> 1 transition on RR_DCK_LV kicks off the calibration */
	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0);
	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x1);

	/* fixed settle time; no completion flag is polled here */
	fsleep(600);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_ONESHOT_STOP);

	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0);

	if (is_afe) {
		rtw89_phy_write32_clr(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG);
		rtw89_phy_write32_mask(rtwdev, R_P0_RXCK + (path << 13),
				       MASKDWORD, ori_val);
	}
}
1792 
1793 static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
1794 		    bool is_afe)
1795 {
1796 	u8 path, kpath, dck_tune;
1797 	u32 rf_reg5;
1798 	u32 addr;
1799 
1800 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
1801 		    "[RX_DCK] ****** RXDCK Start (Ver: 0x%x, Cv: %d) ******\n",
1802 		    RXDCK_VER_8852A, rtwdev->hal.cv);
1803 
1804 	kpath = _kpath(rtwdev, phy);
1805 
1806 	for (path = 0; path < 2; path++) {
1807 		if (!(kpath & BIT(path)))
1808 			continue;
1809 
1810 		rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);
1811 		dck_tune = (u8)rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_FINE);
1812 
1813 		if (rtwdev->is_tssi_mode[path]) {
1814 			addr = 0x5818 + (path << 13);
1815 			/* TSSI pause */
1816 			rtw89_phy_write32_set(rtwdev, addr, BIT(30));
1817 		}
1818 
1819 		rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
1820 		rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, 0x0);
1821 		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
1822 		_set_rx_dck(rtwdev, phy, path, is_afe);
1823 		rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, dck_tune);
1824 		rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);
1825 
1826 		if (rtwdev->is_tssi_mode[path]) {
1827 			addr = 0x5818 + (path << 13);
1828 			/* TSSI resume */
1829 			rtw89_phy_write32_clr(rtwdev, addr, BIT(30));
1830 		}
1831 	}
1832 }
1833 
1834 #define RTW8852A_RF_REL_VERSION 34
1835 #define RTW8852A_DPK_VER 0x10
1836 #define RTW8852A_DPK_TH_AVG_NUM 4
1837 #define RTW8852A_DPK_RF_PATH 2
1838 #define RTW8852A_DPK_KIP_REG_NUM 2
1839 
/* One-shot calibration command IDs issued to the KIP via
 * _dpk_one_shot() (written into R_NCTL_CFG bits [15:8]).
 */
enum rtw8852a_dpk_id {
	LBK_RXIQK	= 0x06,	/* loopback RX IQ calibration */
	SYNC		= 0x10,	/* sync; correlation/DC report follows */
	MDPK_IDL	= 0x11,	/* model-based DPK, IDL training */
	MDPK_MPA	= 0x12,
	GAIN_LOSS	= 0x13,	/* gain-loss measurement */
	GAIN_CAL	= 0x14,
};
1848 
1849 static void _rf_direct_cntrl(struct rtw89_dev *rtwdev,
1850 			     enum rtw89_rf_path path, bool is_bybb)
1851 {
1852 	if (is_bybb)
1853 		rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1);
1854 	else
1855 		rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
1856 }
1857 
1858 static void _dpk_onoff(struct rtw89_dev *rtwdev,
1859 		       enum rtw89_rf_path path, bool off);
1860 
1861 static void _dpk_bkup_kip(struct rtw89_dev *rtwdev, u32 *reg,
1862 			  u32 reg_bkup[][RTW8852A_DPK_KIP_REG_NUM],
1863 			  u8 path)
1864 {
1865 	u8 i;
1866 
1867 	for (i = 0; i < RTW8852A_DPK_KIP_REG_NUM; i++) {
1868 		reg_bkup[path][i] = rtw89_phy_read32_mask(rtwdev,
1869 							  reg[i] + (path << 8),
1870 							  MASKDWORD);
1871 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Backup 0x%x = %x\n",
1872 			    reg[i] + (path << 8), reg_bkup[path][i]);
1873 	}
1874 }
1875 
1876 static void _dpk_reload_kip(struct rtw89_dev *rtwdev, u32 *reg,
1877 			    u32 reg_bkup[][RTW8852A_DPK_KIP_REG_NUM], u8 path)
1878 {
1879 	u8 i;
1880 
1881 	for (i = 0; i < RTW8852A_DPK_KIP_REG_NUM; i++) {
1882 		rtw89_phy_write32_mask(rtwdev, reg[i] + (path << 8),
1883 				       MASKDWORD, reg_bkup[path][i]);
1884 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Reload 0x%x = %x\n",
1885 			    reg[i] + (path << 8), reg_bkup[path][i]);
1886 	}
1887 }
1888 
1889 static u8 _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
1890 			enum rtw89_rf_path path, enum rtw8852a_dpk_id id)
1891 {
1892 	u8 phy_map  = rtw89_btc_path_phymap(rtwdev, phy, path);
1893 	u16 dpk_cmd = 0x0;
1894 	u32 val;
1895 	int ret;
1896 
1897 	dpk_cmd = (u16)((id << 8) | (0x19 + (path << 4)));
1898 
1899 	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_ONESHOT_START);
1900 
1901 	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, dpk_cmd);
1902 	rtw89_phy_write32_set(rtwdev, R_DPK_CTL, B_DPK_CTL_EN);
1903 
1904 	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
1905 				       10, 20000, false, rtwdev, 0xbff8, MASKBYTE0);
1906 
1907 	rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, MASKBYTE0);
1908 
1909 	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_ONESHOT_STOP);
1910 
1911 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
1912 		    "[DPK] one-shot for %s = 0x%x (ret=%d)\n",
1913 		    id == 0x06 ? "LBK_RXIQK" :
1914 		    id == 0x10 ? "SYNC" :
1915 		    id == 0x11 ? "MDPK_IDL" :
1916 		    id == 0x12 ? "MDPK_MPA" :
1917 		    id == 0x13 ? "GAIN_LOSS" : "PWR_CAL",
1918 		    dpk_cmd, ret);
1919 
1920 	if (ret) {
1921 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
1922 			    "[DPK] one-shot over 20ms!!!!\n");
1923 		return 1;
1924 	}
1925 
1926 	return 0;
1927 }
1928 
/* RX DC offset calibration used within the DPK flow: set the RXBB2
 * TIA/IDA enable field to 0x3 first, then run the common RX DCK in
 * RFC mode (is_afe = false).
 */
static void _dpk_rx_dck(struct rtw89_dev *rtwdev,
			enum rtw89_phy_idx phy,
			enum rtw89_rf_path path)
{
	rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_EN_TIA_IDA, 0x3);
	_set_rx_dck(rtwdev, phy, path, false);
}
1936 
/* Capture the current band/channel/bandwidth into the DPK table slot
 * that is about to be calibrated, and log the DPK context.
 */
static void _dpk_information(struct rtw89_dev *rtwdev,
			     enum rtw89_phy_idx phy,
			     enum rtw89_rf_path path)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	struct rtw89_hal *hal = &rtwdev->hal;

	u8 kidx = dpk->cur_idx[path];

	dpk->bp[path][kidx].band = hal->current_band_type;
	dpk->bp[path][kidx].ch = hal->current_channel;
	dpk->bp[path][kidx].bw = hal->current_band_width;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] S%d[%d] (PHY%d): TSSI %s/ DBCC %s/ %s/ CH%d/ %s\n",
		    path, dpk->cur_idx[path], phy,
		    rtwdev->is_tssi_mode[path] ? "on" : "off",
		    rtwdev->dbcc_en ? "on" : "off",
		    dpk->bp[path][kidx].band == 0 ? "2G" :
		    dpk->bp[path][kidx].band == 1 ? "5G" : "6G",
		    dpk->bp[path][kidx].ch,
		    dpk->bp[path][kidx].bw == 0 ? "20M" :
		    dpk->bp[path][kidx].bw == 1 ? "40M" : "80M");
}
1961 
1962 static void _dpk_bb_afe_setting(struct rtw89_dev *rtwdev,
1963 				enum rtw89_phy_idx phy,
1964 				enum rtw89_rf_path path, u8 kpath)
1965 {
1966 	switch (kpath) {
1967 	case RF_A:
1968 		rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_sf_defs_a_tbl);
1969 
1970 		if (rtw89_phy_read32_mask(rtwdev, R_2P4G_BAND, B_2P4G_BAND_SEL) == 0x0)
1971 			rtw89_phy_write32_set(rtwdev, R_RXCCA, B_RXCCA_DIS);
1972 
1973 		rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_sr_defs_a_tbl);
1974 		break;
1975 	case RF_B:
1976 		rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_sf_defs_b_tbl);
1977 
1978 		if (rtw89_phy_read32_mask(rtwdev, R_2P4G_BAND, B_2P4G_BAND_SEL) == 0x1)
1979 			rtw89_phy_write32_set(rtwdev, R_RXCCA, B_RXCCA_DIS);
1980 
1981 		rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_sr_defs_b_tbl);
1982 		break;
1983 	case RF_AB:
1984 		rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_s_defs_ab_tbl);
1985 		break;
1986 	default:
1987 		break;
1988 	}
1989 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
1990 		    "[DPK] Set BB/AFE for PHY%d (kpath=%d)\n", phy, kpath);
1991 }
1992 
1993 static void _dpk_bb_afe_restore(struct rtw89_dev *rtwdev,
1994 				enum rtw89_phy_idx phy,
1995 				enum rtw89_rf_path path, u8 kpath)
1996 {
1997 	switch (kpath) {
1998 	case RF_A:
1999 		rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_r_defs_a_tbl);
2000 		break;
2001 	case RF_B:
2002 		rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_r_defs_b_tbl);
2003 		break;
2004 	case RF_AB:
2005 		rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_r_defs_ab_tbl);
2006 		break;
2007 	default:
2008 		break;
2009 	}
2010 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
2011 		    "[DPK] Restore BB/AFE for PHY%d (kpath=%d)\n", phy, kpath);
2012 }
2013 
2014 static void _dpk_tssi_pause(struct rtw89_dev *rtwdev,
2015 			    enum rtw89_rf_path path, bool is_pause)
2016 {
2017 	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13),
2018 			       B_P0_TSSI_TRK_EN, is_pause);
2019 
2020 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d TSSI %s\n", path,
2021 		    is_pause ? "pause" : "resume");
2022 }
2023 
/* Program the KIP registers for a DPK run on S<path>, table entry
 * <kidx>. The magic constants are vendor-provided configuration values.
 */
static void _dpk_kip_setting(struct rtw89_dev *rtwdev,
			     enum rtw89_rf_path path, u8 kidx)
{
	rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080);
	rtw89_phy_write32_mask(rtwdev, R_KIP_CLK, MASKDWORD, 0x00093f3f);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x807f030a);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD, 0xce000a08);
	rtw89_phy_write32_mask(rtwdev, R_DPK_CFG, B_DPK_CFG_IDX, 0x2);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, B_NCTL_CFG_SPAGE, path); /*subpage_id*/
	/* per-(path, kidx) DPD channel configuration */
	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0 + (path << 8) + (kidx << 2),
			       MASKDWORD, 0x003f2e2e);
	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
			       MASKDWORD, 0x005b5b5b);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] KIP setting for S%d[%d]!!\n",
		    path, kidx);
}
2041 
/* Return the KIP registers touched by _dpk_kip_setting() to their
 * normal operating values after DPK.
 */
static void _dpk_kip_restore(struct rtw89_dev *rtwdev,
			     enum rtw89_rf_path path)
{
	rtw89_phy_write32_clr(rtwdev, R_NCTL_RPT, MASKDWORD);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD, 0x10010000);
	rtw89_phy_write32_clr(rtwdev, R_KIP_CLK, MASKDWORD);

	/* chip cuts after CBV additionally need DPD_COM bit 15 set */
	if (rtwdev->hal.cv > CHIP_CBV)
		rtw89_phy_write32_mask(rtwdev, R_DPD_COM + (path << 8), BIT(15), 0x1);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d restore KIP\n", path);
}
2055 
/* Loopback RX IQ calibration used by the DPK AGC loop (80MHz case):
 * put the RF into loopback, pick an attenuation from the current RXBB
 * gain, fire the LBK_RXIQK one-shot, then restore the RF state.
 */
static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev,
			   enum rtw89_phy_idx phy,
			   enum rtw89_rf_path path)
{
	u8 cur_rxbb;

	cur_rxbb = (u8)rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB);

	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_lbk_rxiqk_defs_f_tbl);

	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
	rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x1);
	rtw89_write_rf(rtwdev, path, RR_RXPOW, RR_RXPOW_IQK, 0x2);
	/* mirror the current channel configuration into RSV4 */
	rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK,
		       rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK));
	rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_OFF, 0x13);
	/* power-cycle the IQK PLL */
	rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
	rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x1);

	/* wait after PLL power-on */
	fsleep(70);

	rtw89_write_rf(rtwdev, path, RR_RXIQGEN, RR_RXIQGEN_ATTL, 0x1f);

	/* attenuation setting chosen from the RXBB gain read above */
	if (cur_rxbb <= 0xa)
		rtw89_write_rf(rtwdev, path, RR_RXIQGEN, RR_RXIQGEN_ATTH, 0x3);
	else if (cur_rxbb <= 0x10 && cur_rxbb >= 0xb)
		rtw89_write_rf(rtwdev, path, RR_RXIQGEN, RR_RXIQGEN_ATTH, 0x1);
	else
		rtw89_write_rf(rtwdev, path, RR_RXIQGEN, RR_RXIQGEN_ATTH, 0x0);

	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11);

	_dpk_one_shot(rtwdev, phy, path, LBK_RXIQK);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d LBK RXIQC = 0x%x\n", path,
		    rtw89_phy_read32_mask(rtwdev, R_RXIQC, MASKDWORD));

	rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x0);
	rtw89_write_rf(rtwdev, path, RR_RXPOW, RR_RXPOW_IQK, 0x0);
	rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0); /*POW IQKPLL*/
	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_DPK);

	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_lbk_rxiqk_defs_r_tbl);
}
2100 
/* Record the averaged thermal reading for @path into the DPK entry
 * identified by @kidx.
 */
static void _dpk_get_thermal(struct rtw89_dev *rtwdev, u8 kidx,
			     enum rtw89_rf_path path)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	dpk->bp[path][kidx].ther_dpk =
		ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] thermal@DPK = 0x%x\n",
		    dpk->bp[path][kidx].ther_dpk);
}
2112 
2113 static u8 _dpk_set_tx_pwr(struct rtw89_dev *rtwdev, u8 gain,
2114 			  enum rtw89_rf_path path)
2115 {
2116 	u8 txagc_ori = 0x38;
2117 
2118 	rtw89_write_rf(rtwdev, path, RR_MODOPT, RFREG_MASK, txagc_ori);
2119 
2120 	return txagc_ori;
2121 }
2122 
/* Configure the RF front-end (mode, attenuation, loopback, TX BB
 * bandwidth) for a DPK run; 2G and 5G/6G use different settings.
 * (@gain is unused here.)
 */
static void _dpk_rf_setting(struct rtw89_dev *rtwdev, u8 gain,
			    enum rtw89_rf_path path, u8 kidx)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	if (dpk->bp[path][kidx].band == RTW89_BAND_2G) {
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DPK, 0x280b);
		rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_ATTC, 0x0);
		rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_ATTR, 0x4);
		rtw89_write_rf(rtwdev, path, RR_MIXER, RR_MIXER_GN, 0x0);
	} else {
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DPK, 0x282e);
		rtw89_write_rf(rtwdev, path, RR_BIASA2, RR_BIASA2_LB, 0x7);
		rtw89_write_rf(rtwdev, path, RR_TXATANK, RR_TXATANK_LBSW, 0x3);
		rtw89_write_rf(rtwdev, path, RR_RXA, RR_RXA_DPK, 0x3);
	}
	rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_BW, 0x1);
	/* TX BB filter setting derived from the channel bandwidth */
	rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_TXBB, dpk->bp[path][kidx].bw + 1);
	rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_RXBB, 0x0);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] RF 0x0/0x1/0x1a = 0x%x/ 0x%x/ 0x%x\n",
		    rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK),
		    rtw89_read_rf(rtwdev, path, RR_MODOPT, RFREG_MASK),
		    rtw89_read_rf(rtwdev, path, RR_BTC, RFREG_MASK));
}
2149 
2150 static void _dpk_manual_txcfir(struct rtw89_dev *rtwdev,
2151 			       enum rtw89_rf_path path, bool is_manual)
2152 {
2153 	u8 tmp_pad, tmp_txbb;
2154 
2155 	if (is_manual) {
2156 		rtw89_phy_write32_mask(rtwdev, R_KIP + (path << 8), B_KIP_RFGAIN, 0x1);
2157 		tmp_pad = (u8)rtw89_read_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_PAD);
2158 		rtw89_phy_write32_mask(rtwdev, R_RFGAIN + (path << 8),
2159 				       B_RFGAIN_PAD, tmp_pad);
2160 
2161 		tmp_txbb = (u8)rtw89_read_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_BB);
2162 		rtw89_phy_write32_mask(rtwdev, R_RFGAIN + (path << 8),
2163 				       B_RFGAIN_TXBB, tmp_txbb);
2164 
2165 		rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8),
2166 				       B_LOAD_COEF_CFIR, 0x1);
2167 		rtw89_phy_write32_clr(rtwdev, R_LOAD_COEF + (path << 8),
2168 				      B_LOAD_COEF_CFIR);
2169 
2170 		rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), BIT(1), 0x1);
2171 
2172 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
2173 			    "[DPK] PAD_man / TXBB_man = 0x%x / 0x%x\n", tmp_pad,
2174 			    tmp_txbb);
2175 	} else {
2176 		rtw89_phy_write32_clr(rtwdev, R_KIP + (path << 8), B_KIP_RFGAIN);
2177 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
2178 			    "[DPK] disable manual switch TXCFIR\n");
2179 	}
2180 }
2181 
2182 static void _dpk_bypass_rxcfir(struct rtw89_dev *rtwdev,
2183 			       enum rtw89_rf_path path, bool is_bypass)
2184 {
2185 	if (is_bypass) {
2186 		rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
2187 				       B_RXIQC_BYPASS2, 0x1);
2188 		rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
2189 				       B_RXIQC_BYPASS, 0x1);
2190 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
2191 			    "[DPK] Bypass RXIQC (0x8%d3c = 0x%x)\n", 1 + path,
2192 			    rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8),
2193 						  MASKDWORD));
2194 	} else {
2195 		rtw89_phy_write32_clr(rtwdev, R_RXIQC + (path << 8), B_RXIQC_BYPASS2);
2196 		rtw89_phy_write32_clr(rtwdev, R_RXIQC + (path << 8), B_RXIQC_BYPASS);
2197 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
2198 			    "[DPK] restore 0x8%d3c = 0x%x\n", 1 + path,
2199 			    rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8),
2200 						  MASKDWORD));
2201 	}
2202 }
2203 
2204 static
2205 void _dpk_tpg_sel(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
2206 {
2207 	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2208 
2209 	if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80)
2210 		rtw89_phy_write32_clr(rtwdev, R_TPG_MOD, B_TPG_MOD_F);
2211 	else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40)
2212 		rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x2);
2213 	else
2214 		rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x1);
2215 
2216 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] TPG_Select for %s\n",
2217 		    dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80 ? "80M" :
2218 		    dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40 ? "40M" : "20M");
2219 }
2220 
2221 static void _dpk_table_select(struct rtw89_dev *rtwdev,
2222 			      enum rtw89_rf_path path, u8 kidx, u8 gain)
2223 {
2224 	u8 val;
2225 
2226 	val = 0x80 + kidx * 0x20 + gain * 0x10;
2227 	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0 + (path << 8), MASKBYTE3, val);
2228 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
2229 		    "[DPK] table select for Kidx[%d], Gain[%d] (0x%x)\n", kidx,
2230 		    gain, val);
2231 }
2232 
2233 static bool _dpk_sync_check(struct rtw89_dev *rtwdev,
2234 			    enum rtw89_rf_path path)
2235 {
2236 #define DPK_SYNC_TH_DC_I 200
2237 #define DPK_SYNC_TH_DC_Q 200
2238 #define DPK_SYNC_TH_CORR 170
2239 	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2240 	u16 dc_i, dc_q;
2241 	u8 corr_val, corr_idx;
2242 
2243 	rtw89_phy_write32_clr(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL);
2244 
2245 	corr_idx = (u8)rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORI);
2246 	corr_val = (u8)rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORV);
2247 
2248 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
2249 		    "[DPK] S%d Corr_idx / Corr_val = %d / %d\n", path, corr_idx,
2250 		    corr_val);
2251 
2252 	dpk->corr_idx[path] = corr_idx;
2253 	dpk->corr_val[path] = corr_val;
2254 
2255 	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x9);
2256 
2257 	dc_i = (u16)rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);
2258 	dc_q = (u16)rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCQ);
2259 
2260 	dc_i = abs(sign_extend32(dc_i, 11));
2261 	dc_q = abs(sign_extend32(dc_q, 11));
2262 
2263 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d DC I/Q, = %d / %d\n",
2264 		    path, dc_i, dc_q);
2265 
2266 	dpk->dc_i[path] = dc_i;
2267 	dpk->dc_q[path] = dc_q;
2268 
2269 	if (dc_i > DPK_SYNC_TH_DC_I || dc_q > DPK_SYNC_TH_DC_Q ||
2270 	    corr_val < DPK_SYNC_TH_CORR)
2271 		return true;
2272 	else
2273 		return false;
2274 }
2275 
/* Select the test pattern for (@path, @kidx), run the SYNC one-shot
 * and check the resulting report. Returns true on sync failure.
 */
static bool _dpk_sync(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		      enum rtw89_rf_path path, u8 kidx)
{
	_dpk_tpg_sel(rtwdev, path, kidx);
	_dpk_one_shot(rtwdev, phy, path, SYNC);
	return _dpk_sync_check(rtwdev, path); /*1= fail*/
}
2283 
/* Read the digital gain reported by the KIP after a SYNC one-shot. */
static u16 _dpk_dgain_read(struct rtw89_dev *rtwdev)
{
	u16 dgain = 0x0;

	rtw89_phy_write32_clr(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL);

	/* result discarded; presumably a dummy read to latch the report -
	 * TODO confirm against vendor flow
	 */
	rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_SYNERR);

	dgain = (u16)rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain = 0x%x (%d)\n", dgain,
		    dgain);

	return dgain;
}
2299 
2300 static s8 _dpk_dgain_mapping(struct rtw89_dev *rtwdev, u16 dgain)
2301 {
2302 	s8 offset;
2303 
2304 	if (dgain >= 0x783)
2305 		offset = 0x6;
2306 	else if (dgain <= 0x782 && dgain >= 0x551)
2307 		offset = 0x3;
2308 	else if (dgain <= 0x550 && dgain >= 0x3c4)
2309 		offset = 0x0;
2310 	else if (dgain <= 0x3c3 && dgain >= 0x2aa)
2311 		offset = -3;
2312 	else if (dgain <= 0x2a9 && dgain >= 0x1e3)
2313 		offset = -6;
2314 	else if (dgain <= 0x1e2 && dgain >= 0x156)
2315 		offset = -9;
2316 	else if (dgain <= 0x155)
2317 		offset = -12;
2318 	else
2319 		offset = 0x0;
2320 
2321 	return offset;
2322 }
2323 
/* Read the gain-loss index from the KIP report (after a GAIN_LOSS
 * one-shot).
 */
static u8 _dpk_gainloss_read(struct rtw89_dev *rtwdev)
{
	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x6);
	rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x1);
	return rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_GL);
}
2330 
/* Trigger the gain-loss measurement for (@path, @kidx); the result is
 * fetched separately via _dpk_gainloss_read().
 */
static void _dpk_gainloss(struct rtw89_dev *rtwdev,
			  enum rtw89_phy_idx phy, enum rtw89_rf_path path,
			  u8 kidx)
{
	_dpk_table_select(rtwdev, path, kidx, 1);
	_dpk_one_shot(rtwdev, phy, path, GAIN_LOSS);
}
2338 
2339 #define DPK_TXAGC_LOWER 0x2e
2340 #define DPK_TXAGC_UPPER 0x3f
2341 #define DPK_TXAGC_INVAL 0xff
2342 
2343 static u8 _dpk_set_offset(struct rtw89_dev *rtwdev,
2344 			  enum rtw89_rf_path path, s8 gain_offset)
2345 {
2346 	u8 txagc;
2347 
2348 	txagc = (u8)rtw89_read_rf(rtwdev, path, RR_MODOPT, RFREG_MASK);
2349 
2350 	if (txagc - gain_offset < DPK_TXAGC_LOWER)
2351 		txagc = DPK_TXAGC_LOWER;
2352 	else if (txagc - gain_offset > DPK_TXAGC_UPPER)
2353 		txagc = DPK_TXAGC_UPPER;
2354 	else
2355 		txagc = txagc - gain_offset;
2356 
2357 	rtw89_write_rf(rtwdev, path, RR_MODOPT, RFREG_MASK, txagc);
2358 
2359 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] tmp_txagc (GL=%d) = 0x%x\n",
2360 		    gain_offset, txagc);
2361 	return txagc;
2362 }
2363 
/* States of the _dpk_agc() state machine. */
enum dpk_agc_step {
	DPK_AGC_STEP_SYNC_DGAIN,	/* run SYNC and read digital gain */
	DPK_AGC_STEP_GAIN_ADJ,		/* adjust RXBB gain from dgain */
	DPK_AGC_STEP_GAIN_LOSS_IDX,	/* measure the gain-loss index */
	DPK_AGC_STEP_GL_GT_CRITERION,	/* loss above criterion: lower TXAGC */
	DPK_AGC_STEP_GL_LT_CRITERION,	/* loss below criterion: raise TXAGC */
	DPK_AGC_STEP_SET_TX_GAIN,	/* apply final TXAGC and exit */
};
2372 
2373 static u8 _dpk_pas_read(struct rtw89_dev *rtwdev, bool is_check)
2374 {
2375 	u32 val1_i = 0, val1_q = 0, val2_i = 0, val2_q = 0;
2376 	u8 i;
2377 
2378 	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_pas_read_defs_tbl);
2379 
2380 	if (is_check) {
2381 		rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x00);
2382 		val1_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
2383 		val1_i = abs(sign_extend32(val1_i, 11));
2384 		val1_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
2385 		val1_q = abs(sign_extend32(val1_q, 11));
2386 		rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x1f);
2387 		val2_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
2388 		val2_i = abs(sign_extend32(val2_i, 11));
2389 		val2_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
2390 		val2_q = abs(sign_extend32(val2_q, 11));
2391 
2392 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_delta = 0x%x\n",
2393 			    (val1_i * val1_i + val1_q * val1_q) /
2394 			    (val2_i * val2_i + val2_q * val2_q));
2395 
2396 	} else {
2397 		for (i = 0; i < 32; i++) {
2398 			rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, i);
2399 			rtw89_debug(rtwdev, RTW89_DBG_RFK,
2400 				    "[DPK] PAS_Read[%02d]= 0x%08x\n", i,
2401 				    rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD));
2402 		}
2403 	}
2404 	if ((val1_i * val1_i + val1_q * val1_q) >=
2405 	    ((val2_i * val2_i + val2_q * val2_q) * 8 / 5))
2406 		return 1;
2407 	else
2408 		return 0;
2409 }
2410 
/* Adaptive gain control loop for DPK: iterate SYNC / RXBB adjustment /
 * gain-loss measurement until a usable TX AGC is found, or give up
 * after DPK_AGC_ADJ_LMT adjustment rounds.
 *
 * Returns the TX AGC to use, or DPK_TXAGC_INVAL when SYNC failed.
 */
static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		   enum rtw89_rf_path path, u8 kidx, u8 init_txagc,
		   bool loss_only)
{
#define DPK_AGC_ADJ_LMT 6
#define DPK_DGAIN_UPPER 1922
#define DPK_DGAIN_LOWER 342
#define DPK_RXBB_UPPER 0x1f
#define DPK_RXBB_LOWER 0
#define DPK_GL_CRIT 7
	u8 tmp_txagc, tmp_rxbb = 0, tmp_gl_idx = 0;
	u8 agc_cnt = 0;
	bool limited_rxbb = false;
	s8 offset = 0;
	u16 dgain = 0;
	u8 step = DPK_AGC_STEP_SYNC_DGAIN;
	bool goout = false;

	tmp_txagc = init_txagc;

	do {
		switch (step) {
		case DPK_AGC_STEP_SYNC_DGAIN:
			/* a SYNC failure aborts the whole AGC loop */
			if (_dpk_sync(rtwdev, phy, path, kidx)) {
				tmp_txagc = DPK_TXAGC_INVAL;
				goout = true;
				break;
			}

			dgain = _dpk_dgain_read(rtwdev);

			/* skip RXBB adjustment when loss-only or the RXBB
			 * range has already been exhausted
			 */
			if (loss_only || limited_rxbb)
				step = DPK_AGC_STEP_GAIN_LOSS_IDX;
			else
				step = DPK_AGC_STEP_GAIN_ADJ;
			break;

		case DPK_AGC_STEP_GAIN_ADJ:
			/* move the RXBB gain by the dgain-derived offset,
			 * clamped to [DPK_RXBB_LOWER, DPK_RXBB_UPPER]
			 */
			tmp_rxbb = (u8)rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB);
			offset = _dpk_dgain_mapping(rtwdev, dgain);

			if (tmp_rxbb + offset > DPK_RXBB_UPPER) {
				tmp_rxbb = DPK_RXBB_UPPER;
				limited_rxbb = true;
			} else if (tmp_rxbb + offset < DPK_RXBB_LOWER) {
				tmp_rxbb = DPK_RXBB_LOWER;
				limited_rxbb = true;
			} else {
				tmp_rxbb = tmp_rxbb + offset;
			}

			rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB, tmp_rxbb);
			rtw89_debug(rtwdev, RTW89_DBG_RFK,
				    "[DPK] Adjust RXBB (%d) = 0x%x\n", offset,
				    tmp_rxbb);
			/* refresh the RX IQC whenever the gain moved (or on
			 * the first pass): <80MHz bypasses it, 80MHz needs a
			 * loopback RX IQK
			 */
			if (offset != 0 || agc_cnt == 0) {
				if (rtwdev->hal.current_band_width < RTW89_CHANNEL_WIDTH_80)
					_dpk_bypass_rxcfir(rtwdev, path, true);
				else
					_dpk_lbk_rxiqk(rtwdev, phy, path);
			}
			/* dgain far out of range: redo SYNC with new gain */
			if (dgain > DPK_DGAIN_UPPER || dgain < DPK_DGAIN_LOWER)
				step = DPK_AGC_STEP_SYNC_DGAIN;
			else
				step = DPK_AGC_STEP_GAIN_LOSS_IDX;

			agc_cnt++;
			break;

		case DPK_AGC_STEP_GAIN_LOSS_IDX:
			_dpk_gainloss(rtwdev, phy, path, kidx);
			tmp_gl_idx = _dpk_gainloss_read(rtwdev);

			if ((tmp_gl_idx == 0 && _dpk_pas_read(rtwdev, true)) ||
			    tmp_gl_idx > DPK_GL_CRIT)
				step = DPK_AGC_STEP_GL_GT_CRITERION;
			else if (tmp_gl_idx == 0)
				step = DPK_AGC_STEP_GL_LT_CRITERION;
			else
				step = DPK_AGC_STEP_SET_TX_GAIN;
			break;

		case DPK_AGC_STEP_GL_GT_CRITERION:
			/* gain loss too large: back the TX AGC off by 3 */
			if (tmp_txagc == DPK_TXAGC_LOWER) {
				goout = true;
				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[DPK] Txagc@lower bound!!\n");
			} else {
				tmp_txagc = _dpk_set_offset(rtwdev, path, 3);
			}
			step = DPK_AGC_STEP_GAIN_LOSS_IDX;
			agc_cnt++;
			break;

		case DPK_AGC_STEP_GL_LT_CRITERION:
			/* no measurable gain loss: push the TX AGC up by 2 */
			if (tmp_txagc == DPK_TXAGC_UPPER) {
				goout = true;
				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[DPK] Txagc@upper bound!!\n");
			} else {
				tmp_txagc = _dpk_set_offset(rtwdev, path, -2);
			}
			step = DPK_AGC_STEP_GAIN_LOSS_IDX;
			agc_cnt++;
			break;

		case DPK_AGC_STEP_SET_TX_GAIN:
			/* acceptable loss index: fold it into the final AGC */
			tmp_txagc = _dpk_set_offset(rtwdev, path, tmp_gl_idx);
			goout = true;
			agc_cnt++;
			break;

		default:
			goout = true;
			break;
		}
	} while (!goout && (agc_cnt < DPK_AGC_ADJ_LMT));

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] Txagc / RXBB for DPK = 0x%x / 0x%x\n", tmp_txagc,
		    tmp_rxbb);

	return tmp_txagc;
}
2535 
2536 static void _dpk_set_mdpd_para(struct rtw89_dev *rtwdev, u8 order)
2537 {
2538 	switch (order) {
2539 	case 0:
2540 		rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
2541 		rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x3);
2542 		rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN, 0x1);
2543 		break;
2544 	case 1:
2545 		rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
2546 		rtw89_phy_write32_clr(rtwdev, R_LDL_NORM, B_LDL_NORM_PN);
2547 		rtw89_phy_write32_clr(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN);
2548 		break;
2549 	case 2:
2550 		rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
2551 		rtw89_phy_write32_clr(rtwdev, R_LDL_NORM, B_LDL_NORM_PN);
2552 		rtw89_phy_write32_clr(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN);
2553 		break;
2554 	default:
2555 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
2556 			    "[DPK] Wrong MDPD order!!(0x%x)\n", order);
2557 		break;
2558 	}
2559 
2560 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
2561 		    "[DPK] Set MDPD order to 0x%x for IDL\n", order);
2562 }
2563 
/* Program MDPD order 0, select the DPK table for this K index, then fire
 * the MDPK_IDL one-shot on the given path.
 * NOTE(review): @gain is currently unused here.
 */
static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			 enum rtw89_rf_path path, u8 kidx, u8 gain)
{
	_dpk_set_mdpd_para(rtwdev, 0x0);
	_dpk_table_select(rtwdev, path, kidx, 1);
	_dpk_one_shot(rtwdev, phy, path, MDPK_IDL);
}
2571 
/* Commit the DPK result for S<path>[<kidx>] to hardware and cache the
 * txagc/pwsf/gs values in the dpk info for later tracking and reload.
 */
static void _dpk_fill_result(struct rtw89_dev *rtwdev,
			     enum rtw89_rf_path path, u8 kidx, u8 gain,
			     u8 txagc)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	/* Fixed pwsf/gs values written below.
	 * NOTE(review): gs (0x5b) also appears baked into the literal
	 * 0x065b5b5b written to R_DPD_CH0A — keep them in sync.
	 */
	u16 pwsf = 0x78;
	u8 gs = 0x5b;

	/* Select which MDPD coefficient page (kidx) the writes target. */
	rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), B_COEF_SEL_MDPD, kidx);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] Fill txagc/ pwsf/ gs = 0x%x/ 0x%x/ 0x%x\n", txagc,
		    pwsf, gs);

	dpk->bp[path][kidx].txagc_dpk = txagc;
	/* Field position depends on both gain and kidx. */
	rtw89_phy_write32_mask(rtwdev, R_TXAGC_RFK + (path << 8),
			       0x3F << ((gain << 3) + (kidx << 4)), txagc);

	dpk->bp[path][kidx].pwsf = pwsf;
	rtw89_phy_write32_mask(rtwdev, R_DPD_BND + (path << 8) + (kidx << 2),
			       0x1FF << (gain << 4), pwsf);

	/* Pulse B_LOAD_COEF_MDPD to latch the coefficients. */
	rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x1);
	rtw89_phy_write32_clr(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD);

	dpk->bp[path][kidx].gs = gs;
	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
			       MASKDWORD, 0x065b5b5b);

	rtw89_phy_write32_clr(rtwdev, R_DPD_V1 + (path << 8), MASKDWORD);

	rtw89_phy_write32_clr(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_SEL);
}
2606 
2607 static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2608 			      enum rtw89_rf_path path)
2609 {
2610 	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2611 	bool is_reload = false;
2612 	u8 idx, cur_band, cur_ch;
2613 
2614 	cur_band = rtwdev->hal.current_band_type;
2615 	cur_ch = rtwdev->hal.current_channel;
2616 
2617 	for (idx = 0; idx < RTW89_DPK_BKUP_NUM; idx++) {
2618 		if (cur_band != dpk->bp[path][idx].band ||
2619 		    cur_ch != dpk->bp[path][idx].ch)
2620 			continue;
2621 
2622 		rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8),
2623 				       B_COEF_SEL_MDPD, idx);
2624 		dpk->cur_idx[path] = idx;
2625 		is_reload = true;
2626 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
2627 			    "[DPK] reload S%d[%d] success\n", path, idx);
2628 	}
2629 
2630 	return is_reload;
2631 }
2632 
2633 static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2634 		      enum rtw89_rf_path path, u8 gain)
2635 {
2636 	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2637 	u8 txagc = 0, kidx = dpk->cur_idx[path];
2638 	bool is_fail = false;
2639 
2640 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
2641 		    "[DPK] ========= S%d[%d] DPK Start =========\n", path,
2642 		    kidx);
2643 
2644 	_rf_direct_cntrl(rtwdev, path, false);
2645 	txagc = _dpk_set_tx_pwr(rtwdev, gain, path);
2646 	_dpk_rf_setting(rtwdev, gain, path, kidx);
2647 	_dpk_rx_dck(rtwdev, phy, path);
2648 
2649 	_dpk_kip_setting(rtwdev, path, kidx);
2650 	_dpk_manual_txcfir(rtwdev, path, true);
2651 	txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false);
2652 	if (txagc == DPK_TXAGC_INVAL)
2653 		is_fail = true;
2654 	_dpk_get_thermal(rtwdev, kidx, path);
2655 
2656 	_dpk_idl_mpa(rtwdev, phy, path, kidx, gain);
2657 	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
2658 	_dpk_fill_result(rtwdev, path, kidx, gain, txagc);
2659 	_dpk_manual_txcfir(rtwdev, path, false);
2660 
2661 	if (!is_fail)
2662 		dpk->bp[path][kidx].path_ok = true;
2663 	else
2664 		dpk->bp[path][kidx].path_ok = false;
2665 
2666 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s\n", path, kidx,
2667 		    is_fail ? "Check" : "Success");
2668 
2669 	return is_fail;
2670 }
2671 
/* Run (or reload) DPK calibration on every RF path selected by @kpath,
 * backing up and restoring BB/RF/KIP state around the calibration.
 *
 * NOTE(review): @force is currently unused in this function.
 */
static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
			    enum rtw89_phy_idx phy, u8 kpath)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u32 backup_bb_val[BACKUP_BB_REGS_NR];
	u32 backup_rf_val[RTW8852A_DPK_RF_PATH][BACKUP_RF_REGS_NR];
	u32 kip_bkup[RTW8852A_DPK_RF_PATH][RTW8852A_DPK_KIP_REG_NUM] = {{0}};
	u32 kip_reg[] = {R_RXIQC, R_IQK_RES};
	u8 path;
	bool is_fail = true, reloaded[RTW8852A_DPK_RF_PATH] = {false};

	/* Try to reload a stored result per requested path; on a miss with
	 * a previously used slot, flip to the other backup slot.
	 */
	if (dpk->is_dpk_reload_en) {
		for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) {
			if (!(kpath & BIT(path)))
				continue;

			reloaded[path] = _dpk_reload_check(rtwdev, phy, path);
			if (!reloaded[path] && dpk->bp[path][0].ch != 0)
				dpk->cur_idx[path] = !dpk->cur_idx[path];
			else
				_dpk_onoff(rtwdev, path, false);
		}
	} else {
		for (path = 0; path < RTW8852A_DPK_RF_PATH; path++)
			dpk->cur_idx[path] = 0;
	}

	/* All requested paths reloaded: nothing left to calibrate. */
	if ((kpath == RF_A && reloaded[RF_PATH_A]) ||
	    (kpath == RF_B && reloaded[RF_PATH_B]) ||
	    (kpath == RF_AB && reloaded[RF_PATH_A] && reloaded[RF_PATH_B]))
		return;

	_rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]);

	/* Back up per-path state before touching hardware. */
	for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) {
		if (!(kpath & BIT(path)) || reloaded[path])
			continue;
		if (rtwdev->is_tssi_mode[path])
			_dpk_tssi_pause(rtwdev, path, true);
		_dpk_bkup_kip(rtwdev, kip_reg, kip_bkup, path);
		_rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
		_dpk_information(rtwdev, phy, path);
	}

	/* NOTE(review): 'path' equals RTW8852A_DPK_RF_PATH here (the loop
	 * above ran to completion) — presumably the AFE helpers key off
	 * @kpath instead; confirm.
	 */
	_dpk_bb_afe_setting(rtwdev, phy, path, kpath);

	/* Calibrate each path that was not reloaded. */
	for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) {
		if (!(kpath & BIT(path)) || reloaded[path])
			continue;

		is_fail = _dpk_main(rtwdev, phy, path, 1);
		_dpk_onoff(rtwdev, path, is_fail);
	}

	_dpk_bb_afe_restore(rtwdev, phy, path, kpath);
	_rfk_restore_bb_reg(rtwdev, &backup_bb_val[0]);

	/* Restore the per-path state saved above. */
	for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) {
		if (!(kpath & BIT(path)) || reloaded[path])
			continue;

		_dpk_kip_restore(rtwdev, path);
		_dpk_reload_kip(rtwdev, kip_reg, kip_bkup, path);
		_rfk_restore_rf_reg(rtwdev, &backup_rf_val[path][0], path);
		if (rtwdev->is_tssi_mode[path])
			_dpk_tssi_pause(rtwdev, path, false);
	}
}
2740 
2741 static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
2742 {
2743 	struct rtw89_fem_info *fem = &rtwdev->fem;
2744 
2745 	if (fem->epa_2g && rtwdev->hal.current_band_type == RTW89_BAND_2G) {
2746 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
2747 			    "[DPK] Skip DPK due to 2G_ext_PA exist!!\n");
2748 		return true;
2749 	} else if (fem->epa_5g && rtwdev->hal.current_band_type == RTW89_BAND_5G) {
2750 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
2751 			    "[DPK] Skip DPK due to 5G_ext_PA exist!!\n");
2752 		return true;
2753 	}
2754 
2755 	return false;
2756 }
2757 
2758 static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
2759 {
2760 	u8 path, kpath;
2761 
2762 	kpath = _kpath(rtwdev, phy);
2763 
2764 	for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) {
2765 		if (kpath & BIT(path))
2766 			_dpk_onoff(rtwdev, path, true);
2767 	}
2768 }
2769 
/* Top-level DPK entry: either bypass all paths of @phy (external PA on
 * the active band) or run the calibration selection for them.
 */
static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force)
{
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] ****** DPK Start (Ver: 0x%x, Cv: %d, RF_para: %d) ******\n",
		    RTW8852A_DPK_VER, rtwdev->hal.cv,
		    RTW8852A_RF_REL_VERSION);

	if (_dpk_bypass_check(rtwdev, phy))
		_dpk_force_bypass(rtwdev, phy);
	else
		_dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy));
}
2782 
2783 static void _dpk_onoff(struct rtw89_dev *rtwdev,
2784 		       enum rtw89_rf_path path, bool off)
2785 {
2786 	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2787 	u8 val, kidx = dpk->cur_idx[path];
2788 
2789 	val = dpk->is_dpk_enable && !off && dpk->bp[path][kidx].path_ok;
2790 
2791 	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
2792 			       MASKBYTE3, 0x6 | val);
2793 
2794 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s !!!\n", path,
2795 		    kidx, dpk->is_dpk_enable && !off ? "enable" : "disable");
2796 }
2797 
/* Periodic DPK tracking: compensate the programmed pwsf values for
 * thermal drift since calibration, per path and current backup slot.
 */
static void _dpk_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	u8 path, kidx;
	u8 trk_idx = 0, txagc_rf = 0;
	s8 txagc_bb = 0, txagc_bb_tp = 0, ini_diff = 0, txagc_ofst = 0;
	u16 pwsf[2];
	u8 cur_ther;
	s8 delta_ther[2] = {0};

	for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) {
		kidx = dpk->cur_idx[path];

		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
			    "[DPK_TRK] ================[S%d[%d] (CH %d)]================\n",
			    path, kidx, dpk->bp[path][kidx].ch);

		cur_ther = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
			    "[DPK_TRK] thermal now = %d\n", cur_ther);

		/* Drift relative to the thermal reading captured at DPK time;
		 * only when the slot holds a valid channel and reading.
		 */
		if (dpk->bp[path][kidx].ch != 0 && cur_ther != 0)
			delta_ther[path] = dpk->bp[path][kidx].ther_dpk - cur_ther;

		/* Band-dependent scaling of the thermal delta. */
		if (dpk->bp[path][kidx].band == RTW89_BAND_2G)
			delta_ther[path] = delta_ther[path] * 3 / 2;
		else
			delta_ther[path] = delta_ther[path] * 5 / 2;

		txagc_rf = (u8)rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB  + (path << 13),
						     RR_MODOPT_M_TXPWR);

		if (rtwdev->is_tssi_mode[path]) {
			trk_idx = (u8)rtw89_read_rf(rtwdev, path, RR_TXA, RR_TXA_TRK);

			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] txagc_RF / track_idx = 0x%x / %d\n",
				    txagc_rf, trk_idx);

			txagc_bb =
				(s8)rtw89_phy_read32_mask(rtwdev,
							  R_TXAGC_BB + (path << 13),
							  MASKBYTE2);
			/* NOTE(review): (u8) cast assigned into an s8
			 * variable — presumably intentional truncation;
			 * confirm against the register field width.
			 */
			txagc_bb_tp =
				(u8)rtw89_phy_read32_mask(rtwdev,
							  R_TXAGC_TP + (path << 13),
							  B_TXAGC_TP);

			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] txagc_bb_tp / txagc_bb = 0x%x / 0x%x\n",
				    txagc_bb_tp, txagc_bb);

			txagc_ofst =
				(s8)rtw89_phy_read32_mask(rtwdev,
							  R_TXAGC_BB + (path << 13),
							  MASKBYTE3);

			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] txagc_offset / delta_ther = %d / %d\n",
				    txagc_ofst, delta_ther[path]);

			/* BIT(15) of R_DPD_COM set: ignore the txagc offset. */
			if (rtw89_phy_read32_mask(rtwdev, R_DPD_COM + (path << 8),
						  BIT(15)) == 0x1)
				txagc_ofst = 0;

			if (txagc_rf != 0 && cur_ther != 0)
				ini_diff = txagc_ofst + delta_ther[path];

			/* pwsf[0]/pwsf[1] are computed identically; they are
			 * written to two different fields of R_DPD_BND below.
			 */
			if (rtw89_phy_read32_mask(rtwdev, R_P0_TXDPD + (path << 13),
						  B_P0_TXDPD) == 0x0) {
				pwsf[0] = dpk->bp[path][kidx].pwsf + txagc_bb_tp -
					  txagc_bb + ini_diff +
					  tssi_info->extra_ofst[path];
				pwsf[1] = dpk->bp[path][kidx].pwsf + txagc_bb_tp -
					  txagc_bb + ini_diff +
					  tssi_info->extra_ofst[path];
			} else {
				pwsf[0] = dpk->bp[path][kidx].pwsf + ini_diff +
					  tssi_info->extra_ofst[path];
				pwsf[1] = dpk->bp[path][kidx].pwsf + ini_diff +
					  tssi_info->extra_ofst[path];
			}

		} else {
			/* Non-TSSI mode: thermal delta only, 9-bit wrap. */
			pwsf[0] = (dpk->bp[path][kidx].pwsf + delta_ther[path]) & 0x1ff;
			pwsf[1] = (dpk->bp[path][kidx].pwsf + delta_ther[path]) & 0x1ff;
		}

		/* Apply only when tracking is not disabled and TX power is
		 * non-zero.
		 */
		if (rtw89_phy_read32_mask(rtwdev, R_DPK_TRK, B_DPK_TRK_DIS) == 0x0 &&
		    txagc_rf != 0) {
			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] New pwsf[0] / pwsf[1] = 0x%x / 0x%x\n",
				    pwsf[0], pwsf[1]);

			rtw89_phy_write32_mask(rtwdev, R_DPD_BND + (path << 8) + (kidx << 2),
					       0x000001FF, pwsf[0]);
			rtw89_phy_write32_mask(rtwdev, R_DPD_BND + (path << 8) + (kidx << 2),
					       0x01FF0000, pwsf[1]);
		}
	}
}
2901 
2902 static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2903 			     enum rtw89_rf_path path)
2904 {
2905 	enum rtw89_band band = rtwdev->hal.current_band_type;
2906 
2907 	if (band == RTW89_BAND_2G)
2908 		rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXG, 0x1);
2909 	else
2910 		rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXA, 0x1);
2911 }
2912 
/* Apply the common TSSI system register table, then the 2G or 5G
 * band-specific variant for the current band.
 */
static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	enum rtw89_band band = rtwdev->hal.current_band_type;

	rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_sys_defs_tbl);
	rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
				 &rtw8852a_tssi_sys_defs_2g_tbl,
				 &rtw8852a_tssi_sys_defs_5g_tbl);
}
2922 
/* Apply the per-path TX power control BB init table, then the 2G or 5G
 * band-specific variant.
 */
static void _tssi_ini_txpwr_ctrl_bb(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				    enum rtw89_rf_path path)
{
	enum rtw89_band band = rtwdev->hal.current_band_type;

	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_tssi_txpwr_ctrl_bb_defs_a_tbl,
				 &rtw8852a_tssi_txpwr_ctrl_bb_defs_b_tbl);
	rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
				 &rtw8852a_tssi_txpwr_ctrl_bb_defs_2g_tbl,
				 &rtw8852a_tssi_txpwr_ctrl_bb_defs_5g_tbl);
}
2935 
/* Apply the per-path HE TB TX power control BB init table. */
static void _tssi_ini_txpwr_ctrl_bb_he_tb(struct rtw89_dev *rtwdev,
					  enum rtw89_phy_idx phy,
					  enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_a_tbl,
				 &rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_b_tbl);
}
2944 
/* Apply the per-path TSSI DCK register table. */
static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			  enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_tssi_dck_defs_a_tbl,
				 &rtw8852a_tssi_dck_defs_b_tbl);
}
2952 
/* Build and program the 64-entry thermal offset table for @path from the
 * subband-specific delta-swing tables, anchored at the stored thermal
 * calibration value.  A stored value of 0xff means no calibration data:
 * the anchor defaults to 32 and the offset table is zeroed.
 *
 * NOTE(review): the switch below has no default case — an unexpected
 * @subband leaves thm_up_*/thm_down_* NULL and they would be dereferenced
 * in the non-0xff branches.  Confirm callers only pass the four handled
 * subbands.
 */
static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				 enum rtw89_rf_path path)
{
/* Pack four consecutive s8 offsets into one little-endian u32 word. */
#define __get_val(ptr, idx)				\
({							\
	s8 *__ptr = (ptr);				\
	u8 __idx = (idx), __i, __v;			\
	u32 __val = 0;					\
	for (__i = 0; __i < 4; __i++) {			\
		__v = (__ptr[__idx + __i]);		\
		__val |= (__v << (8 * __i));		\
	}						\
	__val;						\
})
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	u8 ch = rtwdev->hal.current_channel;
	u8 subband = rtwdev->hal.current_subband;
	const u8 *thm_up_a = NULL;
	const u8 *thm_down_a = NULL;
	const u8 *thm_up_b = NULL;
	const u8 *thm_down_b = NULL;
	u8 thermal = 0xff;
	s8 thm_ofst[64] = {0};
	u32 tmp = 0;
	u8 i, j;

	/* Pick the delta-swing tables for the current subband. */
	switch (subband) {
	case RTW89_CH_2G:
		thm_up_a = rtw89_8852a_trk_cfg.delta_swingidx_2ga_p;
		thm_down_a = rtw89_8852a_trk_cfg.delta_swingidx_2ga_n;
		thm_up_b = rtw89_8852a_trk_cfg.delta_swingidx_2gb_p;
		thm_down_b = rtw89_8852a_trk_cfg.delta_swingidx_2gb_n;
		break;
	case RTW89_CH_5G_BAND_1:
		thm_up_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_p[0];
		thm_down_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_n[0];
		thm_up_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_p[0];
		thm_down_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_n[0];
		break;
	case RTW89_CH_5G_BAND_3:
		thm_up_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_p[1];
		thm_down_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_n[1];
		thm_up_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_p[1];
		thm_down_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_n[1];
		break;
	case RTW89_CH_5G_BAND_4:
		thm_up_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_p[2];
		thm_down_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_n[2];
		thm_up_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_p[2];
		thm_down_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_n[2];
		break;
	}

	if (path == RF_PATH_A) {
		thermal = tssi_info->thermal[RF_PATH_A];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] ch=%d thermal_pathA=0x%x\n", ch, thermal);

		rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_DIS, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_TRK, 0x1);

		if (thermal == 0xff) {
			/* No calibration value: anchor at 32, zero the table. */
			rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, 32);
			rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, 32);

			for (i = 0; i < 64; i += 4) {
				rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, 0x0);

				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    0x5c00 + i, 0x0);
			}

		} else {
			rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, thermal);
			rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL,
					       thermal);

			/* Entries 0..31: negated "down" deltas; the last table
			 * value is repeated once the table is exhausted.
			 */
			i = 0;
			for (j = 0; j < 32; j++)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      -thm_down_a[i++] :
					      -thm_down_a[DELTA_SWINGIDX_SIZE - 1];

			/* Entries 63..32: "up" deltas, starting from index 1. */
			i = 1;
			for (j = 63; j >= 32; j--)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      thm_up_a[i++] :
					      thm_up_a[DELTA_SWINGIDX_SIZE - 1];

			for (i = 0; i < 64; i += 4) {
				tmp = __get_val(thm_ofst, i);
				rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, tmp);

				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    0x5c00 + i, tmp);
			}
		}
		/* Pulse the ready bit to latch the table. */
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x0);

	} else {
		/* Path B: same sequence against the P1 register set. */
		thermal = tssi_info->thermal[RF_PATH_B];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] ch=%d thermal_pathB=0x%x\n", ch, thermal);

		rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_DIS, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_TRK, 0x1);

		if (thermal == 0xff) {
			rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, 32);
			rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL, 32);

			for (i = 0; i < 64; i += 4) {
				rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, 0x0);

				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    0x7c00 + i, 0x0);
			}

		} else {
			rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, thermal);
			rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL,
					       thermal);

			i = 0;
			for (j = 0; j < 32; j++)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      -thm_down_b[i++] :
					      -thm_down_b[DELTA_SWINGIDX_SIZE - 1];

			i = 1;
			for (j = 63; j >= 32; j--)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      thm_up_b[i++] :
					      thm_up_b[DELTA_SWINGIDX_SIZE - 1];

			for (i = 0; i < 64; i += 4) {
				tmp = __get_val(thm_ofst, i);
				rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, tmp);

				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    0x7c00 + i, tmp);
			}
		}
		rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x0);
	}
#undef __get_val
}
3108 
/* Apply the per-path TSSI DAC gain table. */
static void _tssi_set_dac_gain_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				   enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_tssi_dac_gain_tbl_defs_a_tbl,
				 &rtw8852a_tssi_dac_gain_tbl_defs_b_tbl);
}
3116 
/* Apply the per-path TSSI slope calibration origin table. */
static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_tssi_slope_cal_org_defs_a_tbl,
				 &rtw8852a_tssi_slope_cal_org_defs_b_tbl);
}
3124 
/* Apply the per-path TSSI RF gap table. */
static void _tssi_set_rf_gap_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				 enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_tssi_rf_gap_tbl_defs_a_tbl,
				 &rtw8852a_tssi_rf_gap_tbl_defs_b_tbl);
}
3132 
/* Apply the per-path TSSI slope table. */
static void _tssi_set_slope(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			    enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_tssi_slope_defs_a_tbl,
				 &rtw8852a_tssi_slope_defs_b_tbl);
}
3140 
/* Apply the per-path TSSI tracking table. */
static void _tssi_set_track(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			    enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_tssi_track_defs_a_tbl,
				 &rtw8852a_tssi_track_defs_b_tbl);
}
3148 
/* Apply the per-path TXAGC offset / moving-average table. */
static void _tssi_set_txagc_offset_mv_avg(struct rtw89_dev *rtwdev,
					  enum rtw89_phy_idx phy,
					  enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_tssi_txagc_ofst_mv_avg_defs_a_tbl,
				 &rtw8852a_tssi_txagc_ofst_mv_avg_defs_b_tbl);
}
3157 
/* Apply the per-path TSSI PAK table matching the current subband.
 * Subbands outside the four handled cases apply no table at all.
 */
static void _tssi_pak(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		      enum rtw89_rf_path path)
{
	u8 subband = rtwdev->hal.current_subband;

	switch (subband) {
	case RTW89_CH_2G:
		rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
					 &rtw8852a_tssi_pak_defs_a_2g_tbl,
					 &rtw8852a_tssi_pak_defs_b_2g_tbl);
		break;
	case RTW89_CH_5G_BAND_1:
		rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
					 &rtw8852a_tssi_pak_defs_a_5g_1_tbl,
					 &rtw8852a_tssi_pak_defs_b_5g_1_tbl);
		break;
	case RTW89_CH_5G_BAND_3:
		rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
					 &rtw8852a_tssi_pak_defs_a_5g_3_tbl,
					 &rtw8852a_tssi_pak_defs_b_5g_3_tbl);
		break;
	case RTW89_CH_5G_BAND_4:
		rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
					 &rtw8852a_tssi_pak_defs_a_5g_4_tbl,
					 &rtw8852a_tssi_pak_defs_b_5g_4_tbl);
		break;
	}
}
3186 
/* Enable TSSI on all paths: program tracking and TXAGC offset tables,
 * apply the per-path enable table, record the baseline thermal reading,
 * and mark each path as running in TSSI mode.
 */
static void _tssi_enable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	u8 i;

	for (i = 0; i < RF_PATH_NUM_8852A; i++) {
		_tssi_set_track(rtwdev, phy, i);
		_tssi_set_txagc_offset_mv_avg(rtwdev, phy, i);

		rtw89_rfk_parser_by_cond(rtwdev, i == RF_PATH_A,
					 &rtw8852a_tssi_enable_defs_a_tbl,
					 &rtw8852a_tssi_enable_defs_b_tbl);

		/* Baseline for later thermal-drift tracking. */
		tssi_info->base_thermal[i] =
			ewma_thermal_read(&rtwdev->phystat.avg_thermal[i]);
		rtwdev->is_tssi_mode[i] = true;
	}
}
3205 
/* Disable TSSI via the disable table and clear the per-path mode flags. */
static void _tssi_disable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_disable_defs_tbl);

	rtwdev->is_tssi_mode[RF_PATH_A] = false;
	rtwdev->is_tssi_mode[RF_PATH_B] = false;
}
3213 
3214 static u32 _tssi_get_cck_group(struct rtw89_dev *rtwdev, u8 ch)
3215 {
3216 	switch (ch) {
3217 	case 1 ... 2:
3218 		return 0;
3219 	case 3 ... 5:
3220 		return 1;
3221 	case 6 ... 8:
3222 		return 2;
3223 	case 9 ... 11:
3224 		return 3;
3225 	case 12 ... 13:
3226 		return 4;
3227 	case 14:
3228 		return 5;
3229 	}
3230 
3231 	return 0;
3232 }
3233 
3234 #define TSSI_EXTRA_GROUP_BIT (BIT(31))
3235 #define TSSI_EXTRA_GROUP(idx) (TSSI_EXTRA_GROUP_BIT | (idx))
3236 #define IS_TSSI_EXTRA_GROUP(group) ((group) & TSSI_EXTRA_GROUP_BIT)
3237 #define TSSI_EXTRA_GET_GROUP_IDX1(group) ((group) & ~TSSI_EXTRA_GROUP_BIT)
3238 #define TSSI_EXTRA_GET_GROUP_IDX2(group) (TSSI_EXTRA_GET_GROUP_IDX1(group) + 1)
3239 
/* Map a channel number to its TSSI MCS (OFDM) DE group index.
 * Channels between two groups return TSSI_EXTRA_GROUP(idx), telling the
 * caller to average entries idx and idx + 1.  Unknown channels map to
 * group 0.
 */
static u32 _tssi_get_ofdm_group(struct rtw89_dev *rtwdev, u8 ch)
{
	switch (ch) {
	case 1 ... 2:
		return 0;
	case 3 ... 5:
		return 1;
	case 6 ... 8:
		return 2;
	case 9 ... 11:
		return 3;
	case 12 ... 14:
		return 4;
	case 36 ... 40:
		return 5;
	case 41 ... 43:
		return TSSI_EXTRA_GROUP(5);
	case 44 ... 48:
		return 6;
	case 49 ... 51:
		return TSSI_EXTRA_GROUP(6);
	case 52 ... 56:
		return 7;
	case 57 ... 59:
		return TSSI_EXTRA_GROUP(7);
	case 60 ... 64:
		return 8;
	case 100 ... 104:
		return 9;
	case 105 ... 107:
		return TSSI_EXTRA_GROUP(9);
	case 108 ... 112:
		return 10;
	case 113 ... 115:
		return TSSI_EXTRA_GROUP(10);
	case 116 ... 120:
		return 11;
	case 121 ... 123:
		return TSSI_EXTRA_GROUP(11);
	case 124 ... 128:
		return 12;
	case 129 ... 131:
		return TSSI_EXTRA_GROUP(12);
	case 132 ... 136:
		return 13;
	case 137 ... 139:
		return TSSI_EXTRA_GROUP(13);
	case 140 ... 144:
		return 14;
	case 149 ... 153:
		return 15;
	case 154 ... 156:
		return TSSI_EXTRA_GROUP(15);
	case 157 ... 161:
		return 16;
	case 162 ... 164:
		return TSSI_EXTRA_GROUP(16);
	case 165 ... 169:
		return 17;
	case 170 ... 172:
		return TSSI_EXTRA_GROUP(17);
	case 173 ... 177:
		return 18;
	}

	return 0;
}
3307 
3308 static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch)
3309 {
3310 	switch (ch) {
3311 	case 1 ... 8:
3312 		return 0;
3313 	case 9 ... 14:
3314 		return 1;
3315 	case 36 ... 48:
3316 		return 2;
3317 	case 52 ... 64:
3318 		return 3;
3319 	case 100 ... 112:
3320 		return 4;
3321 	case 116 ... 128:
3322 		return 5;
3323 	case 132 ... 144:
3324 		return 6;
3325 	case 149 ... 177:
3326 		return 7;
3327 	}
3328 
3329 	return 0;
3330 }
3331 
3332 static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3333 			    enum rtw89_rf_path path)
3334 {
3335 	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3336 	u8 ch = rtwdev->hal.current_channel;
3337 	u32 gidx, gidx_1st, gidx_2nd;
3338 	s8 de_1st = 0;
3339 	s8 de_2nd = 0;
3340 	s8 val;
3341 
3342 	gidx = _tssi_get_ofdm_group(rtwdev, ch);
3343 
3344 	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3345 		    "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n",
3346 		    path, gidx);
3347 
3348 	if (IS_TSSI_EXTRA_GROUP(gidx)) {
3349 		gidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(gidx);
3350 		gidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(gidx);
3351 		de_1st = tssi_info->tssi_mcs[path][gidx_1st];
3352 		de_2nd = tssi_info->tssi_mcs[path][gidx_2nd];
3353 		val = (de_1st + de_2nd) / 2;
3354 
3355 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3356 			    "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
3357 			    path, val, de_1st, de_2nd);
3358 	} else {
3359 		val = tssi_info->tssi_mcs[path][gidx];
3360 
3361 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3362 			    "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
3363 	}
3364 
3365 	return val;
3366 }
3367 
3368 static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev,
3369 				 enum rtw89_phy_idx phy,
3370 				 enum rtw89_rf_path path)
3371 {
3372 	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3373 	u8 ch = rtwdev->hal.current_channel;
3374 	u32 tgidx, tgidx_1st, tgidx_2nd;
3375 	s8 tde_1st = 0;
3376 	s8 tde_2nd = 0;
3377 	s8 val;
3378 
3379 	tgidx = _tssi_get_trim_group(rtwdev, ch);
3380 
3381 	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3382 		    "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
3383 		    path, tgidx);
3384 
3385 	if (IS_TSSI_EXTRA_GROUP(tgidx)) {
3386 		tgidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
3387 		tgidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
3388 		tde_1st = tssi_info->tssi_trim[path][tgidx_1st];
3389 		tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd];
3390 		val = (tde_1st + tde_2nd) / 2;
3391 
3392 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3393 			    "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
3394 			    path, val, tde_1st, tde_2nd);
3395 	} else {
3396 		val = tssi_info->tssi_trim[path][tgidx];
3397 
3398 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3399 			    "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
3400 			    path, val);
3401 	}
3402 
3403 	return val;
3404 }
3405 
/* Program the CCK and MCS DE register fields for both paths from the
 * efuse-derived TSSI values plus the per-channel trim offset.
 */
static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
				  enum rtw89_phy_idx phy)
{
/* DE field occupies bits [21:12] of each register below. */
#define __DE_MASK 0x003ff000
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	static const u32 r_cck_long[RF_PATH_NUM_8852A] = {0x5858, 0x7858};
	static const u32 r_cck_short[RF_PATH_NUM_8852A] = {0x5860, 0x7860};
	static const u32 r_mcs_20m[RF_PATH_NUM_8852A] = {0x5838, 0x7838};
	static const u32 r_mcs_40m[RF_PATH_NUM_8852A] = {0x5840, 0x7840};
	static const u32 r_mcs_80m[RF_PATH_NUM_8852A] = {0x5848, 0x7848};
	static const u32 r_mcs_80m_80m[RF_PATH_NUM_8852A] = {0x5850, 0x7850};
	static const u32 r_mcs_5m[RF_PATH_NUM_8852A] = {0x5828, 0x7828};
	static const u32 r_mcs_10m[RF_PATH_NUM_8852A] = {0x5830, 0x7830};
	u8 ch = rtwdev->hal.current_channel;
	u8 i, gidx;
	s8 ofdm_de;
	s8 trim_de;
	s32 val;

	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n",
		    phy, ch);

	for (i = 0; i < RF_PATH_NUM_8852A; i++) {
		/* CCK DE: efuse value for the channel's CCK group + trim. */
		gidx = _tssi_get_cck_group(rtwdev, ch);
		trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
		val = tssi_info->tssi_cck[i][gidx] + trim_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d cck[%d]=0x%x trim=0x%x\n",
			    i, gidx, tssi_info->tssi_cck[i][gidx], trim_de);

		rtw89_phy_write32_mask(rtwdev, r_cck_long[i], __DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, r_cck_short[i], __DE_MASK, val);

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] Set TSSI CCK DE 0x%x[21:12]=0x%x\n",
			    r_cck_long[i],
			    rtw89_phy_read32_mask(rtwdev, r_cck_long[i],
						  __DE_MASK));

		/* MCS DE: OFDM value + trim, written to every bandwidth
		 * variant register.
		 */
		ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i);
		trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
		val = ofdm_de + trim_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs=0x%x trim=0x%x\n",
			    i, ofdm_de, trim_de);

		rtw89_phy_write32_mask(rtwdev, r_mcs_20m[i], __DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, r_mcs_40m[i], __DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, r_mcs_80m[i], __DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, r_mcs_80m_80m[i], __DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, r_mcs_5m[i], __DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, r_mcs_10m[i], __DE_MASK, val);

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] Set TSSI MCS DE 0x%x[21:12]=0x%x\n",
			    r_mcs_20m[i],
			    rtw89_phy_read32_mask(rtwdev, r_mcs_20m[i],
						  __DE_MASK));
	}
#undef __DE_MASK
}
3469 
/* Track thermal drift and compensate TX gain on each RF path that has
 * TSSI tracking armed (flag set by _tssi_high_power()). Reads the current
 * thermal value, derives a gain offset from the delta against the stored
 * baseline and programs it into the per-path TX gain scale registers.
 */
static void _tssi_track(struct rtw89_dev *rtwdev)
{
	/* Indexed by the low 3 bits of the gain offset; the second half of
	 * the table serves negative offsets (sign bit propagated into bit 3).
	 * NOTE(review): exact fixed-point encoding of these values is not
	 * visible here - confirm against the vendor reference.
	 */
	static const u32 tx_gain_scale_table[] = {
		0x400, 0x40e, 0x41d, 0x427, 0x43c, 0x44c, 0x45c, 0x46c,
		0x400, 0x39d, 0x3ab, 0x3b8, 0x3c6, 0x3d4, 0x3e2, 0x3f1
	};
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	u8 path;
	u8 cur_ther;
	s32 delta_ther = 0, gain_offset_int, gain_offset_float;
	s8 gain_offset;

	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRK] %s:\n",
		    __func__);

	/* Tracking only runs while TSSI mode is active on both paths. */
	if (!rtwdev->is_tssi_mode[RF_PATH_A])
		return;
	if (!rtwdev->is_tssi_mode[RF_PATH_B])
		return;

	for (path = RF_PATH_A; path < RF_PATH_NUM_8852A; path++) {
		if (!tssi_info->tssi_tracking_check[path]) {
			rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRK] return!!!\n");
			continue;
		}

		/* Per-path register stride is 0x2000 (path << 13). */
		cur_ther = (u8)rtw89_phy_read32_mask(rtwdev,
						  R_TSSI_THER + (path << 13),
						  B_TSSI_THER);

		/* Need both a live reading and a calibration baseline. */
		if (cur_ther == 0 || tssi_info->base_thermal[path] == 0)
			continue;

		delta_ther = cur_ther - tssi_info->base_thermal[path];

		/* Scale the thermal delta by 1.5 (integer math, truncating). */
		gain_offset = (s8)delta_ther * 15 / 10;

		tssi_info->extra_ofst[path] = gain_offset;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRK] base_thermal=%d gain_offset=0x%x path=%d\n",
			    tssi_info->base_thermal[path], gain_offset, path);

		/* Split the offset: upper bits form the integer step, the
		 * low 3 bits index the fractional scale table above.
		 */
		gain_offset_int = gain_offset >> 3;
		gain_offset_float = gain_offset & 7;

		/* Clamp the integer step to the [-16, 15] register range. */
		if (gain_offset_int > 15)
			gain_offset_int = 15;
		else if (gain_offset_int < -16)
			gain_offset_int = -16;

		rtw89_phy_write32_mask(rtwdev, R_DPD_OFT_EN + (path << 13),
				       B_DPD_OFT_EN, 0x1);

		rtw89_phy_write32_mask(rtwdev, R_TXGAIN_SCALE + (path << 13),
				       B_TXGAIN_SCALE_EN, 0x1);

		rtw89_phy_write32_mask(rtwdev, R_DPD_OFT_ADDR + (path << 13),
				       B_DPD_OFT_ADDR, gain_offset_int);

		rtw89_phy_write32_mask(rtwdev, R_TXGAIN_SCALE + (path << 13),
				       B_TXGAIN_SCALE_OFT,
				       tx_gain_scale_table[gain_offset_float]);
	}
}
3535 
3536 static void _tssi_high_power(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
3537 {
3538 	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3539 	u8 ch = rtwdev->hal.current_channel, ch_tmp;
3540 	u8 bw = rtwdev->hal.current_band_width;
3541 	u8 subband = rtwdev->hal.current_subband;
3542 	s8 power;
3543 	s32 xdbm;
3544 
3545 	if (bw == RTW89_CHANNEL_WIDTH_40)
3546 		ch_tmp = ch - 2;
3547 	else if (bw == RTW89_CHANNEL_WIDTH_80)
3548 		ch_tmp = ch - 6;
3549 	else
3550 		ch_tmp = ch;
3551 
3552 	power = rtw89_phy_read_txpwr_limit(rtwdev, bw, RTW89_1TX,
3553 					   RTW89_RS_MCS, RTW89_NONBF, ch_tmp);
3554 
3555 	xdbm = power * 100 / 4;
3556 
3557 	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d xdbm=%d\n",
3558 		    __func__, phy, xdbm);
3559 
3560 	if (xdbm > 1800 && subband == RTW89_CH_2G) {
3561 		tssi_info->tssi_tracking_check[RF_PATH_A] = true;
3562 		tssi_info->tssi_tracking_check[RF_PATH_B] = true;
3563 	} else {
3564 		rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_tracking_defs_tbl);
3565 		tssi_info->extra_ofst[RF_PATH_A] = 0;
3566 		tssi_info->extra_ofst[RF_PATH_B] = 0;
3567 		tssi_info->tssi_tracking_check[RF_PATH_A] = false;
3568 		tssi_info->tssi_tracking_check[RF_PATH_B] = false;
3569 	}
3570 }
3571 
/* Start (or stop, per @enable) a PMAC-driven hardware TX burst used by
 * TSSI calibration: set PLCP, select the TX @path, apply @pwr_dbm, then
 * kick packet TX. NOTE(review): the 20/5000/0 arguments are presumably
 * tx_cnt/period/tx_time for rtw8852a_bb_set_pmac_pkt_tx() - confirm
 * against its definition.
 */
static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			u8 path, s16 pwr_dbm, u8 enable)
{
	rtw8852a_bb_set_plcp_tx(rtwdev);
	rtw8852a_bb_cfg_tx_path(rtwdev, path);
	rtw8852a_bb_set_power(rtwdev, pwr_dbm, phy);
	rtw8852a_bb_set_pmac_pkt_tx(rtwdev, enable, 20, 5000, 0, phy);
}
3580 
/* Fire a short hardware TX burst so the baseband latches per-path TXAGC
 * offsets, then snapshot those offsets into tssi_info for later restore.
 * Scheduled TX is stopped and BTC is notified (as a DPK-type RFK) around
 * the burst.
 */
static void _tssi_pre_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const struct rtw89_chip_info *mac_reg = rtwdev->chip;
	u8 ch = rtwdev->hal.current_channel, ch_tmp;
	u8 bw = rtwdev->hal.current_band_width;
	u16 tx_en;
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy, 0);
	s8 power;
	s16 xdbm;
	u32 i, tx_counter = 0;

	/* Map the center channel to a 20 MHz reference channel for the
	 * regulatory limit lookup.
	 */
	if (bw == RTW89_CHANNEL_WIDTH_40)
		ch_tmp = ch - 2;
	else if (bw == RTW89_CHANNEL_WIDTH_80)
		ch_tmp = ch - 6;
	else
		ch_tmp = ch;

	power = rtw89_phy_read_txpwr_limit(rtwdev, RTW89_CHANNEL_WIDTH_20, RTW89_1TX,
					   RTW89_RS_OFDM, RTW89_NONBF, ch_tmp);

	/* NOTE(review): this xdbm (scaled by the MAC txpwr factor) is only
	 * used for the 1800 threshold test; it is unconditionally replaced
	 * by one of the two values below before being transmitted.
	 */
	xdbm = (power * 100) >> mac_reg->txpwr_factor_mac;

	if (xdbm > 1800)
		xdbm = 68;
	else
		xdbm = power * 2;

	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
		    "[TSSI] %s: phy=%d org_power=%d xdbm=%d\n",
		    __func__, phy, power, xdbm);

	/* Quiesce: tell BTC, stop scheduled TX, wait for RX to go idle. */
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
	rtw89_mac_stop_sch_tx(rtwdev, phy, &tx_en, RTW89_SCH_TX_SEL_ALL);
	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy));
	tx_counter = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);

	/* Run the burst on both paths for 15 ms, then stop it. */
	_tssi_hw_tx(rtwdev, phy, RF_PATH_AB, xdbm, true);
	mdelay(15);
	_tssi_hw_tx(rtwdev, phy, RF_PATH_AB, xdbm, false);

	/* Number of frames actually sent during the burst (debug only). */
	tx_counter = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD) -
		    tx_counter;

	/* Path A: if TXAGC latched something meaningful (neither the 0xc000
	 * sentinel nor zero), poll up to 6 times for a non-zero offset byte.
	 */
	if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, MASKHWORD) != 0xc000 &&
	    rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, MASKHWORD) != 0x0) {
		for (i = 0; i < 6; i++) {
			tssi_info->default_txagc_offset[RF_PATH_A] =
				rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB,
						      MASKBYTE3);

			if (tssi_info->default_txagc_offset[RF_PATH_A] != 0x0)
				break;
		}
	}

	/* Path B: same backup procedure on the S1 register. */
	if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, MASKHWORD) != 0xc000 &&
	    rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, MASKHWORD) != 0x0) {
		for (i = 0; i < 6; i++) {
			tssi_info->default_txagc_offset[RF_PATH_B] =
				rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1,
						      MASKBYTE3);

			if (tssi_info->default_txagc_offset[RF_PATH_B] != 0x0)
				break;
		}
	}

	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
		    "[TSSI] %s: tx counter=%d\n",
		    __func__, tx_counter);

	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
		    "[TSSI] Backup R_TXAGC_BB=0x%x R_TXAGC_BB_S1=0x%x\n",
		    tssi_info->default_txagc_offset[RF_PATH_A],
		    tssi_info->default_txagc_offset[RF_PATH_B]);

	/* Leave PMAC TX mode and restore normal operation. */
	rtw8852a_bb_tx_mode_switch(rtwdev, phy, 0);

	rtw89_mac_resume_sch_tx(rtwdev, phy, tx_en);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
}
3664 
3665 void rtw8852a_rck(struct rtw89_dev *rtwdev)
3666 {
3667 	u8 path;
3668 
3669 	for (path = 0; path < 2; path++)
3670 		_rck(rtwdev, path);
3671 }
3672 
3673 void rtw8852a_dack(struct rtw89_dev *rtwdev)
3674 {
3675 	u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0);
3676 
3677 	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START);
3678 	_dac_cal(rtwdev, false);
3679 	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP);
3680 }
3681 
3682 void rtw8852a_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
3683 {
3684 	u16 tx_en;
3685 	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
3686 
3687 	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
3688 	rtw89_mac_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
3689 	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
3690 
3691 	_iqk_init(rtwdev);
3692 	if (rtwdev->dbcc_en)
3693 		_iqk_dbcc(rtwdev, phy_idx);
3694 	else
3695 		_iqk(rtwdev, phy_idx, false);
3696 
3697 	rtw89_mac_resume_sch_tx(rtwdev, phy_idx, tx_en);
3698 	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
3699 }
3700 
/* Periodic IQK tracking entry point; delegates to the internal tracker. */
void rtw8852a_iqk_track(struct rtw89_dev *rtwdev)
{
	_iqk_track(rtwdev);
}
3705 
3706 void rtw8852a_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
3707 		     bool is_afe)
3708 {
3709 	u16 tx_en;
3710 	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
3711 
3712 	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START);
3713 	rtw89_mac_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
3714 	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
3715 
3716 	_rx_dck(rtwdev, phy_idx, is_afe);
3717 
3718 	rtw89_mac_resume_sch_tx(rtwdev, phy_idx, tx_en);
3719 	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP);
3720 }
3721 
3722 void rtw8852a_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
3723 {
3724 	u16 tx_en;
3725 	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
3726 
3727 	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
3728 	rtw89_mac_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
3729 	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
3730 
3731 	rtwdev->dpk.is_dpk_enable = true;
3732 	rtwdev->dpk.is_dpk_reload_en = false;
3733 	_dpk(rtwdev, phy_idx, false);
3734 
3735 	rtw89_mac_resume_sch_tx(rtwdev, phy_idx, tx_en);
3736 	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
3737 }
3738 
/* Periodic DPK tracking entry point; delegates to the internal tracker. */
void rtw8852a_dpk_track(struct rtw89_dev *rtwdev)
{
	_dpk_track(rtwdev);
}
3743 
3744 void rtw8852a_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
3745 {
3746 	u8 i;
3747 
3748 	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n",
3749 		    __func__, phy);
3750 
3751 	_tssi_disable(rtwdev, phy);
3752 
3753 	for (i = RF_PATH_A; i < RF_PATH_NUM_8852A; i++) {
3754 		_tssi_rf_setting(rtwdev, phy, i);
3755 		_tssi_set_sys(rtwdev, phy);
3756 		_tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i);
3757 		_tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i);
3758 		_tssi_set_dck(rtwdev, phy, i);
3759 		_tssi_set_tmeter_tbl(rtwdev, phy, i);
3760 		_tssi_set_dac_gain_tbl(rtwdev, phy, i);
3761 		_tssi_slope_cal_org(rtwdev, phy, i);
3762 		_tssi_set_rf_gap_tbl(rtwdev, phy, i);
3763 		_tssi_set_slope(rtwdev, phy, i);
3764 		_tssi_pak(rtwdev, phy, i);
3765 	}
3766 
3767 	_tssi_enable(rtwdev, phy);
3768 	_tssi_set_efuse_to_de(rtwdev, phy);
3769 	_tssi_high_power(rtwdev, phy);
3770 	_tssi_pre_tx(rtwdev, phy);
3771 }
3772 
3773 void rtw8852a_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
3774 {
3775 	u8 i;
3776 
3777 	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n",
3778 		    __func__, phy);
3779 
3780 	if (!rtwdev->is_tssi_mode[RF_PATH_A])
3781 		return;
3782 	if (!rtwdev->is_tssi_mode[RF_PATH_B])
3783 		return;
3784 
3785 	_tssi_disable(rtwdev, phy);
3786 
3787 	for (i = RF_PATH_A; i < RF_PATH_NUM_8852A; i++) {
3788 		_tssi_rf_setting(rtwdev, phy, i);
3789 		_tssi_set_sys(rtwdev, phy);
3790 		_tssi_set_tmeter_tbl(rtwdev, phy, i);
3791 		_tssi_pak(rtwdev, phy, i);
3792 	}
3793 
3794 	_tssi_enable(rtwdev, phy);
3795 	_tssi_set_efuse_to_de(rtwdev, phy);
3796 }
3797 
/* Periodic TSSI tracking entry point; delegates to the internal tracker. */
void rtw8852a_tssi_track(struct rtw89_dev *rtwdev)
{
	_tssi_track(rtwdev);
}
3802 
/* Scan-time TSSI averaging setup: write 0x0 to both paths' averaging
 * fields, with TSSI disabled around the register writes. Counterpart of
 * _rtw8852a_tssi_set_avg(), which restores the non-scan values.
 */
static
void _rtw8852a_tssi_avg_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B])
		return;

	/* Disable TSSI while the averaging windows are reprogrammed. */
	rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_disable_defs_tbl);

	/* Path A averaging cleared for scan. */
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_AVG, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_AVG, 0x0);

	/* Path B averaging cleared for scan. */
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, B_P1_TSSI_AVG, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_AVG, 0x0);

	/* Re-enable TSSI on both paths. */
	rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_enable_defs_ab_tbl);
}
3821 
/* Restore the normal (non-scan) TSSI averaging values (0x4 / 0x2) on
 * both paths, with TSSI disabled around the register writes.
 * Counterpart of _rtw8852a_tssi_avg_scan().
 */
static
void _rtw8852a_tssi_set_avg(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B])
		return;

	/* Disable TSSI while the averaging windows are reprogrammed. */
	rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_disable_defs_tbl);

	/* Path A averaging restored. */
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_AVG, 0x4);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_AVG, 0x2);

	/* Path B averaging restored. */
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, B_P1_TSSI_AVG, 0x4);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_AVG, 0x2);

	/* Re-enable TSSI on both paths. */
	rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_enable_defs_ab_tbl);
}
3840 
3841 static void rtw8852a_tssi_set_avg(struct rtw89_dev *rtwdev,
3842 				  enum rtw89_phy_idx phy, bool enable)
3843 {
3844 	if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B])
3845 		return;
3846 
3847 	if (enable) {
3848 		/* SCAN_START */
3849 		_rtw8852a_tssi_avg_scan(rtwdev, phy);
3850 	} else {
3851 		/* SCAN_END */
3852 		_rtw8852a_tssi_set_avg(rtwdev, phy);
3853 	}
3854 }
3855 
/* On @enable (scan start): back up each path's current TXAGC offset
 * into tssi_info. On disable (scan end): write the backed-up offsets
 * back and pulse the offset-enable bit to latch them.
 */
static void rtw8852a_tssi_default_txagc(struct rtw89_dev *rtwdev,
					enum rtw89_phy_idx phy, bool enable)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	u8 i;

	if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B])
		return;

	if (enable) {
		/* Path A: if the offset field holds something meaningful
		 * (neither the 0xc000 sentinel nor zero), poll up to 6
		 * times for a non-zero value to back up.
		 */
		if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, B_TXAGC_BB_OFT) != 0xc000 &&
		    rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, B_TXAGC_BB_OFT) != 0x0) {
			for (i = 0; i < 6; i++) {
				tssi_info->default_txagc_offset[RF_PATH_A] =
					rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB,
							      B_TXAGC_BB);
				if (tssi_info->default_txagc_offset[RF_PATH_A])
					break;
			}
		}

		/* Path B: same backup procedure on the S1 register. */
		if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, B_TXAGC_BB_S1_OFT) != 0xc000 &&
		    rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, B_TXAGC_BB_S1_OFT) != 0x0) {
			for (i = 0; i < 6; i++) {
				tssi_info->default_txagc_offset[RF_PATH_B] =
					rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1,
							      B_TXAGC_BB_S1);
				if (tssi_info->default_txagc_offset[RF_PATH_B])
					break;
			}
		}
	} else {
		/* Restore the saved offsets on both paths. */
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT,
				       tssi_info->default_txagc_offset[RF_PATH_A]);
		rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT,
				       tssi_info->default_txagc_offset[RF_PATH_B]);

		/* Pulse OFT_EN (0 -> 1) so the hardware takes the new value. */
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x1);

		rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x1);
	}
}
3900 
3901 void rtw8852a_wifi_scan_notify(struct rtw89_dev *rtwdev,
3902 			       bool scan_start, enum rtw89_phy_idx phy_idx)
3903 {
3904 	if (scan_start) {
3905 		rtw8852a_tssi_default_txagc(rtwdev, phy_idx, true);
3906 		rtw8852a_tssi_set_avg(rtwdev, phy_idx, true);
3907 	} else {
3908 		rtw8852a_tssi_default_txagc(rtwdev, phy_idx, false);
3909 		rtw8852a_tssi_set_avg(rtwdev, phy_idx, false);
3910 	}
3911 }
3912