xref: /openbmc/linux/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c (revision c496daeb863093a046e0bb8db7265bf45d91775a)
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright(c) 2022-2023  Realtek Corporation
3  */
4 
5 #include "coex.h"
6 #include "debug.h"
7 #include "mac.h"
8 #include "phy.h"
9 #include "reg.h"
10 #include "rtw8851b.h"
11 #include "rtw8851b_rfk.h"
12 #include "rtw8851b_rfk_table.h"
13 #include "rtw8851b_table.h"
14 
15 #define RTW8851B_RXK_GROUP_NR 4
16 #define RTW8851B_TXK_GROUP_NR 1
17 #define RTW8851B_IQK_VER 0x2a
18 #define RTW8851B_IQK_SS 1
19 #define RTW8851B_LOK_GRAM 10
20 
/* One-shot NCTL calibration command IDs issued via _iqk_one_shot().
 * Values are hardware-defined opcodes, hence not sequential.
 * "A_" variants are used on the 5 GHz band, "G_" on 2.4 GHz
 * (matching the a_*/g_* parameter tables below).
 */
enum rtw8851b_iqk_type {
	ID_TXAGC = 0x0,
	ID_FLOK_COARSE = 0x1,	/* LO-leakage cal, coarse stage */
	ID_FLOK_FINE = 0x2,	/* LO-leakage cal, fine stage */
	ID_TXK = 0x3,		/* wideband TX IQ cal */
	ID_RXAGC = 0x4,		/* RX AGC setup step before RXK */
	ID_RXK = 0x5,		/* wideband RX IQ cal */
	ID_NBTXK = 0x6,		/* narrowband TX IQ cal */
	ID_NBRXK = 0x7,		/* narrowband RX IQ cal */
	ID_FLOK_VBUFFER = 0x8,
	ID_A_FLOK_COARSE = 0x9,
	ID_G_FLOK_COARSE = 0xa,
	ID_A_FLOK_FINE = 0xb,
	ID_G_FLOK_FINE = 0xc,
	ID_IQK_RESTORE = 0x10,
};
37 
/* Per-group RXK parameters: RX gain index, C2 attenuation and RX AGC
 * index for each of the RTW8851B_RXK_GROUP_NR calibration groups.
 * "g_" tables serve the 2 GHz RXK flows, "a_" tables the 5 GHz flows
 * (see _rxk_2g_group_sel()/_rxk_5g_group_sel()).
 */
static const u32 g_idxrxgain[RTW8851B_RXK_GROUP_NR] = {0x10e, 0x116, 0x28e, 0x296};
static const u32 g_idxattc2[RTW8851B_RXK_GROUP_NR] = {0x0, 0xf, 0x0, 0xf};
static const u32 g_idxrxagc[RTW8851B_RXK_GROUP_NR] = {0x0, 0x1, 0x2, 0x3};
static const u32 a_idxrxgain[RTW8851B_RXK_GROUP_NR] = {0x10C, 0x112, 0x28c, 0x292};
static const u32 a_idxattc2[RTW8851B_RXK_GROUP_NR] = {0xf, 0xf, 0xf, 0xf};
static const u32 a_idxrxagc[RTW8851B_RXK_GROUP_NR] = {0x4, 0x5, 0x6, 0x7};
/* Per-group TXK parameters: TX power range, tracking range, BB gain
 * and KIP IQ/TX setting word; same a_/g_ band split as above.
 */
static const u32 a_power_range[RTW8851B_TXK_GROUP_NR] = {0x0};
static const u32 a_track_range[RTW8851B_TXK_GROUP_NR] = {0x6};
static const u32 a_gain_bb[RTW8851B_TXK_GROUP_NR] = {0x0a};
static const u32 a_itqt[RTW8851B_TXK_GROUP_NR] = {0x12};
static const u32 g_power_range[RTW8851B_TXK_GROUP_NR] = {0x0};
static const u32 g_track_range[RTW8851B_TXK_GROUP_NR] = {0x6};
static const u32 g_gain_bb[RTW8851B_TXK_GROUP_NR] = {0x10};
static const u32 g_itqt[RTW8851B_TXK_GROUP_NR] = {0x12};

/* BB and RF register addresses saved/restored around calibration.
 * NOTE(review): the save/restore helpers themselves are outside this
 * chunk — confirm usage against the rest of the file.
 */
static const u32 rtw8851b_backup_bb_regs[] = {0xc0ec, 0xc0e8};
static const u32 rtw8851b_backup_rf_regs[] = {
	0xef, 0xde, 0x0, 0x1e, 0x2, 0x85, 0x90, 0x5};

#define BACKUP_BB_REGS_NR ARRAY_SIZE(rtw8851b_backup_bb_regs)
#define BACKUP_RF_REGS_NR ARRAY_SIZE(rtw8851b_backup_rf_regs)
59 
/* Return the RF-path bitmap to calibrate for @phy_idx.
 * This chip only ever calibrates path A (every caller in this file
 * operates on a single path), so @phy_idx is unused.
 */
static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
	return RF_A;
}
64 
/* Pulse the ADC FIFO reset: assert (0x0101), hold 10 us, then release
 * back to normal operation (0x1111). @phy_idx and @path are unused.
 */
static void _adc_fifo_rst(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
			  u8 path)
{
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0101);
	fsleep(10);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x1111);
}
72 
/* For every path selected in @kpath, poll RF register 0x00 (mode field)
 * until it leaves value 2 — i.e. the path has switched to RX mode — or
 * the 5 ms poll window expires. Timeouts are only logged, not fatal.
 */
static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath)
{
	u32 rf_mode;
	u8 path;
	int ret;

	for (path = 0; path < RF_PATH_MAX; path++) {
		if (!(kpath & BIT(path)))
			continue;

		/* done when mode != 2; poll every 2 us, up to 5 ms */
		ret = read_poll_timeout_atomic(rtw89_read_rf, rf_mode,
					       rf_mode != 2, 2, 5000, false,
					       rtwdev, path, 0x00, RR_MOD_MASK);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RFK] Wait S%d to Rx mode!! (ret = %d)\n",
			    path, ret);
	}
}
91 
/* Toggle the DACK reset bit (low then high) to re-arm the DACK engine.
 * @path is unused here; the register is not path-indexed.
 */
static void _dack_reset(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
	rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_RST, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_RST, 0x1);
}
97 
/* Ddie RC calibration (DRCK): run the hardware engine, latch its
 * result and program it back as the manual RC value.
 */
static void _drck(struct rtw89_dev *rtwdev)
{
	u32 rck_d;
	u32 val;
	int ret;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]Ddie RCK start!!!\n");

	/* enter idle state, then start the calibration engine */
	rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_IDLE, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_EN, 0x1);

	/* wait up to 10 ms for the done flag */
	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
				       1, 10000, false,
				       rtwdev, R_DRCK_RES, B_DRCK_POL);
	if (ret)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DRCK timeout\n");

	/* stop the engine and pulse the latch to capture the result */
	rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_DRCK_FH, B_DRCK_LAT, 0x1);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_DRCK_FH, B_DRCK_LAT, 0x0);

	/* copy result field (bits [14:10]) into the manual-value field
	 * and leave idle mode
	 */
	rck_d = rtw89_phy_read32_mask(rtwdev, R_DRCK_RES, 0x7c00);
	rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_IDLE, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_VAL, rck_d);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0xc0c4 = 0x%x\n",
		    rtw89_phy_read32_mask(rtwdev, R_DRCK, MASKDWORD));
}
127 
/* Stop ADDCK and save the resulting S0 I/Q DC offsets into
 * dack->addck_d[0][0..1] for later reload.
 */
static void _addck_backup(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;

	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x0);

	dack->addck_d[0][0] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_A0);
	dack->addck_d[0][1] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_A1);
}
137 
/* Write the saved ADDCK offsets back into the reload registers and
 * enable manual reload (RLS = 0x3). Note the cross mapping — result
 * A0 goes to the RL1 field and A1 to RL0; presumably this matches the
 * hardware field layout (TODO confirm against the register spec).
 */
static void _addck_reload(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;

	rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL1, dack->addck_d[0][0]);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL0, dack->addck_d[0][1]);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RLS, 0x3);
}
146 
/* Save all S0 DACK results: the per-index MSBK words (selected via the
 * DCOF0/DCOF8 index fields), the bias words and the DADCK words.
 * Leaves the debug access bit (B_P0_NRBW_DBG) set; _dack_s0() clears
 * it after reload.
 */
static void _dack_backup_s0(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u8 i;

	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);

	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
		/* select word i, then read it back for each of I/Q */
		rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_V, i);
		dack->msbk_d[0][0][i] =
			rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0M0);

		rtw89_phy_write32_mask(rtwdev, R_DCOF8, B_DCOF8_V, i);
		dack->msbk_d[0][1][i] =
			rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0M1);
	}

	dack->biask_d[0][0] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS00, B_DACK_BIAS00);
	dack->biask_d[0][1] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS01, B_DACK_BIAS01);
	/* +24: fixed offset applied when backing up DADCK — rationale is
	 * not visible in this file; presumably a hardware bias
	 */
	dack->dadck_d[0][0] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK00, B_DACK_DADCK00) + 24;
	dack->dadck_d[0][1] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK01, B_DACK_DADCK01) + 24;
}
173 
174 static void _dack_reload_by_path(struct rtw89_dev *rtwdev,
175 				 enum rtw89_rf_path path, u8 index)
176 {
177 	struct rtw89_dack_info *dack = &rtwdev->dack;
178 	u32 idx_offset, path_offset;
179 	u32 offset, reg;
180 	u32 tmp;
181 	u8 i;
182 
183 	if (index == 0)
184 		idx_offset = 0;
185 	else
186 		idx_offset = 0x14;
187 
188 	if (path == RF_PATH_A)
189 		path_offset = 0;
190 	else
191 		path_offset = 0x28;
192 
193 	offset = idx_offset + path_offset;
194 
195 	rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_RST, 0x1);
196 	rtw89_phy_write32_mask(rtwdev, R_DCOF9, B_DCOF9_RST, 0x1);
197 
198 	/* msbk_d: 15/14/13/12 */
199 	tmp = 0x0;
200 	for (i = 0; i < 4; i++)
201 		tmp |= dack->msbk_d[path][index][i + 12] << (i * 8);
202 	reg = 0xc200 + offset;
203 	rtw89_phy_write32(rtwdev, reg, tmp);
204 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", reg,
205 		    rtw89_phy_read32_mask(rtwdev, reg, MASKDWORD));
206 
207 	/* msbk_d: 11/10/9/8 */
208 	tmp = 0x0;
209 	for (i = 0; i < 4; i++)
210 		tmp |= dack->msbk_d[path][index][i + 8] << (i * 8);
211 	reg = 0xc204 + offset;
212 	rtw89_phy_write32(rtwdev, reg, tmp);
213 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", reg,
214 		    rtw89_phy_read32_mask(rtwdev, reg, MASKDWORD));
215 
216 	/* msbk_d: 7/6/5/4 */
217 	tmp = 0x0;
218 	for (i = 0; i < 4; i++)
219 		tmp |= dack->msbk_d[path][index][i + 4] << (i * 8);
220 	reg = 0xc208 + offset;
221 	rtw89_phy_write32(rtwdev, reg, tmp);
222 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", reg,
223 		    rtw89_phy_read32_mask(rtwdev, reg, MASKDWORD));
224 
225 	/* msbk_d: 3/2/1/0 */
226 	tmp = 0x0;
227 	for (i = 0; i < 4; i++)
228 		tmp |= dack->msbk_d[path][index][i] << (i * 8);
229 	reg = 0xc20c + offset;
230 	rtw89_phy_write32(rtwdev, reg, tmp);
231 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", reg,
232 		    rtw89_phy_read32_mask(rtwdev, reg, MASKDWORD));
233 
234 	/* dadak_d/biask_d */
235 	tmp = 0x0;
236 	tmp = (dack->biask_d[path][index] << 22) |
237 	      (dack->dadck_d[path][index] << 14);
238 	reg = 0xc210 + offset;
239 	rtw89_phy_write32(rtwdev, reg, tmp);
240 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", reg,
241 		    rtw89_phy_read32_mask(rtwdev, reg, MASKDWORD));
242 
243 	rtw89_phy_write32_mask(rtwdev, R_DACKN0_CTL + offset, B_DACKN0_EN, 0x1);
244 }
245 
246 static void _dack_reload(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
247 {
248 	u8 index;
249 
250 	for (index = 0; index < 2; index++)
251 		_dack_reload_by_path(rtwdev, path, index);
252 }
253 
/* Run ADC DC offset calibration (ADDCK) on S0: reset, pulse enable,
 * trigger, then poll the done bit (R_ADDCKR0 bit 0) for up to 10 ms.
 * A timeout is recorded in dack->addck_timeout[0] but not fatal.
 */
static void _addck(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u32 val;
	int ret;

	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_RST, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_EN, 0x0);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x1);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
				       1, 10000, false,
				       rtwdev, R_ADDCKR0, BIT(0));
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADDCK timeout\n");
		dack->addck_timeout[0] = true;
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]ADDCK ret = %d\n", ret);

	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_RST, 0x0);
}
278 
/* DAC DC offset calibration (DADCK): run the table-driven setup, wait
 * for the done bit, read the raw I and Q DC measurements (selected via
 * B_ADDCK0_IQ), convert them to correction codes and program them into
 * the DACKN0/DACKN1 control registers.
 */
static void _new_dadck(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u32 i_dc, q_dc, ic, qc;
	u32 val;
	int ret;

	rtw89_rfk_parser(rtwdev, &rtw8851b_dadck_setup_defs_tbl);

	/* wait up to 10 ms for the done bit (shared with ADDCK) */
	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
				       1, 10000, false,
				       rtwdev, R_ADDCKR0, BIT(0));
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DADCK timeout\n");
		dack->addck_timeout[0] = true;
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DADCK ret = %d\n", ret);

	/* read I (IQ sel = 0) then Q (IQ sel = 1) raw DC values */
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_IQ, 0x0);
	i_dc = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_DC);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_IQ, 0x1);
	q_dc = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_DC);

	/* raw values are 12-bit signed; scale by 6 around midpoint 0x80 */
	ic = 0x80 - sign_extend32(i_dc, 11) * 6;
	qc = 0x80 - sign_extend32(q_dc, 11) * 6;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DACK]before DADCK, i_dc=0x%x, q_dc=0x%x\n", i_dc, q_dc);

	dack->dadck_d[0][0] = ic;
	dack->dadck_d[0][1] = qc;

	rtw89_phy_write32_mask(rtwdev, R_DACKN0_CTL, B_DACKN0_V, dack->dadck_d[0][0]);
	rtw89_phy_write32_mask(rtwdev, R_DACKN1_CTL, B_DACKN1_V, dack->dadck_d[0][1]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DACK]after DADCK, 0xc210=0x%x, 0xc224=0x%x\n",
		    rtw89_phy_read32_mask(rtwdev, R_DACKN0_CTL, MASKDWORD),
		    rtw89_phy_read32_mask(rtwdev, R_DACKN1_CTL, MASKDWORD));

	rtw89_rfk_parser(rtwdev, &rtw8851b_dadck_post_defs_tbl);
}
321 
322 static bool _dack_s0_poll(struct rtw89_dev *rtwdev)
323 {
324 	if (rtw89_phy_read32_mask(rtwdev, R_DACK_S0P0, B_DACK_S0P0_OK) == 0 ||
325 	    rtw89_phy_read32_mask(rtwdev, R_DACK_S0P1, B_DACK_S0P1_OK) == 0 ||
326 	    rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0P2_OK) == 0 ||
327 	    rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0P3_OK) == 0)
328 		return false;
329 
330 	return true;
331 }
332 
/* Run the full S0 DACK sequence: table-driven setup, engine reset and
 * start, poll for completion (up to 10 ms), post-processing table,
 * then back up the results and reload them into the hardware.
 * A timeout is recorded in dack->msbk_timeout[0] but not fatal.
 */
static void _dack_s0(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	bool done;
	int ret;

	rtw89_rfk_parser(rtwdev, &rtw8851b_dack_s0_1_defs_tbl);
	_dack_reset(rtwdev, RF_PATH_A);
	rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_S, 0x1);

	ret = read_poll_timeout_atomic(_dack_s0_poll, done, done,
				       1, 10000, false, rtwdev);
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DACK timeout\n");
		dack->msbk_timeout[0] = true;
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);

	rtw89_rfk_parser(rtwdev, &rtw8851b_dack_s0_2_defs_tbl);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S0 DADCK\n");

	_dack_backup_s0(rtwdev);
	_dack_reload(rtwdev, RF_PATH_A);

	/* clear the debug-access bit set by _dack_backup_s0() */
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0);
}
361 
/* DACK entry point; only S0 exists on this chip. */
static void _dack(struct rtw89_dev *rtwdev)
{
	_dack_s0(rtwdev);
}
366 
367 static void _dack_dump(struct rtw89_dev *rtwdev)
368 {
369 	struct rtw89_dack_info *dack = &rtwdev->dack;
370 	u8 i;
371 	u8 t;
372 
373 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADC_DCK ic = 0x%x, qc = 0x%x\n",
374 		    dack->addck_d[0][0], dack->addck_d[0][1]);
375 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n",
376 		    dack->dadck_d[0][0], dack->dadck_d[0][1]);
377 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 biask ic = 0x%x, qc = 0x%x\n",
378 		    dack->biask_d[0][0], dack->biask_d[0][1]);
379 
380 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK ic:\n");
381 	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
382 		t = dack->msbk_d[0][0][i];
383 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
384 	}
385 
386 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK qc:\n");
387 	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
388 		t = dack->msbk_d[0][1][i];
389 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
390 	}
391 }
392 
/* Disable manual DACK overrides via the table-driven writes. */
static void _dack_manual_off(struct rtw89_dev *rtwdev)
{
	rtw89_rfk_parser(rtwdev, &rtw8851b_dack_manual_off_defs_tbl);
}
397 
/* Top-level DAC calibration: DRCK, then ADDCK (with backup/reload),
 * then DACK and the new DADCK flow, bracketed by taking the RF path
 * out of and back into normal operation via RR_RSV1. @force is
 * currently unused — the calibration always runs.
 */
static void _dac_cal(struct rtw89_dev *rtwdev, bool force)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u32 rf0_0;

	dack->dack_done = false;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK 0x2\n");
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK start!!!\n");
	rf0_0 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]RF0=0x%x\n", rf0_0);

	_drck(rtwdev);
	_dack_manual_off(rtwdev);
	/* force RF mode/state for calibration and hold the path in reset */
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x337e1);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x0);

	_addck(rtwdev);
	_addck_backup(rtwdev);
	_addck_reload(rtwdev);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x40001);

	_dack(rtwdev);
	_new_dadck(rtwdev);
	_dack_dump(rtwdev);
	/* release the path from reset */
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x1);

	dack->dack_done = true;
	dack->dack_cnt++;
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK finish!!!\n");
}
429 
/* Dump the IQK SRAM RX capture to the debug log: 0xa0 entries of the
 * I-channel DC report (DCI) followed by 0xa0 entries of the Q-channel
 * report (DCQ), each selected by writing the SRAM index register.
 * @path is unused; the SRAM registers here are not path-indexed.
 */
static void _iqk_sram(struct rtw89_dev *rtwdev, u8 path)
{
	u32 i;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);

	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00020000);
	rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, MASKDWORD, 0x80000000);
	rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX2, MASKDWORD, 0x00000080);
	rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 0x00010000);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x009);

	/* first pass: I-channel DC values */
	for (i = 0; i <= 0x9f; i++) {
		rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD,
				       0x00010000 + i);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]0x%x\n",
			    rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI));
	}

	/* second pass: Q-channel DC values */
	for (i = 0; i <= 0x9f; i++) {
		rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD,
				       0x00010000 + i);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]0x%x\n",
			    rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCQ));
	}

	rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX2, MASKDWORD, 0x00000000);
	rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 0x00000000);
}
459 
/* Prepare @path for RXK: switch the RF mode register to the RX-cal
 * mode (0xc) and power-cycle the RXK PLL.
 */
static void _iqk_rxk_setting(struct rtw89_dev *rtwdev, u8 path)
{
	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
	rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
	rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x1);
}
466 
/* Wait for a one-shot IQK command to complete: first poll the NCTL
 * handshake byte at 0xbff8 for 0x55 (up to ~8.2 ms), then the report
 * ready word for 0x8000, and finally clear the handshake byte.
 * Returns true when either stage timed out ("not ready").
 */
static bool _iqk_check_cal(struct rtw89_dev *rtwdev, u8 path)
{
	bool fail1 = false, fail2 = false;
	u32 val;
	int ret;

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
				       10, 8200, false,
				       rtwdev, 0xbff8, MASKBYTE0);
	if (ret) {
		fail1 = true;
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK]NCTL1 IQK timeout!!!\n");
	}

	fsleep(10);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x8000,
				       10, 200, false,
				       rtwdev, R_RPT_COM, B_RPT_COM_RDY);
	if (ret) {
		fail2 = true;
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK]NCTL2 IQK timeout!!!\n");
	}

	fsleep(10);
	/* acknowledge/clear the handshake byte for the next command */
	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, MASKBYTE0, 0x0);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]S%x, ret = %d, notready = %x fail=%d,%d\n",
		    path, ret, fail1 || fail2, fail1, fail2);

	return fail1 || fail2;
}
502 
/* Issue one hardware calibration command of type @ktype and wait for
 * completion. The command word layout (as used below): bit (4 + path)
 * selects the path, bits [11:8] select the sub-operation (for TXK/RXK
 * the bandwidth is folded in), and the +1 added on write triggers it.
 * Returns true when the engine did not signal ready (see
 * _iqk_check_cal()).
 */
static bool _iqk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
			  u8 path, u8 ktype)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool notready;
	u32 iqk_cmd;

	switch (ktype) {
	case ID_A_FLOK_COARSE:
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK]============ S%d ID_A_FLOK_COARSE ============\n", path);
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x1);
		iqk_cmd = 0x108 | (1 << (4 + path));
		break;
	case ID_G_FLOK_COARSE:
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK]============ S%d ID_G_FLOK_COARSE ============\n", path);
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x1);
		iqk_cmd = 0x108 | (1 << (4 + path));
		break;
	case ID_A_FLOK_FINE:
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK]============ S%d ID_A_FLOK_FINE ============\n", path);
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x1);
		iqk_cmd = 0x308 | (1 << (4 + path));
		break;
	case ID_G_FLOK_FINE:
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK]============ S%d ID_G_FLOK_FINE ============\n", path);
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x1);
		iqk_cmd = 0x308 | (1 << (4 + path));
		break;
	case ID_TXK:
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK]============ S%d ID_TXK ============\n", path);
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x0);
		/* sub-op 0x8 + bandwidth in bits [11:8] */
		iqk_cmd = 0x008 | (1 << (path + 4)) |
			  (((0x8 + iqk_info->iqk_bw[path]) & 0xf) << 8);
		break;
	case ID_RXAGC:
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK]============ S%d ID_RXAGC ============\n", path);
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x1);
		iqk_cmd = 0x708 | (1 << (4 + path)) | (path << 1);
		break;
	case ID_RXK:
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK]============ S%d ID_RXK ============\n", path);
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x1);
		/* sub-op 0xc + bandwidth in bits [11:8] */
		iqk_cmd = 0x008 | (1 << (path + 4)) |
			  (((0xc + iqk_info->iqk_bw[path]) & 0xf) << 8);
		break;
	case ID_NBTXK:
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK]============ S%d ID_NBTXK ============\n", path);
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT,
				       0x00b);
		iqk_cmd = 0x408 | (1 << (4 + path));
		break;
	case ID_NBRXK:
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK]============ S%d ID_NBRXK ============\n", path);
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT,
				       0x011);
		iqk_cmd = 0x608 | (1 << (4 + path));
		break;
	default:
		/* unsupported ktype: report "ready" without issuing anything */
		return false;
	}

	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, iqk_cmd + 1);
	notready = _iqk_check_cal(rtwdev, path);
	if (iqk_info->iqk_sram_en &&
	    (ktype == ID_NBRXK || ktype == ID_RXK))
		_iqk_sram(rtwdev, path);

	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x0);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]S%x, ktype= %x, id = %x, notready = %x\n",
		    path, ktype, iqk_cmd + 1, notready);

	return notready;
}
588 
589 static bool _rxk_2g_group_sel(struct rtw89_dev *rtwdev,
590 			      enum rtw89_phy_idx phy_idx, u8 path)
591 {
592 	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
593 	bool kfail = false;
594 	bool notready;
595 	u32 rf_0;
596 	u8 gp;
597 
598 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
599 
600 	for (gp = 0; gp < RTW8851B_RXK_GROUP_NR; gp++) {
601 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, gp = %x\n", path, gp);
602 
603 		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM, g_idxrxgain[gp]);
604 		rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C2, g_idxattc2[gp]);
605 		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1);
606 		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x0);
607 		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP_V1, gp);
608 
609 		rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013);
610 		fsleep(10);
611 		rf_0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
612 		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, rf_0);
613 		rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, g_idxrxagc[gp]);
614 		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11);
615 
616 		notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXAGC);
617 
618 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
619 			    "[IQK]S%x, RXAGC 0x8008 = 0x%x, rxbb = %x\n", path,
620 			    rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD),
621 			    rtw89_read_rf(rtwdev, path, RR_MOD, 0x003e0));
622 
623 		if (gp == 0x3) {
624 			rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_OFF, 0x13);
625 			rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011);
626 			notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK);
627 			iqk_info->nb_rxcfir[path] =
628 				rtw89_phy_read32_mask(rtwdev, R_RXIQC, MASKDWORD) | 0x2;
629 
630 			rtw89_debug(rtwdev, RTW89_DBG_RFK,
631 				    "[IQK]S%x, NBRXK 0x8008 = 0x%x\n", path,
632 				    rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD));
633 		}
634 
635 		notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK);
636 
637 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
638 			    "[IQK]S%x, WBRXK 0x8008 = 0x%x\n", path,
639 			    rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD));
640 	}
641 
642 	if (!notready)
643 		kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);
644 
645 	if (kfail)
646 		_iqk_sram(rtwdev, path);
647 
648 	if (kfail) {
649 		rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
650 				       MASKDWORD, iqk_info->nb_rxcfir[path] | 0x2);
651 		iqk_info->is_wb_txiqk[path] = false;
652 	} else {
653 		rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
654 				       MASKDWORD, 0x40000000);
655 		iqk_info->is_wb_txiqk[path] = true;
656 	}
657 
658 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
659 		    "[IQK]S%x, kfail = 0x%x, 0x8%x3c = 0x%x\n", path, kfail,
660 		    1 << path, iqk_info->nb_rxcfir[path]);
661 	return kfail;
662 }
663 
664 static bool _rxk_5g_group_sel(struct rtw89_dev *rtwdev,
665 			      enum rtw89_phy_idx phy_idx, u8 path)
666 {
667 	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
668 	bool kfail = false;
669 	bool notready;
670 	u32 rf_0;
671 	u8 gp;
672 
673 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
674 
675 	for (gp = 0; gp < RTW8851B_RXK_GROUP_NR; gp++) {
676 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, gp = %x\n", path, gp);
677 
678 		rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, 0x03ff0, a_idxrxgain[gp]);
679 		rtw89_write_rf(rtwdev, RF_PATH_A, RR_RXA2, RR_RXA2_ATT, a_idxattc2[gp]);
680 
681 		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1);
682 		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x0);
683 		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP_V1, gp);
684 
685 		rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013);
686 		fsleep(100);
687 		rf_0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
688 		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, rf_0);
689 		rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, a_idxrxagc[gp]);
690 		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11);
691 		notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXAGC);
692 
693 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
694 			    "[IQK]S%x, RXAGC 0x8008 = 0x%x, rxbb = %x\n", path,
695 			    rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD),
696 			    rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_RXB));
697 
698 		if (gp == 0x3) {
699 			rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_OFF, 0x13);
700 			rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011);
701 			notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK);
702 			iqk_info->nb_rxcfir[path] =
703 				rtw89_phy_read32_mask(rtwdev, R_RXIQC, MASKDWORD) | 0x2;
704 
705 			rtw89_debug(rtwdev, RTW89_DBG_RFK,
706 				    "[IQK]S%x, NBRXK 0x8008 = 0x%x\n", path,
707 				    rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD));
708 		}
709 
710 		notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK);
711 
712 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
713 			    "[IQK]S%x, WBRXK 0x8008 = 0x%x\n", path,
714 			    rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD));
715 	}
716 
717 	if (!notready)
718 		kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);
719 
720 	if (kfail)
721 		_iqk_sram(rtwdev, path);
722 
723 	if (kfail) {
724 		rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD,
725 				       iqk_info->nb_rxcfir[path] | 0x2);
726 		iqk_info->is_wb_txiqk[path] = false;
727 	} else {
728 		rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD,
729 				       0x40000000);
730 		iqk_info->is_wb_txiqk[path] = true;
731 	}
732 
733 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
734 		    "[IQK]S%x, kfail = 0x%x, 0x8%x3c = 0x%x\n", path, kfail,
735 		    1 << path, iqk_info->nb_rxcfir[path]);
736 	return kfail;
737 }
738 
/* Narrowband 5 GHz RX IQ calibration, using only the last RXK group
 * (gp = 3). On failure a default CFIR value (0x40000002) is written.
 * Since this is narrowband-only, the wideband flag is cleared in both
 * branches. Returns true on calibration failure.
 */
static bool _iqk_5g_nbrxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
			  u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool kfail = false;
	bool notready;
	u8 gp = 0x3;
	u32 rf_0;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, gp = %x\n", path, gp);

	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RR_MOD_RGM, a_idxrxgain[gp]);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_RXA2, RR_RXA2_ATT, a_idxattc2[gp]);

	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP_V1, gp);

	rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013);
	fsleep(100);
	rf_0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, rf_0);
	rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, a_idxrxagc[gp]);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11);
	notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXAGC);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]S%x, RXAGC 0x8008 = 0x%x, rxbb = %x\n", path,
		    rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD),
		    rtw89_read_rf(rtwdev, path, RR_MOD, 0x003e0));

	if (gp == 0x3) {
		rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_OFF, 0x13);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011);
		notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK);
		iqk_info->nb_rxcfir[path] =
			rtw89_phy_read32_mask(rtwdev, R_RXIQC, MASKDWORD) | 0x2;

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK]S%x, NBRXK 0x8008 = 0x%x\n", path,
			    rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD));
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, WBRXK 0x8008 = 0x%x\n",
		    path, rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD));

	if (!notready)
		kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);

	if (kfail) {
		/* program a safe default CFIR on failure */
		rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
				       MASKDWORD, 0x40000002);
		iqk_info->is_wb_rxiqk[path] = false;
	} else {
		iqk_info->is_wb_rxiqk[path] = false;
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]S%x, kfail = 0x%x, 0x8%x3c = 0x%x\n", path, kfail,
		    1 << path, iqk_info->nb_rxcfir[path]);

	return kfail;
}
803 
/* Narrowband 2 GHz RX IQ calibration, using only the last RXK group
 * (gp = 3); 2 GHz counterpart of _iqk_5g_nbrxk(). On failure a default
 * CFIR value (0x40000002) is written. Since this is narrowband-only,
 * the wideband flag is cleared in both branches. Returns true on
 * calibration failure.
 */
static bool _iqk_2g_nbrxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
			  u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool kfail = false;
	bool notready;
	u8 gp = 0x3;
	u32 rf_0;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, gp = %x\n", path, gp);

	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM, g_idxrxgain[gp]);
	rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C2, g_idxattc2[gp]);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP_V1, gp);

	rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013);
	fsleep(10);
	rf_0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, rf_0);
	rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, g_idxrxagc[gp]);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11);
	notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXAGC);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]S%x, RXAGC 0x8008 = 0x%x, rxbb = %x\n",
		    path, rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD),
		    rtw89_read_rf(rtwdev, path, RR_MOD, 0x003e0));

	if (gp == 0x3) {
		rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_OFF, 0x13);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011);
		notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK);
		iqk_info->nb_rxcfir[path] =
			rtw89_phy_read32_mask(rtwdev, R_RXIQC, MASKDWORD) | 0x2;

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK]S%x, NBRXK 0x8008 = 0x%x\n", path,
			    rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD));
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, WBRXK 0x8008 = 0x%x\n",
		    path, rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD));

	if (!notready)
		kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);

	if (kfail) {
		/* program a safe default CFIR on failure */
		rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
				       MASKDWORD, 0x40000002);
		iqk_info->is_wb_rxiqk[path] = false;
	} else {
		iqk_info->is_wb_rxiqk[path] = false;
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]S%x, kfail = 0x%x, 0x8%x3c = 0x%x\n", path, kfail,
		    1 << path, iqk_info->nb_rxcfir[path]);
	return kfail;
}
866 
/* Configure the RX clock for RXK on @path: enable the RXBB clock bit,
 * then apply the bandwidth-specific table (80 MHz vs everything else).
 */
static void _iqk_rxclk_setting(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

	rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_CKT, 0x1);

	if (iqk_info->iqk_bw[path] == RTW89_CHANNEL_WIDTH_80)
		rtw89_rfk_parser(rtwdev, &rtw8851b_iqk_rxclk_80_defs_tbl);
	else
		rtw89_rfk_parser(rtwdev, &rtw8851b_iqk_rxclk_others_defs_tbl);
}
878 
/* Wideband TX IQK over all 5G TX gain groups on @path.
 *
 * For each group: program the TX gain (power range, tracking range, BB
 * gain), set up the CFIR LUT, then fire a narrowband TXK one-shot
 * (result cached in nb_txcfir as a fallback) followed by the wideband
 * TXK one-shot.
 *
 * Returns true if the NCTL reports a calibration failure.
 */
static bool _txk_5g_group_sel(struct rtw89_dev *rtwdev,
			      enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool kfail = false;
	bool notready;
	u8 gp;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);

	for (gp = 0x0; gp < RTW8851B_TXK_GROUP_NR; gp++) {
		/* TX gain setting for this group (5G "a_" tables). */
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, a_power_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, a_track_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, a_gain_bb[gp]);

		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G2, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP, gp);
		rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, MASKDWORD, a_itqt[gp]);

		/* Narrowband pass first; keep its CFIR (bit1 set = enable)
		 * as a fallback in case the wideband pass fails.
		 */
		notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);
		iqk_info->nb_txcfir[path] =
			rtw89_phy_read32_mask(rtwdev, R_TXIQC, MASKDWORD)  | 0x2;

		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       MASKDWORD, a_itqt[gp]);
		notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK);
	}

	/* notready reflects only the last group's one-shot; with
	 * RTW8851B_TXK_GROUP_NR == 1 that covers every group.
	 */
	if (!notready)
		kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);

	if (kfail) {
		/* Wideband failed: fall back to the narrowband CFIR result. */
		rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8),
				       MASKDWORD, iqk_info->nb_txcfir[path] | 0x2);
		iqk_info->is_wb_txiqk[path] = false;
	} else {
		rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8),
				       MASKDWORD, 0x40000000);
		iqk_info->is_wb_txiqk[path] = true;
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]S%x, kfail = 0x%x, 0x8%x38 = 0x%x\n", path, kfail,
		    1 << path, iqk_info->nb_txcfir[path]);
	return kfail;
}
928 
/* Wideband TX IQK over all 2G TX gain groups on @path.
 *
 * Mirrors _txk_5g_group_sel() but uses the 2G ("g_") gain/ITQT tables.
 * Returns true if the NCTL reports a calibration failure.
 */
static bool _txk_2g_group_sel(struct rtw89_dev *rtwdev,
			      enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool kfail = false;
	bool notready;
	u8 gp;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);

	for (gp = 0x0; gp < RTW8851B_TXK_GROUP_NR; gp++) {
		/* TX gain setting for this group (2G "g_" tables). */
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, g_power_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, g_track_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, g_gain_bb[gp]);

		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, MASKDWORD, g_itqt[gp]);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G2, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP, gp);
		rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);

		/* Narrowband pass first; cache its CFIR (bit1 = enable)
		 * as the fallback for a failed wideband pass.
		 */
		notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);
		iqk_info->nb_txcfir[path] =
			rtw89_phy_read32_mask(rtwdev, R_TXIQC, MASKDWORD)  | 0x2;

		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       MASKDWORD, g_itqt[gp]);
		notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK);
	}

	/* notready reflects only the last group; GROUP_NR is 1 here. */
	if (!notready)
		kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);

	if (kfail) {
		/* Wideband failed: fall back to the narrowband CFIR result. */
		rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8),
				       MASKDWORD, iqk_info->nb_txcfir[path] | 0x2);
		iqk_info->is_wb_txiqk[path] = false;
	} else {
		rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8),
				       MASKDWORD, 0x40000000);
		iqk_info->is_wb_txiqk[path] = true;
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]S%x, kfail = 0x%x, 0x8%x38 = 0x%x\n", path, kfail,
		    1 << path, iqk_info->nb_txcfir[path]);
	return kfail;
}
978 
/* Narrowband-only TX IQK for 5G on @path (no wideband TXK pass).
 * Returns true if the NCTL reports a calibration failure.
 */
static bool _iqk_5g_nbtxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
			  u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool kfail = false;
	bool notready;
	u8 gp;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);

	for (gp = 0x0; gp < RTW8851B_TXK_GROUP_NR; gp++) {
		/* TX gain setting for this group (5G "a_" tables). */
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, a_power_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, a_track_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, a_gain_bb[gp]);

		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G2, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP, gp);
		rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, MASKDWORD, a_itqt[gp]);

		/* Narrowband one-shot; keep the CFIR with bit1 (enable) set. */
		notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);
		iqk_info->nb_txcfir[path] =
			rtw89_phy_read32_mask(rtwdev, R_TXIQC, MASKDWORD)  | 0x2;
	}

	if (!notready)
		kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);

	/* NOTE(review): both branches touch is_wb_rxiqk (the RX flag) even
	 * though this is a TX calibration, and both clear it — looks like a
	 * copy/paste from the RX flow; confirm intended field.
	 */
	if (kfail) {
		rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8),
				       MASKDWORD, 0x40000002);
		iqk_info->is_wb_rxiqk[path] = false;
	} else {
		iqk_info->is_wb_rxiqk[path] = false;
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]S%x, kfail = 0x%x, 0x8%x38 = 0x%x\n", path, kfail,
		    1 << path, iqk_info->nb_txcfir[path]);
	return kfail;
}
1022 
/* Narrowband-only TX IQK for 2G on @path (no wideband TXK pass).
 * Returns true if the NCTL reports a calibration failure.
 */
static bool _iqk_2g_nbtxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
			  u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool kfail = false;
	bool notready;
	u8 gp;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);

	for (gp = 0x0; gp < RTW8851B_TXK_GROUP_NR; gp++) {
		/* TX gain setting for this group (2G "g_" tables). */
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, g_power_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, g_track_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, g_gain_bb[gp]);

		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, MASKDWORD, g_itqt[gp]);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G2, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP, gp);
		rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);

		/* Narrowband one-shot; keep the CFIR with bit1 (enable) set.
		 * Unlike the 5G variant, the readback here is offset by
		 * (path << 8).
		 */
		notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);
		iqk_info->nb_txcfir[path] =
			rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8),
					      MASKDWORD)  | 0x2;
	}

	if (!notready)
		kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);

	/* NOTE(review): both branches touch is_wb_rxiqk (the RX flag) even
	 * though this is a TX calibration, and both clear it — looks like a
	 * copy/paste from the RX flow; confirm intended field.
	 */
	if (kfail) {
		rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8),
				       MASKDWORD, 0x40000002);
		iqk_info->is_wb_rxiqk[path] = false;
	} else {
		iqk_info->is_wb_rxiqk[path] = false;
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]S%x, kfail = 0x%x, 0x8%x38 = 0x%x\n", path, kfail,
		    1 << path, iqk_info->nb_txcfir[path]);
	return kfail;
}
1067 
/* 2G LOK calibration sweep on RF path A.
 *
 * Walks RTW8851B_LOK_GRAM TX-BB gain points; for each point it triggers
 * two NCTL one-shots (0x...109 then 0x...309 written to R_NCTL_CFG) and
 * accumulates any completion-poll failure into the return value.
 * Returns true if any step failed.
 */
static bool _iqk_2g_lok(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
			u8 path)
{
	/* Per-step TX BB gain, ITQT (IQ swing) and LUT address values. */
	static const u32 g_txbb[RTW8851B_LOK_GRAM] = {
		0x02, 0x06, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16, 0x17};
	static const u32 g_itqt[RTW8851B_LOK_GRAM] = {
		0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x12, 0x12, 0x12, 0x1b};
	static const u32 g_wa[RTW8851B_LOK_GRAM] = {
		0x00, 0x04, 0x08, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16, 0x17};
	bool fail = false;
	u8 i;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);

	rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTDBG, RR_LUTDBG_LOK, 0x0);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_TXIG, RR_TXIG_GR0, 0x0);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_TXIG, RR_TXIG_GR1, 0x6);

	for (i = 0; i < RTW8851B_LOK_GRAM; i++) {
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_TXIG, RR_TXIG_TG, g_txbb[i]);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RR_LUTWA_M1, g_wa[i]);
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, B_KIP_IQP_IQSW, g_itqt[i]);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x021);
		/* First one-shot trigger; bit (4 + path) selects the path. */
		rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD,
				       0x00000109 | (1 << (4 + path)));
		fail |= _iqk_check_cal(rtwdev, path);

		rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, B_KIP_IQP_IQSW, g_itqt[i]);
		/* Second one-shot trigger (0x309 command).
		 * NOTE(review): unlike _iqk_5g_lok(), there is no second
		 * B_IQK_DIF4_TXT write before this trigger — confirm the
		 * asymmetry is intentional.
		 */
		rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD,
				       0x00000309 | (1 << (4 + path)));
		fail |= _iqk_check_cal(rtwdev, path);

		rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x0);

		/* Dump the resulting LOK readbacks for debugging. */
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK]S0, i = %x, 0x8[19:15] = 0x%x,0x8[09:05] = 0x%x\n", i,
			    rtw89_read_rf(rtwdev, RF_PATH_A, RR_DTXLOK, 0xf8000),
			    rtw89_read_rf(rtwdev, RF_PATH_A, RR_DTXLOK, 0x003e0));
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK]S0, i = %x, 0x9[19:16] = 0x%x,0x9[09:06] = 0x%x\n", i,
			    rtw89_read_rf(rtwdev, RF_PATH_A, RR_RSV2, 0xf0000),
			    rtw89_read_rf(rtwdev, RF_PATH_A, RR_RSV2, 0x003c0));
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK]S0, i = %x, 0x58 = %x\n", i,
			    rtw89_read_rf(rtwdev, RF_PATH_A, RR_TXMO, RFREG_MASK));
	}

	return fail;
}
1120 
/* 5G LOK calibration sweep on RF path A.
 *
 * Same structure as _iqk_2g_lok() but with the 5G ("a_") per-step tables
 * and GR1 set to 0x7 instead of 0x6. Returns true if any step failed.
 */
static bool _iqk_5g_lok(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
			u8 path)
{
	/* Per-step TX BB gain, ITQT (IQ swing) and LUT address values. */
	static const u32 a_txbb[RTW8851B_LOK_GRAM] = {
		0x02, 0x06, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16, 0x17};
	static const u32 a_itqt[RTW8851B_LOK_GRAM] = {
		0x09, 0x09, 0x09, 0x12, 0x12, 0x12, 0x1b, 0x1b, 0x1b, 0x1b};
	static const u32 a_wa[RTW8851B_LOK_GRAM] = {
		0x80, 0x84, 0x88, 0x8c, 0x8e, 0x90, 0x92, 0x94, 0x96, 0x97};
	bool fail = false;
	u8 i;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);

	rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTDBG, RR_LUTDBG_LOK, 0x0);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_TXIG, RR_TXIG_GR0, 0x0);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_TXIG, RR_TXIG_GR1, 0x7);

	for (i = 0; i < RTW8851B_LOK_GRAM; i++) {
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_TXIG, RR_TXIG_TG, a_txbb[i]);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RR_LUTWA_M1, a_wa[i]);
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, B_KIP_IQP_IQSW, a_itqt[i]);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x021);
		/* First one-shot trigger; bit (4 + path) selects the path. */
		rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD,
				       0x00000109 | (1 << (4 + path)));
		fail |= _iqk_check_cal(rtwdev, path);

		rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, B_KIP_IQP_IQSW, a_itqt[i]);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x021);
		/* Second one-shot trigger (0x309 command). */
		rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD,
				       0x00000309 | (1 << (4 + path)));
		fail |= _iqk_check_cal(rtwdev, path);

		rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x0);

		/* Dump the resulting LOK readbacks for debugging. */
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK]S0, i = %x, 0x8[19:15] = 0x%x,0x8[09:05] = 0x%x\n", i,
			    rtw89_read_rf(rtwdev, RF_PATH_A, RR_DTXLOK, 0xf8000),
			    rtw89_read_rf(rtwdev, RF_PATH_A, RR_DTXLOK, 0x003e0));
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK]S0, i = %x, 0x9[19:16] = 0x%x,0x9[09:06] = 0x%x\n", i,
			    rtw89_read_rf(rtwdev, RF_PATH_A, RR_RSV2, 0xf0000),
			    rtw89_read_rf(rtwdev, RF_PATH_A, RR_RSV2, 0x003c0));
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK]S0, i = %x, 0x58 = %x\n", i,
			    rtw89_read_rf(rtwdev, RF_PATH_A, RR_TXMO, RFREG_MASK));
	}

	return fail;
}
1174 
1175 static void _iqk_txk_setting(struct rtw89_dev *rtwdev, u8 path)
1176 {
1177 	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1178 
1179 	switch (iqk_info->iqk_band[path]) {
1180 	case RTW89_BAND_2G:
1181 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]RTW89_BAND_2G\n");
1182 		rtw89_rfk_parser(rtwdev, &rtw8851b_iqk_txk_2ghz_defs_tbl);
1183 		break;
1184 	case RTW89_BAND_5G:
1185 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]RTW89_BAND_5G\n");
1186 		rtw89_rfk_parser(rtwdev, &rtw8851b_iqk_txk_5ghz_defs_tbl);
1187 		break;
1188 	default:
1189 		break;
1190 	}
1191 }
1192 
1193 #define IQK_LOK_RETRY 1
1194 
static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
			 u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool lok_is_fail;
	u8 i;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);

	/* Stage 1: LOK, retried up to IQK_LOK_RETRY times until it passes. */
	for (i = 0; i < IQK_LOK_RETRY; i++) {
		_iqk_txk_setting(rtwdev, path);
		if (iqk_info->iqk_band[path] == RTW89_BAND_2G)
			lok_is_fail = _iqk_2g_lok(rtwdev, phy_idx, path);
		else
			lok_is_fail = _iqk_5g_lok(rtwdev, phy_idx, path);

		if (!lok_is_fail)
			break;
	}

	/* Stage 2: TX IQK — narrowband-only or full wideband group sweep,
	 * selected by is_nbiqk; pick the routine matching the band.
	 */
	if (iqk_info->is_nbiqk) {
		if (iqk_info->iqk_band[path] == RTW89_BAND_2G)
			iqk_info->iqk_tx_fail[0][path] =
				_iqk_2g_nbtxk(rtwdev, phy_idx, path);
		else
			iqk_info->iqk_tx_fail[0][path] =
				_iqk_5g_nbtxk(rtwdev, phy_idx, path);
	} else {
		if (iqk_info->iqk_band[path] == RTW89_BAND_2G)
			iqk_info->iqk_tx_fail[0][path] =
				_txk_2g_group_sel(rtwdev, phy_idx, path);
		else
			iqk_info->iqk_tx_fail[0][path] =
				_txk_5g_group_sel(rtwdev, phy_idx, path);
	}

	/* Stage 3: switch to the RX clock/RF configuration and reset the
	 * ADC FIFO before running the RX calibration.
	 */
	_iqk_rxclk_setting(rtwdev, path);
	_iqk_rxk_setting(rtwdev, path);
	_adc_fifo_rst(rtwdev, phy_idx, path);

	/* Stage 4: RX IQK, again narrowband or wideband per is_nbiqk. */
	if (iqk_info->is_nbiqk) {
		if (iqk_info->iqk_band[path] == RTW89_BAND_2G)
			iqk_info->iqk_rx_fail[0][path] =
				_iqk_2g_nbrxk(rtwdev, phy_idx, path);
		else
			iqk_info->iqk_rx_fail[0][path] =
				_iqk_5g_nbrxk(rtwdev, phy_idx, path);
	} else {
		if (iqk_info->iqk_band[path] == RTW89_BAND_2G)
			iqk_info->iqk_rx_fail[0][path] =
				_rxk_2g_group_sel(rtwdev, phy_idx, path);
		else
			iqk_info->iqk_rx_fail[0][path] =
				_rxk_5g_group_sel(rtwdev, phy_idx, path);
	}
}
1251 
1252 static void _rfk_backup_bb_reg(struct rtw89_dev *rtwdev,
1253 			       u32 backup_bb_reg_val[])
1254 {
1255 	u32 i;
1256 
1257 	for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
1258 		backup_bb_reg_val[i] =
1259 			rtw89_phy_read32_mask(rtwdev, rtw8851b_backup_bb_regs[i],
1260 					      MASKDWORD);
1261 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
1262 			    "[RFK]backup bb reg : %x, value =%x\n",
1263 			    rtw8851b_backup_bb_regs[i], backup_bb_reg_val[i]);
1264 	}
1265 }
1266 
1267 static void _rfk_backup_rf_reg(struct rtw89_dev *rtwdev,
1268 			       u32 backup_rf_reg_val[], u8 rf_path)
1269 {
1270 	u32 i;
1271 
1272 	for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
1273 		backup_rf_reg_val[i] =
1274 			rtw89_read_rf(rtwdev, rf_path,
1275 				      rtw8851b_backup_rf_regs[i], RFREG_MASK);
1276 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
1277 			    "[RFK]backup rf S%d reg : %x, value =%x\n", rf_path,
1278 			    rtw8851b_backup_rf_regs[i], backup_rf_reg_val[i]);
1279 	}
1280 }
1281 
1282 static void _rfk_restore_bb_reg(struct rtw89_dev *rtwdev,
1283 				const u32 backup_bb_reg_val[])
1284 {
1285 	u32 i;
1286 
1287 	for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
1288 		rtw89_phy_write32_mask(rtwdev, rtw8851b_backup_bb_regs[i],
1289 				       MASKDWORD, backup_bb_reg_val[i]);
1290 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
1291 			    "[RFK]restore bb reg : %x, value =%x\n",
1292 			    rtw8851b_backup_bb_regs[i], backup_bb_reg_val[i]);
1293 	}
1294 }
1295 
1296 static void _rfk_restore_rf_reg(struct rtw89_dev *rtwdev,
1297 				const u32 backup_rf_reg_val[], u8 rf_path)
1298 {
1299 	u32 i;
1300 
1301 	for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
1302 		rtw89_write_rf(rtwdev, rf_path, rtw8851b_backup_rf_regs[i],
1303 			       RFREG_MASK, backup_rf_reg_val[i]);
1304 
1305 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
1306 			    "[RFK]restore rf S%d reg: %x, value =%x\n", rf_path,
1307 			    rtw8851b_backup_rf_regs[i], backup_rf_reg_val[i]);
1308 	}
1309 }
1310 
/* Record the current channel parameters (band/bandwidth/channel) for
 * @path into the IQK bookkeeping before a calibration run.
 */
static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			     u8 path)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	/* Only table slot 0 is used on this chip. */
	u8 idx = 0;

	iqk_info->iqk_band[path] = chan->band_type;
	iqk_info->iqk_bw[path] = chan->band_width;
	iqk_info->iqk_ch[path] = chan->channel;
	iqk_info->iqk_table_idx[path] = idx;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d (PHY%d): / DBCC %s/ %s/ CH%d/ %s\n",
		    path, phy, rtwdev->dbcc_en ? "on" : "off",
		    iqk_info->iqk_band[path] == 0 ? "2G" :
		    iqk_info->iqk_band[path] == 1 ? "5G" : "6G",
		    iqk_info->iqk_ch[path],
		    iqk_info->iqk_bw[path] == 0 ? "20M" :
		    iqk_info->iqk_bw[path] == 1 ? "40M" : "80M");
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]times = 0x%x, ch =%x\n",
		    iqk_info->iqk_times, idx);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, iqk_info->syn1to2= 0x%x\n",
		    path, iqk_info->syn1to2);
}
1335 
/* Thin wrapper kicking off the per-path IQK sequence. */
static void _iqk_start_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
			   u8 path)
{
	_iqk_by_path(rtwdev, phy_idx, path);
}
1341 
/* Issue the IQK-restore NCTL command and bring the KIP/NCTL blocks back
 * to their idle configuration after calibration.
 */
static void _iqk_restore(struct rtw89_dev *rtwdev, u8 path)
{
	bool fail;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);

	/* 0x1219 = ID_IQK_RESTORE command; give the HW 10us to latch it. */
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00001219);
	fsleep(10);
	fail = _iqk_check_cal(rtwdev, path);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] restore fail=%d\n", fail);

	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000000);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000);
}
1357 
/* Restore the AFE/BB registers modified for IQK via the parser table. */
static void _iqk_afebb_restore(struct rtw89_dev *rtwdev,
			       enum rtw89_phy_idx phy_idx, u8 path)
{
	rtw89_rfk_parser(rtwdev, &rtw8851b_iqk_afebb_restore_defs_tbl);
}
1363 
/* Pre-calibration setup: release the RF reset and initialize the
 * NCTL report / KIP system configuration registers.
 */
static void _iqk_preset(struct rtw89_dev *rtwdev, u8 path)
{
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);

	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x81ff010a);
}
1372 
/* Apply the MAC/BB register setup table required before IQK runs. */
static void _iqk_macbb_setting(struct rtw89_dev *rtwdev,
			       enum rtw89_phy_idx phy_idx, u8 path)
{
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);

	rtw89_rfk_parser(rtwdev, &rtw8851b_iqk_macbb_defs_tbl);
}
1380 
1381 static void _iqk_init(struct rtw89_dev *rtwdev)
1382 {
1383 	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1384 	u8 idx, path;
1385 
1386 	rtw89_phy_write32_mask(rtwdev, R_IQKINF, MASKDWORD, 0x0);
1387 
1388 	if (iqk_info->is_iqk_init)
1389 		return;
1390 
1391 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
1392 
1393 	iqk_info->is_iqk_init = true;
1394 	iqk_info->is_nbiqk = false;
1395 	iqk_info->iqk_fft_en = false;
1396 	iqk_info->iqk_sram_en = false;
1397 	iqk_info->iqk_cfir_en = false;
1398 	iqk_info->iqk_xym_en = false;
1399 	iqk_info->thermal_rek_en = false;
1400 	iqk_info->iqk_times = 0x0;
1401 
1402 	for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) {
1403 		iqk_info->iqk_channel[idx] = 0x0;
1404 		for (path = 0; path < RF_PATH_NUM_8851B; path++) {
1405 			iqk_info->lok_cor_fail[idx][path] = false;
1406 			iqk_info->lok_fin_fail[idx][path] = false;
1407 			iqk_info->iqk_tx_fail[idx][path] = false;
1408 			iqk_info->iqk_rx_fail[idx][path] = false;
1409 			iqk_info->iqk_table_idx[path] = 0x0;
1410 		}
1411 	}
1412 }
1413 
1414 static void _doiqk(struct rtw89_dev *rtwdev, bool force,
1415 		   enum rtw89_phy_idx phy_idx, u8 path)
1416 {
1417 	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1418 	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB);
1419 	u32 backup_rf_val[RTW8851B_IQK_SS][BACKUP_RF_REGS_NR];
1420 	u32 backup_bb_val[BACKUP_BB_REGS_NR];
1421 
1422 	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK,
1423 			      BTC_WRFK_ONESHOT_START);
1424 
1425 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
1426 		    "[IQK]==========IQK strat!!!!!==========\n");
1427 	iqk_info->iqk_times++;
1428 	iqk_info->kcount = 0;
1429 	iqk_info->version = RTW8851B_IQK_VER;
1430 
1431 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);
1432 	_iqk_get_ch_info(rtwdev, phy_idx, path);
1433 
1434 	_rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]);
1435 	_rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
1436 	_iqk_macbb_setting(rtwdev, phy_idx, path);
1437 	_iqk_preset(rtwdev, path);
1438 	_iqk_start_iqk(rtwdev, phy_idx, path);
1439 	_iqk_restore(rtwdev, path);
1440 	_iqk_afebb_restore(rtwdev, phy_idx, path);
1441 	_rfk_restore_bb_reg(rtwdev, &backup_bb_val[0]);
1442 	_rfk_restore_rf_reg(rtwdev, &backup_rf_val[path][0], path);
1443 
1444 	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK,
1445 			      BTC_WRFK_ONESHOT_STOP);
1446 }
1447 
/* IQK entry point; the 8851B has a single RF path (A).
 * NOTE(review): @force is only forwarded, and _doiqk() does not appear
 * to use it — confirm intended behavior.
 */
static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force)
{
	_doiqk(rtwdev, force, phy_idx, RF_PATH_A);
}
1452 
/* RC calibration (RCK) for @path: trigger the hardware RCK, poll for
 * completion, then write the resulting capacitor code back into RR_RCKC.
 * RR_RSV1 is saved/restored around the sequence.
 */
static void _rck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
	u32 rf_reg5;
	u32 rck_val;
	u32 val;
	int ret;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] ====== S%d RCK ======\n", path);

	rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);

	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF0x00 = 0x%05x\n",
		    rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK));

	/* RCK trigger */
	rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, 0x00240);

	/* Wait up to 30us for the RCK-done bit; timeout is only logged. */
	ret = read_poll_timeout_atomic(rtw89_read_rf, val, val, 2, 30,
				       false, rtwdev, path, RR_RCKS, BIT(3));

	rck_val = rtw89_read_rf(rtwdev, path, RR_RCKC, RR_RCKC_CA);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] rck_val = 0x%x, ret = %d\n",
		    rck_val, ret);

	rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, rck_val);
	rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF 0x1b = 0x%x\n",
		    rtw89_read_rf(rtwdev, path, RR_RCKC, RFREG_MASK));
}
1487 
/* Automatic amplitude control calibration (AACK) on RF path A.
 *
 * Retries the calibration up to 4 times; a round is accepted once all
 * four VCO-bank current readbacks (RR_IBD) are non-zero.
 */
void rtw8851b_aack(struct rtw89_dev *rtwdev)
{
	u32 tmp05, ib[4];
	u32 tmp;
	int ret;
	int rek;
	int i;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]DO AACK\n");

	/* Save RR_RSV1 and put the path into the mode AACK requires. */
	tmp05 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_RSV1, RFREG_MASK);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RR_MOD_MASK, 0x3);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RFREG_MASK, 0x0);

	for (rek = 0; rek < 4; rek++) {
		/* Toggle the AACK trigger bit (…e -> …f) to start a run. */
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_AACK, RFREG_MASK, 0x8201e);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_AACK, RFREG_MASK, 0x8201f);
		fsleep(100);

		ret = read_poll_timeout_atomic(rtw89_read_rf, tmp, tmp,
					       1, 1000, false,
					       rtwdev, RF_PATH_A, 0xd0, BIT(16));
		if (ret)
			rtw89_warn(rtwdev, "[LCK]AACK timeout\n");

		/* Read back the VCO bank currents to validate the run. */
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_VCI, RR_VCI_ON, 0x1);
		for (i = 0; i < 4; i++) {
			rtw89_write_rf(rtwdev, RF_PATH_A, RR_VCO, RR_VCO_SEL, i);
			ib[i] = rtw89_read_rf(rtwdev, RF_PATH_A, RR_IBD, RR_IBD_VAL);
		}
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_VCI, RR_VCI_ON, 0x0);

		if (ib[0] != 0 && ib[1] != 0 && ib[2] != 0 && ib[3] != 0)
			break;
	}

	if (rek != 0)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]AACK rek = %d\n", rek);

	rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RFREG_MASK, tmp05);
}
1529 
/* Chip-level RCK entry point; the 8851B only has RF path A. */
void rtw8851b_rck(struct rtw89_dev *rtwdev)
{
	_rck(rtwdev, RF_PATH_A);
}
1534 
/* Chip-level DACK entry point (non-forced DAC calibration). */
void rtw8851b_dack(struct rtw89_dev *rtwdev)
{
	_dac_cal(rtwdev, false);
}
1539 
/* Chip-level IQK entry point: coordinate with BT coex, pause scheduled
 * TX and wait for RX idle, run the calibration, then resume TX.
 */
void rtw8851b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
	u32 tx_en;

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
	rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));

	_iqk_init(rtwdev);
	_iqk(rtwdev, phy_idx, false);

	/* Restore the TX enable state saved by stop_sch_tx. */
	rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
}
1555 
/* Program the channel-bandwidth field of RF register 0x18 for @path.
 * @dav selects which copy of the register to write (RR_CFGCH vs the
 * RR_CFGCH_V1 shadow).
 */
static void _bw_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
			enum rtw89_bandwidth bw, bool dav)
{
	u32 reg18_addr = dav ? RR_CFGCH : RR_CFGCH_V1;
	u32 rf_reg18;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===> %s\n", __func__);

	rf_reg18 = rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK);
	if (rf_reg18 == INV_RF_DATA) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RFK]Invalid RF_0x18 for Path-%d\n", path);
		return;
	}
	rf_reg18 &= ~RR_CFGCH_BW;

	switch (bw) {
	case RTW89_CHANNEL_WIDTH_5:
	case RTW89_CHANNEL_WIDTH_10:
	case RTW89_CHANNEL_WIDTH_20:
		/* 5/10 MHz are handled as 20 MHz at the RF level. */
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_20M);
		break;
	case RTW89_CHANNEL_WIDTH_40:
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_40M);
		break;
	case RTW89_CHANNEL_WIDTH_80:
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_80M);
		break;
	default:
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]Fail to set CH\n");
	}

	/* The RHS binds as (~(bits) & RFREG_MASK): this clears the listed
	 * control bits AND truncates rf_reg18 to the RF register width.
	 */
	rf_reg18 &= ~(RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH | RR_CFGCH_BCN |
		      RR_CFGCH_BW2) & RFREG_MASK;
	rf_reg18 |= RR_CFGCH_BW2;
	rtw89_write_rf(rtwdev, path, reg18_addr, RFREG_MASK, rf_reg18);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK] set %x at path%d, %x =0x%x\n",
		    bw, path, reg18_addr,
		    rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK));
}
1597 
/* Apply the bandwidth to both copies of RF 0x18 (dav and ddv) on path A. */
static void _ctrl_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		     enum rtw89_bandwidth bw)
{
	_bw_setting(rtwdev, RF_PATH_A, bw, true);
	_bw_setting(rtwdev, RF_PATH_A, bw, false);
}
1604 
/* Write @val to S0 RF 0x18 (RR_CFGCH) and wait for the synthesizer LPF
 * busy flag to clear. RR_LDO is saved/restored around the write.
 *
 * Returns true if the busy-poll timed out (i.e. lock did NOT complete),
 * false on success — callers use this to decide whether to run the LCK
 * recovery sequence.
 */
static bool _set_s0_arfc18(struct rtw89_dev *rtwdev, u32 val)
{
	u32 bak;
	u32 tmp;
	int ret;

	bak = rtw89_read_rf(rtwdev, RF_PATH_A, RR_LDO, RFREG_MASK);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_LDO, RR_LDO_SEL, 0x1);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK, val);

	/* Poll up to 1ms for RR_LPF busy to deassert. */
	ret = read_poll_timeout_atomic(rtw89_read_rf, tmp, tmp == 0, 1, 1000,
				       false, rtwdev, RF_PATH_A, RR_LPF, RR_LPF_BUSY);
	if (ret)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]LCK timeout\n");

	rtw89_write_rf(rtwdev, RF_PATH_A, RR_LDO, RFREG_MASK, bak);

	return !!ret;
}
1624 
/* Synthesizer lock recovery, escalating through three levels; after each
 * level the lock indicator (RR_SYNFB_LK) is re-checked and the next level
 * only runs if it is still 0:
 *   1. reset the SYN MMD divider,
 *   2. re-program RF 0x18 with its current value via _set_s0_arfc18(),
 *   3. power-cycle the synthesizer and re-program RF 0x18 again.
 */
static void _lck_check(struct rtw89_dev *rtwdev)
{
	u32 tmp;

	if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]SYN MMD reset\n");

		rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_EN, 0x1);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_SYN, 0x0);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_SYN, 0x1);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_EN, 0x0);
	}

	udelay(10);

	if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]re-set RF 0x18\n");

		rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1);
		tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
		_set_s0_arfc18(rtwdev, tmp);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0);
	}

	if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]SYN off/on\n");

		/* Read-modify-rewrite of POW/SX to latch current values. */
		tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_POW, RFREG_MASK);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RFREG_MASK, tmp);
		tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_SX, RFREG_MASK);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_SX, RFREG_MASK, tmp);

		/* Power-cycle the synthesizer under SYNLUT mode. */
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_SYNLUT, RR_SYNLUT_MOD, 0x1);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x0);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x3);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_SYNLUT, RR_SYNLUT_MOD, 0x0);

		rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1);
		tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
		_set_s0_arfc18(rtwdev, tmp);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0);

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]0xb2=%x, 0xc5=%x\n",
			    rtw89_read_rf(rtwdev, RF_PATH_A, RR_VCO, RFREG_MASK),
			    rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RFREG_MASK));
	}
}
1672 
1673 static void _set_ch(struct rtw89_dev *rtwdev, u32 val)
1674 {
1675 	bool timeout;
1676 
1677 	timeout = _set_s0_arfc18(rtwdev, val);
1678 	if (!timeout)
1679 		_lck_check(rtwdev);
1680 }
1681 
/* Program the channel number (and band bits) into RF 0x18 for @path.
 * @dav selects the register copy; for path A's dav copy the write goes
 * through _set_ch() so synthesizer lock is verified.
 */
static void _ch_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
			u8 central_ch, bool dav)
{
	u32 reg18_addr = dav ? RR_CFGCH : RR_CFGCH_V1;
	bool is_2g_ch = central_ch <= 14;
	u32 rf_reg18;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===> %s\n", __func__);

	rf_reg18 = rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK);
	rf_reg18 &= ~(RR_CFGCH_BAND1 | RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH |
		      RR_CFGCH_BCN | RR_CFGCH_BAND0 | RR_CFGCH_CH);
	rf_reg18 |= FIELD_PREP(RR_CFGCH_CH, central_ch);

	/* Band bits stay 0 for 2G; set both band fields for 5G. */
	if (!is_2g_ch)
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_5G) |
			    FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_5G);

	/* The RHS binds as (~(bits) & RFREG_MASK): clears the control bits
	 * and truncates to the RF register width, then BW2 is re-set.
	 */
	rf_reg18 &= ~(RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH | RR_CFGCH_BCN |
		      RR_CFGCH_BW2) & RFREG_MASK;
	rf_reg18 |= RR_CFGCH_BW2;

	if (path == RF_PATH_A && dav)
		_set_ch(rtwdev, rf_reg18);
	else
		rtw89_write_rf(rtwdev, path, reg18_addr, RFREG_MASK, rf_reg18);

	/* Toggle the lock-state bin to restart lock detection. */
	rtw89_write_rf(rtwdev, path, RR_LCKST, RR_LCKST_BIN, 0);
	rtw89_write_rf(rtwdev, path, RR_LCKST, RR_LCKST_BIN, 1);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[RFK]CH: %d for Path-%d, reg0x%x = 0x%x\n",
		    central_ch, path, reg18_addr,
		    rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK));
}
1717 
1718 static void _ctrl_ch(struct rtw89_dev *rtwdev, u8 central_ch)
1719 {
1720 	_ch_setting(rtwdev, RF_PATH_A, central_ch, true);
1721 	_ch_setting(rtwdev, RF_PATH_A, central_ch, false);
1722 }
1723 
1724 static void _set_rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_bandwidth bw,
1725 			 enum rtw89_rf_path path)
1726 {
1727 	rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x1);
1728 	rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M2, 0x12);
1729 
1730 	if (bw == RTW89_CHANNEL_WIDTH_20)
1731 		rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x1b);
1732 	else if (bw == RTW89_CHANNEL_WIDTH_40)
1733 		rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x13);
1734 	else if (bw == RTW89_CHANNEL_WIDTH_80)
1735 		rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0xb);
1736 	else
1737 		rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x3);
1738 
1739 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK] set S%d RXBB BW 0x3F = 0x%x\n", path,
1740 		    rtw89_read_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB));
1741 
1742 	rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x0);
1743 }
1744 
1745 static void _rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
1746 		     enum rtw89_bandwidth bw)
1747 {
1748 	u8 kpath, path;
1749 
1750 	kpath = _kpath(rtwdev, phy);
1751 
1752 	for (path = 0; path < RF_PATH_NUM_8851B; path++) {
1753 		if (!(kpath & BIT(path)))
1754 			continue;
1755 
1756 		_set_rxbb_bw(rtwdev, bw, path);
1757 	}
1758 }
1759 
/* Apply channel then bandwidth configuration to the RF front end.
 * Call order (channel -> BW -> RXBB filter) follows the vendor
 * calibration sequence; do not reorder.
 * NOTE(review): @band is currently unused here — presumably kept for
 * interface symmetry with other chip variants; confirm before removing.
 */
static void rtw8851b_ctrl_bw_ch(struct rtw89_dev *rtwdev,
				enum rtw89_phy_idx phy, u8 central_ch,
				enum rtw89_band band, enum rtw89_bandwidth bw)
{
	_ctrl_ch(rtwdev, central_ch);
	_ctrl_bw(rtwdev, phy, bw);
	_rxbb_bw(rtwdev, phy, bw);
}
1768 
1769 void rtw8851b_set_channel_rf(struct rtw89_dev *rtwdev,
1770 			     const struct rtw89_chan *chan,
1771 			     enum rtw89_phy_idx phy_idx)
1772 {
1773 	rtw8851b_ctrl_bw_ch(rtwdev, phy_idx, chan->channel, chan->band_type,
1774 			    chan->band_width);
1775 }
1776