1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* Copyright(c) 2022-2023 Realtek Corporation 3 */ 4 5 #include "coex.h" 6 #include "debug.h" 7 #include "mac.h" 8 #include "phy.h" 9 #include "reg.h" 10 #include "rtw8851b.h" 11 #include "rtw8851b_rfk.h" 12 #include "rtw8851b_rfk_table.h" 13 #include "rtw8851b_table.h" 14 15 #define DPK_VER_8851B 0x5 16 #define DPK_KIP_REG_NUM_8851B 7 17 #define DPK_RF_REG_NUM_8851B 4 18 #define DPK_KSET_NUM 4 19 #define RTW8851B_RXK_GROUP_NR 4 20 #define RTW8851B_TXK_GROUP_NR 1 21 #define RTW8851B_IQK_VER 0x2a 22 #define RTW8851B_IQK_SS 1 23 #define RTW8851B_LOK_GRAM 10 24 #define RTW8851B_TSSI_PATH_NR 1 25 26 #define _TSSI_DE_MASK GENMASK(21, 12) 27 28 enum dpk_id { 29 LBK_RXIQK = 0x06, 30 SYNC = 0x10, 31 MDPK_IDL = 0x11, 32 MDPK_MPA = 0x12, 33 GAIN_LOSS = 0x13, 34 GAIN_CAL = 0x14, 35 DPK_RXAGC = 0x15, 36 KIP_PRESET = 0x16, 37 KIP_RESTORE = 0x17, 38 DPK_TXAGC = 0x19, 39 D_KIP_PRESET = 0x28, 40 D_TXAGC = 0x29, 41 D_RXAGC = 0x2a, 42 D_SYNC = 0x2b, 43 D_GAIN_LOSS = 0x2c, 44 D_MDPK_IDL = 0x2d, 45 D_MDPK_LDL = 0x2e, 46 D_GAIN_NORM = 0x2f, 47 D_KIP_THERMAL = 0x30, 48 D_KIP_RESTORE = 0x31 49 }; 50 51 enum dpk_agc_step { 52 DPK_AGC_STEP_SYNC_DGAIN, 53 DPK_AGC_STEP_GAIN_LOSS_IDX, 54 DPK_AGC_STEP_GL_GT_CRITERION, 55 DPK_AGC_STEP_GL_LT_CRITERION, 56 DPK_AGC_STEP_SET_TX_GAIN, 57 }; 58 59 enum rtw8851b_iqk_type { 60 ID_TXAGC = 0x0, 61 ID_FLOK_COARSE = 0x1, 62 ID_FLOK_FINE = 0x2, 63 ID_TXK = 0x3, 64 ID_RXAGC = 0x4, 65 ID_RXK = 0x5, 66 ID_NBTXK = 0x6, 67 ID_NBRXK = 0x7, 68 ID_FLOK_VBUFFER = 0x8, 69 ID_A_FLOK_COARSE = 0x9, 70 ID_G_FLOK_COARSE = 0xa, 71 ID_A_FLOK_FINE = 0xb, 72 ID_G_FLOK_FINE = 0xc, 73 ID_IQK_RESTORE = 0x10, 74 }; 75 76 enum rf_mode { 77 RF_SHUT_DOWN = 0x0, 78 RF_STANDBY = 0x1, 79 RF_TX = 0x2, 80 RF_RX = 0x3, 81 RF_TXIQK = 0x4, 82 RF_DPK = 0x5, 83 RF_RXK1 = 0x6, 84 RF_RXK2 = 0x7, 85 }; 86 87 static const u32 _tssi_de_cck_long[RF_PATH_NUM_8851B] = {0x5858}; 88 static const u32 _tssi_de_cck_short[RF_PATH_NUM_8851B] = 
{0x5860}; 89 static const u32 _tssi_de_mcs_20m[RF_PATH_NUM_8851B] = {0x5838}; 90 static const u32 _tssi_de_mcs_40m[RF_PATH_NUM_8851B] = {0x5840}; 91 static const u32 _tssi_de_mcs_80m[RF_PATH_NUM_8851B] = {0x5848}; 92 static const u32 _tssi_de_mcs_80m_80m[RF_PATH_NUM_8851B] = {0x5850}; 93 static const u32 _tssi_de_mcs_5m[RF_PATH_NUM_8851B] = {0x5828}; 94 static const u32 _tssi_de_mcs_10m[RF_PATH_NUM_8851B] = {0x5830}; 95 static const u32 g_idxrxgain[RTW8851B_RXK_GROUP_NR] = {0x10e, 0x116, 0x28e, 0x296}; 96 static const u32 g_idxattc2[RTW8851B_RXK_GROUP_NR] = {0x0, 0xf, 0x0, 0xf}; 97 static const u32 g_idxrxagc[RTW8851B_RXK_GROUP_NR] = {0x0, 0x1, 0x2, 0x3}; 98 static const u32 a_idxrxgain[RTW8851B_RXK_GROUP_NR] = {0x10C, 0x112, 0x28c, 0x292}; 99 static const u32 a_idxattc2[RTW8851B_RXK_GROUP_NR] = {0xf, 0xf, 0xf, 0xf}; 100 static const u32 a_idxrxagc[RTW8851B_RXK_GROUP_NR] = {0x4, 0x5, 0x6, 0x7}; 101 static const u32 a_power_range[RTW8851B_TXK_GROUP_NR] = {0x0}; 102 static const u32 a_track_range[RTW8851B_TXK_GROUP_NR] = {0x6}; 103 static const u32 a_gain_bb[RTW8851B_TXK_GROUP_NR] = {0x0a}; 104 static const u32 a_itqt[RTW8851B_TXK_GROUP_NR] = {0x12}; 105 static const u32 g_power_range[RTW8851B_TXK_GROUP_NR] = {0x0}; 106 static const u32 g_track_range[RTW8851B_TXK_GROUP_NR] = {0x6}; 107 static const u32 g_gain_bb[RTW8851B_TXK_GROUP_NR] = {0x10}; 108 static const u32 g_itqt[RTW8851B_TXK_GROUP_NR] = {0x12}; 109 110 static const u32 rtw8851b_backup_bb_regs[] = {0xc0ec, 0xc0e8}; 111 static const u32 rtw8851b_backup_rf_regs[] = { 112 0xef, 0xde, 0x0, 0x1e, 0x2, 0x85, 0x90, 0x5}; 113 114 #define BACKUP_BB_REGS_NR ARRAY_SIZE(rtw8851b_backup_bb_regs) 115 #define BACKUP_RF_REGS_NR ARRAY_SIZE(rtw8851b_backup_rf_regs) 116 117 static const u32 dpk_kip_reg[DPK_KIP_REG_NUM_8851B] = { 118 0x813c, 0x8124, 0xc0ec, 0xc0e8, 0xc0c4, 0xc0d4, 0xc0d8}; 119 static const u32 dpk_rf_reg[DPK_RF_REG_NUM_8851B] = {0xde, 0x8f, 0x5, 0x10005}; 120 121 static u8 _kpath(struct rtw89_dev *rtwdev, enum 
rtw89_phy_idx phy_idx) 122 { 123 return RF_A; 124 } 125 126 static void _adc_fifo_rst(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 127 u8 path) 128 { 129 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0101); 130 fsleep(10); 131 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x1111); 132 } 133 134 static void _rfk_rf_direct_cntrl(struct rtw89_dev *rtwdev, 135 enum rtw89_rf_path path, bool is_bybb) 136 { 137 if (is_bybb) 138 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1); 139 else 140 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0); 141 } 142 143 static void _rfk_drf_direct_cntrl(struct rtw89_dev *rtwdev, 144 enum rtw89_rf_path path, bool is_bybb) 145 { 146 if (is_bybb) 147 rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x1); 148 else 149 rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0); 150 } 151 152 static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath) 153 { 154 u32 rf_mode; 155 u8 path; 156 int ret; 157 158 for (path = 0; path < RF_PATH_MAX; path++) { 159 if (!(kpath & BIT(path))) 160 continue; 161 162 ret = read_poll_timeout_atomic(rtw89_read_rf, rf_mode, 163 rf_mode != 2, 2, 5000, false, 164 rtwdev, path, 0x00, RR_MOD_MASK); 165 rtw89_debug(rtwdev, RTW89_DBG_RFK, 166 "[RFK] Wait S%d to Rx mode!! 
(ret = %d)\n", 167 path, ret); 168 } 169 } 170 171 static void _dack_reset(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) 172 { 173 rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_RST, 0x0); 174 rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_RST, 0x1); 175 } 176 177 static void _drck(struct rtw89_dev *rtwdev) 178 { 179 u32 rck_d; 180 u32 val; 181 int ret; 182 183 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]Ddie RCK start!!!\n"); 184 185 rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_IDLE, 0x1); 186 rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_EN, 0x1); 187 188 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 189 1, 10000, false, 190 rtwdev, R_DRCK_RES, B_DRCK_POL); 191 if (ret) 192 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DRCK timeout\n"); 193 194 rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_EN, 0x0); 195 rtw89_phy_write32_mask(rtwdev, R_DRCK_FH, B_DRCK_LAT, 0x1); 196 udelay(1); 197 rtw89_phy_write32_mask(rtwdev, R_DRCK_FH, B_DRCK_LAT, 0x0); 198 199 rck_d = rtw89_phy_read32_mask(rtwdev, R_DRCK_RES, 0x7c00); 200 rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_IDLE, 0x0); 201 rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_VAL, rck_d); 202 203 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0xc0c4 = 0x%x\n", 204 rtw89_phy_read32_mask(rtwdev, R_DRCK, MASKDWORD)); 205 } 206 207 static void _addck_backup(struct rtw89_dev *rtwdev) 208 { 209 struct rtw89_dack_info *dack = &rtwdev->dack; 210 211 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x0); 212 213 dack->addck_d[0][0] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_A0); 214 dack->addck_d[0][1] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_A1); 215 } 216 217 static void _addck_reload(struct rtw89_dev *rtwdev) 218 { 219 struct rtw89_dack_info *dack = &rtwdev->dack; 220 221 rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL1, dack->addck_d[0][0]); 222 rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL0, dack->addck_d[0][1]); 223 rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, 
B_ADDCK0_RLS, 0x3); 224 } 225 226 static void _dack_backup_s0(struct rtw89_dev *rtwdev) 227 { 228 struct rtw89_dack_info *dack = &rtwdev->dack; 229 u8 i; 230 231 rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1); 232 233 for (i = 0; i < RTW89_DACK_MSBK_NR; i++) { 234 rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_V, i); 235 dack->msbk_d[0][0][i] = 236 rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0M0); 237 238 rtw89_phy_write32_mask(rtwdev, R_DCOF8, B_DCOF8_V, i); 239 dack->msbk_d[0][1][i] = 240 rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0M1); 241 } 242 243 dack->biask_d[0][0] = 244 rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS00, B_DACK_BIAS00); 245 dack->biask_d[0][1] = 246 rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS01, B_DACK_BIAS01); 247 dack->dadck_d[0][0] = 248 rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK00, B_DACK_DADCK00) + 24; 249 dack->dadck_d[0][1] = 250 rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK01, B_DACK_DADCK01) + 24; 251 } 252 253 static void _dack_reload_by_path(struct rtw89_dev *rtwdev, 254 enum rtw89_rf_path path, u8 index) 255 { 256 struct rtw89_dack_info *dack = &rtwdev->dack; 257 u32 idx_offset, path_offset; 258 u32 offset, reg; 259 u32 tmp; 260 u8 i; 261 262 if (index == 0) 263 idx_offset = 0; 264 else 265 idx_offset = 0x14; 266 267 if (path == RF_PATH_A) 268 path_offset = 0; 269 else 270 path_offset = 0x28; 271 272 offset = idx_offset + path_offset; 273 274 rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_RST, 0x1); 275 rtw89_phy_write32_mask(rtwdev, R_DCOF9, B_DCOF9_RST, 0x1); 276 277 /* msbk_d: 15/14/13/12 */ 278 tmp = 0x0; 279 for (i = 0; i < 4; i++) 280 tmp |= dack->msbk_d[path][index][i + 12] << (i * 8); 281 reg = 0xc200 + offset; 282 rtw89_phy_write32(rtwdev, reg, tmp); 283 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", reg, 284 rtw89_phy_read32_mask(rtwdev, reg, MASKDWORD)); 285 286 /* msbk_d: 11/10/9/8 */ 287 tmp = 0x0; 288 for (i = 0; i < 4; i++) 289 tmp |= dack->msbk_d[path][index][i + 8] << (i * 8); 290 
reg = 0xc204 + offset; 291 rtw89_phy_write32(rtwdev, reg, tmp); 292 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", reg, 293 rtw89_phy_read32_mask(rtwdev, reg, MASKDWORD)); 294 295 /* msbk_d: 7/6/5/4 */ 296 tmp = 0x0; 297 for (i = 0; i < 4; i++) 298 tmp |= dack->msbk_d[path][index][i + 4] << (i * 8); 299 reg = 0xc208 + offset; 300 rtw89_phy_write32(rtwdev, reg, tmp); 301 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", reg, 302 rtw89_phy_read32_mask(rtwdev, reg, MASKDWORD)); 303 304 /* msbk_d: 3/2/1/0 */ 305 tmp = 0x0; 306 for (i = 0; i < 4; i++) 307 tmp |= dack->msbk_d[path][index][i] << (i * 8); 308 reg = 0xc20c + offset; 309 rtw89_phy_write32(rtwdev, reg, tmp); 310 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", reg, 311 rtw89_phy_read32_mask(rtwdev, reg, MASKDWORD)); 312 313 /* dadak_d/biask_d */ 314 tmp = 0x0; 315 tmp = (dack->biask_d[path][index] << 22) | 316 (dack->dadck_d[path][index] << 14); 317 reg = 0xc210 + offset; 318 rtw89_phy_write32(rtwdev, reg, tmp); 319 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", reg, 320 rtw89_phy_read32_mask(rtwdev, reg, MASKDWORD)); 321 322 rtw89_phy_write32_mask(rtwdev, R_DACKN0_CTL + offset, B_DACKN0_EN, 0x1); 323 } 324 325 static void _dack_reload(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) 326 { 327 u8 index; 328 329 for (index = 0; index < 2; index++) 330 _dack_reload_by_path(rtwdev, path, index); 331 } 332 333 static void _addck(struct rtw89_dev *rtwdev) 334 { 335 struct rtw89_dack_info *dack = &rtwdev->dack; 336 u32 val; 337 int ret; 338 339 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_RST, 0x1); 340 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_EN, 0x1); 341 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_EN, 0x0); 342 udelay(1); 343 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x1); 344 345 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 346 1, 10000, false, 347 rtwdev, R_ADDCKR0, BIT(0)); 348 if (ret) { 349 rtw89_debug(rtwdev, RTW89_DBG_RFK, 
"[DACK]S0 ADDCK timeout\n"); 350 dack->addck_timeout[0] = true; 351 } 352 353 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]ADDCK ret = %d\n", ret); 354 355 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_RST, 0x0); 356 } 357 358 static void _new_dadck(struct rtw89_dev *rtwdev) 359 { 360 struct rtw89_dack_info *dack = &rtwdev->dack; 361 u32 i_dc, q_dc, ic, qc; 362 u32 val; 363 int ret; 364 365 rtw89_rfk_parser(rtwdev, &rtw8851b_dadck_setup_defs_tbl); 366 367 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 368 1, 10000, false, 369 rtwdev, R_ADDCKR0, BIT(0)); 370 if (ret) { 371 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DADCK timeout\n"); 372 dack->addck_timeout[0] = true; 373 } 374 375 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DADCK ret = %d\n", ret); 376 377 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_IQ, 0x0); 378 i_dc = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_DC); 379 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_IQ, 0x1); 380 q_dc = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_DC); 381 382 ic = 0x80 - sign_extend32(i_dc, 11) * 6; 383 qc = 0x80 - sign_extend32(q_dc, 11) * 6; 384 385 rtw89_debug(rtwdev, RTW89_DBG_RFK, 386 "[DACK]before DADCK, i_dc=0x%x, q_dc=0x%x\n", i_dc, q_dc); 387 388 dack->dadck_d[0][0] = ic; 389 dack->dadck_d[0][1] = qc; 390 391 rtw89_phy_write32_mask(rtwdev, R_DACKN0_CTL, B_DACKN0_V, dack->dadck_d[0][0]); 392 rtw89_phy_write32_mask(rtwdev, R_DACKN1_CTL, B_DACKN1_V, dack->dadck_d[0][1]); 393 rtw89_debug(rtwdev, RTW89_DBG_RFK, 394 "[DACK]after DADCK, 0xc210=0x%x, 0xc224=0x%x\n", 395 rtw89_phy_read32_mask(rtwdev, R_DACKN0_CTL, MASKDWORD), 396 rtw89_phy_read32_mask(rtwdev, R_DACKN1_CTL, MASKDWORD)); 397 398 rtw89_rfk_parser(rtwdev, &rtw8851b_dadck_post_defs_tbl); 399 } 400 401 static bool _dack_s0_poll(struct rtw89_dev *rtwdev) 402 { 403 if (rtw89_phy_read32_mask(rtwdev, R_DACK_S0P0, B_DACK_S0P0_OK) == 0 || 404 rtw89_phy_read32_mask(rtwdev, R_DACK_S0P1, B_DACK_S0P1_OK) == 0 || 405 
rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0P2_OK) == 0 || 406 rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0P3_OK) == 0) 407 return false; 408 409 return true; 410 } 411 412 static void _dack_s0(struct rtw89_dev *rtwdev) 413 { 414 struct rtw89_dack_info *dack = &rtwdev->dack; 415 bool done; 416 int ret; 417 418 rtw89_rfk_parser(rtwdev, &rtw8851b_dack_s0_1_defs_tbl); 419 _dack_reset(rtwdev, RF_PATH_A); 420 rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_S, 0x1); 421 422 ret = read_poll_timeout_atomic(_dack_s0_poll, done, done, 423 1, 10000, false, rtwdev); 424 if (ret) { 425 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DACK timeout\n"); 426 dack->msbk_timeout[0] = true; 427 } 428 429 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret); 430 431 rtw89_rfk_parser(rtwdev, &rtw8851b_dack_s0_2_defs_tbl); 432 433 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S0 DADCK\n"); 434 435 _dack_backup_s0(rtwdev); 436 _dack_reload(rtwdev, RF_PATH_A); 437 438 rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0); 439 } 440 441 static void _dack(struct rtw89_dev *rtwdev) 442 { 443 _dack_s0(rtwdev); 444 } 445 446 static void _dack_dump(struct rtw89_dev *rtwdev) 447 { 448 struct rtw89_dack_info *dack = &rtwdev->dack; 449 u8 i; 450 u8 t; 451 452 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADC_DCK ic = 0x%x, qc = 0x%x\n", 453 dack->addck_d[0][0], dack->addck_d[0][1]); 454 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n", 455 dack->dadck_d[0][0], dack->dadck_d[0][1]); 456 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 biask ic = 0x%x, qc = 0x%x\n", 457 dack->biask_d[0][0], dack->biask_d[0][1]); 458 459 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK ic:\n"); 460 for (i = 0; i < RTW89_DACK_MSBK_NR; i++) { 461 t = dack->msbk_d[0][0][i]; 462 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t); 463 } 464 465 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK qc:\n"); 466 for (i = 0; i < RTW89_DACK_MSBK_NR; i++) { 467 t = 
dack->msbk_d[0][1][i]; 468 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t); 469 } 470 } 471 472 static void _dack_manual_off(struct rtw89_dev *rtwdev) 473 { 474 rtw89_rfk_parser(rtwdev, &rtw8851b_dack_manual_off_defs_tbl); 475 } 476 477 static void _dac_cal(struct rtw89_dev *rtwdev, bool force) 478 { 479 struct rtw89_dack_info *dack = &rtwdev->dack; 480 u32 rf0_0; 481 482 dack->dack_done = false; 483 484 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK 0x2\n"); 485 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK start!!!\n"); 486 rf0_0 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK); 487 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]RF0=0x%x\n", rf0_0); 488 489 _drck(rtwdev); 490 _dack_manual_off(rtwdev); 491 rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x337e1); 492 rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x0); 493 494 _addck(rtwdev); 495 _addck_backup(rtwdev); 496 _addck_reload(rtwdev); 497 rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x40001); 498 499 _dack(rtwdev); 500 _new_dadck(rtwdev); 501 _dack_dump(rtwdev); 502 rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x1); 503 504 dack->dack_done = true; 505 dack->dack_cnt++; 506 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK finish!!!\n"); 507 } 508 509 static void _rx_dck_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 510 enum rtw89_rf_path path, bool is_afe) 511 { 512 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 513 514 rtw89_debug(rtwdev, RTW89_DBG_RFK, 515 "[RX_DCK] ==== S%d RX DCK (%s / CH%d / %s / by %s)====\n", path, 516 chan->band_type == RTW89_BAND_2G ? "2G" : 517 chan->band_type == RTW89_BAND_5G ? "5G" : "6G", 518 chan->channel, 519 chan->band_width == RTW89_CHANNEL_WIDTH_20 ? "20M" : 520 chan->band_width == RTW89_CHANNEL_WIDTH_40 ? "40M" : "80M", 521 is_afe ? 
"AFE" : "RFC"); 522 } 523 524 static void _rxbb_ofst_swap(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 rf_mode) 525 { 526 u32 val, val_i, val_q; 527 528 val_i = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_S1); 529 val_q = rtw89_read_rf(rtwdev, path, RR_DCK1, RR_DCK1_S1); 530 531 val = val_q << 4 | val_i; 532 533 rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_DIS, 0x1); 534 rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, rf_mode); 535 rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, val); 536 rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_DIS, 0x0); 537 538 rtw89_debug(rtwdev, RTW89_DBG_RFK, 539 "[RX_DCK] val_i = 0x%x, val_q = 0x%x, 0x3F = 0x%x\n", 540 val_i, val_q, val); 541 } 542 543 static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 rf_mode) 544 { 545 u32 val; 546 int ret; 547 548 rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0); 549 rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x1); 550 551 ret = read_poll_timeout_atomic(rtw89_read_rf, val, val, 552 2, 2000, false, 553 rtwdev, path, RR_DCK, BIT(8)); 554 555 rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0); 556 557 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RX_DCK] S%d RXDCK finish (ret = %d)\n", 558 path, ret); 559 560 _rxbb_ofst_swap(rtwdev, path, rf_mode); 561 } 562 563 static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_afe) 564 { 565 u32 rf_reg5; 566 u8 path; 567 568 rtw89_debug(rtwdev, RTW89_DBG_RFK, 569 "[RX_DCK] ****** RXDCK Start (Ver: 0x%x, Cv: %d) ******\n", 570 0x2, rtwdev->hal.cv); 571 572 for (path = 0; path < RF_PATH_NUM_8851B; path++) { 573 _rx_dck_info(rtwdev, phy, path, is_afe); 574 575 rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK); 576 577 if (rtwdev->is_tssi_mode[path]) 578 rtw89_phy_write32_mask(rtwdev, 579 R_P0_TSSI_TRK + (path << 13), 580 B_P0_TSSI_TRK_EN, 0x1); 581 582 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0); 583 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RF_RX); 584 
_set_rx_dck(rtwdev, path, RF_RX); 585 rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5); 586 587 if (rtwdev->is_tssi_mode[path]) 588 rtw89_phy_write32_mask(rtwdev, 589 R_P0_TSSI_TRK + (path << 13), 590 B_P0_TSSI_TRK_EN, 0x0); 591 } 592 } 593 594 static void _iqk_sram(struct rtw89_dev *rtwdev, u8 path) 595 { 596 u32 i; 597 598 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 599 600 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00020000); 601 rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, MASKDWORD, 0x80000000); 602 rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX2, MASKDWORD, 0x00000080); 603 rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 0x00010000); 604 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x009); 605 606 for (i = 0; i <= 0x9f; i++) { 607 rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 608 0x00010000 + i); 609 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]0x%x\n", 610 rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI)); 611 } 612 613 for (i = 0; i <= 0x9f; i++) { 614 rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 615 0x00010000 + i); 616 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]0x%x\n", 617 rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCQ)); 618 } 619 620 rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX2, MASKDWORD, 0x00000000); 621 rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 0x00000000); 622 } 623 624 static void _iqk_rxk_setting(struct rtw89_dev *rtwdev, u8 path) 625 { 626 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc); 627 rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0); 628 rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x1); 629 } 630 631 static bool _iqk_check_cal(struct rtw89_dev *rtwdev, u8 path) 632 { 633 bool fail1 = false, fail2 = false; 634 u32 val; 635 int ret; 636 637 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55, 638 10, 8200, false, 639 rtwdev, 0xbff8, MASKBYTE0); 640 if (ret) { 641 fail1 = true; 642 
rtw89_debug(rtwdev, RTW89_DBG_RFK, 643 "[IQK]NCTL1 IQK timeout!!!\n"); 644 } 645 646 fsleep(10); 647 648 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x8000, 649 10, 200, false, 650 rtwdev, R_RPT_COM, B_RPT_COM_RDY); 651 if (ret) { 652 fail2 = true; 653 rtw89_debug(rtwdev, RTW89_DBG_RFK, 654 "[IQK]NCTL2 IQK timeout!!!\n"); 655 } 656 657 fsleep(10); 658 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, MASKBYTE0, 0x0); 659 660 rtw89_debug(rtwdev, RTW89_DBG_RFK, 661 "[IQK]S%x, ret = %d, notready = %x fail=%d,%d\n", 662 path, ret, fail1 || fail2, fail1, fail2); 663 664 return fail1 || fail2; 665 } 666 667 static bool _iqk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 668 u8 path, u8 ktype) 669 { 670 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 671 bool notready; 672 u32 iqk_cmd; 673 674 switch (ktype) { 675 case ID_A_FLOK_COARSE: 676 rtw89_debug(rtwdev, RTW89_DBG_RFK, 677 "[IQK]============ S%d ID_A_FLOK_COARSE ============\n", path); 678 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x1); 679 iqk_cmd = 0x108 | (1 << (4 + path)); 680 break; 681 case ID_G_FLOK_COARSE: 682 rtw89_debug(rtwdev, RTW89_DBG_RFK, 683 "[IQK]============ S%d ID_G_FLOK_COARSE ============\n", path); 684 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x1); 685 iqk_cmd = 0x108 | (1 << (4 + path)); 686 break; 687 case ID_A_FLOK_FINE: 688 rtw89_debug(rtwdev, RTW89_DBG_RFK, 689 "[IQK]============ S%d ID_A_FLOK_FINE ============\n", path); 690 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x1); 691 iqk_cmd = 0x308 | (1 << (4 + path)); 692 break; 693 case ID_G_FLOK_FINE: 694 rtw89_debug(rtwdev, RTW89_DBG_RFK, 695 "[IQK]============ S%d ID_G_FLOK_FINE ============\n", path); 696 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x1); 697 iqk_cmd = 0x308 | (1 << (4 + path)); 698 break; 699 case ID_TXK: 700 rtw89_debug(rtwdev, RTW89_DBG_RFK, 701 "[IQK]============ S%d ID_TXK ============\n", path); 702 rtw89_phy_write32_mask(rtwdev, 
R_UPD_CLK, B_IQK_RFC_ON, 0x0); 703 iqk_cmd = 0x008 | (1 << (path + 4)) | 704 (((0x8 + iqk_info->iqk_bw[path]) & 0xf) << 8); 705 break; 706 case ID_RXAGC: 707 rtw89_debug(rtwdev, RTW89_DBG_RFK, 708 "[IQK]============ S%d ID_RXAGC ============\n", path); 709 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x1); 710 iqk_cmd = 0x708 | (1 << (4 + path)) | (path << 1); 711 break; 712 case ID_RXK: 713 rtw89_debug(rtwdev, RTW89_DBG_RFK, 714 "[IQK]============ S%d ID_RXK ============\n", path); 715 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x1); 716 iqk_cmd = 0x008 | (1 << (path + 4)) | 717 (((0xc + iqk_info->iqk_bw[path]) & 0xf) << 8); 718 break; 719 case ID_NBTXK: 720 rtw89_debug(rtwdev, RTW89_DBG_RFK, 721 "[IQK]============ S%d ID_NBTXK ============\n", path); 722 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x0); 723 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 724 0x00b); 725 iqk_cmd = 0x408 | (1 << (4 + path)); 726 break; 727 case ID_NBRXK: 728 rtw89_debug(rtwdev, RTW89_DBG_RFK, 729 "[IQK]============ S%d ID_NBRXK ============\n", path); 730 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x1); 731 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 732 0x011); 733 iqk_cmd = 0x608 | (1 << (4 + path)); 734 break; 735 default: 736 return false; 737 } 738 739 rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, iqk_cmd + 1); 740 notready = _iqk_check_cal(rtwdev, path); 741 if (iqk_info->iqk_sram_en && 742 (ktype == ID_NBRXK || ktype == ID_RXK)) 743 _iqk_sram(rtwdev, path); 744 745 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x0); 746 rtw89_debug(rtwdev, RTW89_DBG_RFK, 747 "[IQK]S%x, ktype= %x, id = %x, notready = %x\n", 748 path, ktype, iqk_cmd + 1, notready); 749 750 return notready; 751 } 752 753 static bool _rxk_2g_group_sel(struct rtw89_dev *rtwdev, 754 enum rtw89_phy_idx phy_idx, u8 path) 755 { 756 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 757 bool kfail = false; 758 bool notready; 759 
u32 rf_0; 760 u8 gp; 761 762 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 763 764 for (gp = 0; gp < RTW8851B_RXK_GROUP_NR; gp++) { 765 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, gp = %x\n", path, gp); 766 767 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM, g_idxrxgain[gp]); 768 rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C2, g_idxattc2[gp]); 769 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1); 770 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x0); 771 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP_V1, gp); 772 773 rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013); 774 fsleep(10); 775 rf_0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK); 776 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, rf_0); 777 rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, g_idxrxagc[gp]); 778 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11); 779 780 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXAGC); 781 782 rtw89_debug(rtwdev, RTW89_DBG_RFK, 783 "[IQK]S%x, RXAGC 0x8008 = 0x%x, rxbb = %x\n", path, 784 rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD), 785 rtw89_read_rf(rtwdev, path, RR_MOD, 0x003e0)); 786 787 if (gp == 0x3) { 788 rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_OFF, 0x13); 789 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011); 790 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK); 791 iqk_info->nb_rxcfir[path] = 792 rtw89_phy_read32_mask(rtwdev, R_RXIQC, MASKDWORD) | 0x2; 793 794 rtw89_debug(rtwdev, RTW89_DBG_RFK, 795 "[IQK]S%x, NBRXK 0x8008 = 0x%x\n", path, 796 rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD)); 797 } 798 799 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK); 800 801 rtw89_debug(rtwdev, RTW89_DBG_RFK, 802 "[IQK]S%x, WBRXK 0x8008 = 0x%x\n", path, 803 rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD)); 804 } 805 806 if (!notready) 807 kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG); 
808 809 if (kfail) 810 _iqk_sram(rtwdev, path); 811 812 if (kfail) { 813 rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), 814 MASKDWORD, iqk_info->nb_rxcfir[path] | 0x2); 815 iqk_info->is_wb_txiqk[path] = false; 816 } else { 817 rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), 818 MASKDWORD, 0x40000000); 819 iqk_info->is_wb_txiqk[path] = true; 820 } 821 822 rtw89_debug(rtwdev, RTW89_DBG_RFK, 823 "[IQK]S%x, kfail = 0x%x, 0x8%x3c = 0x%x\n", path, kfail, 824 1 << path, iqk_info->nb_rxcfir[path]); 825 return kfail; 826 } 827 828 static bool _rxk_5g_group_sel(struct rtw89_dev *rtwdev, 829 enum rtw89_phy_idx phy_idx, u8 path) 830 { 831 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 832 bool kfail = false; 833 bool notready; 834 u32 rf_0; 835 u8 gp; 836 837 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 838 839 for (gp = 0; gp < RTW8851B_RXK_GROUP_NR; gp++) { 840 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, gp = %x\n", path, gp); 841 842 rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, 0x03ff0, a_idxrxgain[gp]); 843 rtw89_write_rf(rtwdev, RF_PATH_A, RR_RXA2, RR_RXA2_ATT, a_idxattc2[gp]); 844 845 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1); 846 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x0); 847 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP_V1, gp); 848 849 rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013); 850 fsleep(100); 851 rf_0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK); 852 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, rf_0); 853 rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, a_idxrxagc[gp]); 854 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11); 855 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXAGC); 856 857 rtw89_debug(rtwdev, RTW89_DBG_RFK, 858 "[IQK]S%x, RXAGC 0x8008 = 0x%x, rxbb = %x\n", path, 859 rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD), 860 rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_RXB)); 861 862 if (gp == 0x3) { 863 
rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_OFF, 0x13); 864 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011); 865 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK); 866 iqk_info->nb_rxcfir[path] = 867 rtw89_phy_read32_mask(rtwdev, R_RXIQC, MASKDWORD) | 0x2; 868 869 rtw89_debug(rtwdev, RTW89_DBG_RFK, 870 "[IQK]S%x, NBRXK 0x8008 = 0x%x\n", path, 871 rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD)); 872 } 873 874 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK); 875 876 rtw89_debug(rtwdev, RTW89_DBG_RFK, 877 "[IQK]S%x, WBRXK 0x8008 = 0x%x\n", path, 878 rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD)); 879 } 880 881 if (!notready) 882 kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG); 883 884 if (kfail) 885 _iqk_sram(rtwdev, path); 886 887 if (kfail) { 888 rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD, 889 iqk_info->nb_rxcfir[path] | 0x2); 890 iqk_info->is_wb_txiqk[path] = false; 891 } else { 892 rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD, 893 0x40000000); 894 iqk_info->is_wb_txiqk[path] = true; 895 } 896 897 rtw89_debug(rtwdev, RTW89_DBG_RFK, 898 "[IQK]S%x, kfail = 0x%x, 0x8%x3c = 0x%x\n", path, kfail, 899 1 << path, iqk_info->nb_rxcfir[path]); 900 return kfail; 901 } 902 903 static bool _iqk_5g_nbrxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 904 u8 path) 905 { 906 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 907 bool kfail = false; 908 bool notready; 909 u8 gp = 0x3; 910 u32 rf_0; 911 912 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 913 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, gp = %x\n", path, gp); 914 915 rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RR_MOD_RGM, a_idxrxgain[gp]); 916 rtw89_write_rf(rtwdev, RF_PATH_A, RR_RXA2, RR_RXA2_ATT, a_idxattc2[gp]); 917 918 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1); 919 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x0); 920 rtw89_phy_write32_mask(rtwdev, 
R_CFIR_LUT, B_CFIR_LUT_GP_V1, gp); 921 922 rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013); 923 fsleep(100); 924 rf_0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK); 925 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, rf_0); 926 rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, a_idxrxagc[gp]); 927 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11); 928 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXAGC); 929 930 rtw89_debug(rtwdev, RTW89_DBG_RFK, 931 "[IQK]S%x, RXAGC 0x8008 = 0x%x, rxbb = %x\n", path, 932 rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD), 933 rtw89_read_rf(rtwdev, path, RR_MOD, 0x003e0)); 934 935 if (gp == 0x3) { 936 rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_OFF, 0x13); 937 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011); 938 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK); 939 iqk_info->nb_rxcfir[path] = 940 rtw89_phy_read32_mask(rtwdev, R_RXIQC, MASKDWORD) | 0x2; 941 942 rtw89_debug(rtwdev, RTW89_DBG_RFK, 943 "[IQK]S%x, NBRXK 0x8008 = 0x%x\n", path, 944 rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD)); 945 } 946 947 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, WBRXK 0x8008 = 0x%x\n", 948 path, rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD)); 949 950 if (!notready) 951 kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG); 952 953 if (kfail) { 954 rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), 955 MASKDWORD, 0x40000002); 956 iqk_info->is_wb_rxiqk[path] = false; 957 } else { 958 iqk_info->is_wb_rxiqk[path] = false; 959 } 960 961 rtw89_debug(rtwdev, RTW89_DBG_RFK, 962 "[IQK]S%x, kfail = 0x%x, 0x8%x3c = 0x%x\n", path, kfail, 963 1 << path, iqk_info->nb_rxcfir[path]); 964 965 return kfail; 966 } 967 968 static bool _iqk_2g_nbrxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 969 u8 path) 970 { 971 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 972 bool kfail = false; 973 bool notready; 974 u8 gp = 0x3; 975 u32 rf_0; 976 
977 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 978 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, gp = %x\n", path, gp); 979 980 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM, g_idxrxgain[gp]); 981 rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C2, g_idxattc2[gp]); 982 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1); 983 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x0); 984 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP_V1, gp); 985 986 rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013); 987 fsleep(10); 988 rf_0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK); 989 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, rf_0); 990 rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, g_idxrxagc[gp]); 991 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11); 992 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXAGC); 993 994 rtw89_debug(rtwdev, RTW89_DBG_RFK, 995 "[IQK]S%x, RXAGC 0x8008 = 0x%x, rxbb = %x\n", 996 path, rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD), 997 rtw89_read_rf(rtwdev, path, RR_MOD, 0x003e0)); 998 999 if (gp == 0x3) { 1000 rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_OFF, 0x13); 1001 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011); 1002 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK); 1003 iqk_info->nb_rxcfir[path] = 1004 rtw89_phy_read32_mask(rtwdev, R_RXIQC, MASKDWORD) | 0x2; 1005 1006 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1007 "[IQK]S%x, NBRXK 0x8008 = 0x%x\n", path, 1008 rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD)); 1009 } 1010 1011 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, WBRXK 0x8008 = 0x%x\n", 1012 path, rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD)); 1013 1014 if (!notready) 1015 kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG); 1016 1017 if (kfail) { 1018 rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), 1019 MASKDWORD, 0x40000002); 1020 iqk_info->is_wb_rxiqk[path] = 
false; 1021 } else { 1022 iqk_info->is_wb_rxiqk[path] = false; 1023 } 1024 1025 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1026 "[IQK]S%x, kfail = 0x%x, 0x8%x3c = 0x%x\n", path, kfail, 1027 1 << path, iqk_info->nb_rxcfir[path]); 1028 return kfail; 1029 } 1030 1031 static void _iqk_rxclk_setting(struct rtw89_dev *rtwdev, u8 path) 1032 { 1033 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1034 1035 rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_CKT, 0x1); 1036 1037 if (iqk_info->iqk_bw[path] == RTW89_CHANNEL_WIDTH_80) 1038 rtw89_rfk_parser(rtwdev, &rtw8851b_iqk_rxclk_80_defs_tbl); 1039 else 1040 rtw89_rfk_parser(rtwdev, &rtw8851b_iqk_rxclk_others_defs_tbl); 1041 } 1042 1043 static bool _txk_5g_group_sel(struct rtw89_dev *rtwdev, 1044 enum rtw89_phy_idx phy_idx, u8 path) 1045 { 1046 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1047 bool kfail = false; 1048 bool notready; 1049 u8 gp; 1050 1051 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 1052 1053 for (gp = 0x0; gp < RTW8851B_TXK_GROUP_NR; gp++) { 1054 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, a_power_range[gp]); 1055 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, a_track_range[gp]); 1056 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, a_gain_bb[gp]); 1057 1058 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1); 1059 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x1); 1060 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G2, 0x0); 1061 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP, gp); 1062 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); 1063 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, MASKDWORD, a_itqt[gp]); 1064 1065 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK); 1066 iqk_info->nb_txcfir[path] = 1067 rtw89_phy_read32_mask(rtwdev, R_TXIQC, MASKDWORD) | 0x2; 1068 1069 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), 1070 MASKDWORD, a_itqt[gp]); 1071 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK); 1072 } 
1073 1074 if (!notready) 1075 kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG); 1076 1077 if (kfail) { 1078 rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), 1079 MASKDWORD, iqk_info->nb_txcfir[path] | 0x2); 1080 iqk_info->is_wb_txiqk[path] = false; 1081 } else { 1082 rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), 1083 MASKDWORD, 0x40000000); 1084 iqk_info->is_wb_txiqk[path] = true; 1085 } 1086 1087 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1088 "[IQK]S%x, kfail = 0x%x, 0x8%x38 = 0x%x\n", path, kfail, 1089 1 << path, iqk_info->nb_txcfir[path]); 1090 return kfail; 1091 } 1092 1093 static bool _txk_2g_group_sel(struct rtw89_dev *rtwdev, 1094 enum rtw89_phy_idx phy_idx, u8 path) 1095 { 1096 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1097 bool kfail = false; 1098 bool notready; 1099 u8 gp; 1100 1101 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 1102 1103 for (gp = 0x0; gp < RTW8851B_TXK_GROUP_NR; gp++) { 1104 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, g_power_range[gp]); 1105 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, g_track_range[gp]); 1106 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, g_gain_bb[gp]); 1107 1108 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, MASKDWORD, g_itqt[gp]); 1109 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1); 1110 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x1); 1111 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G2, 0x0); 1112 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP, gp); 1113 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); 1114 1115 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK); 1116 iqk_info->nb_txcfir[path] = 1117 rtw89_phy_read32_mask(rtwdev, R_TXIQC, MASKDWORD) | 0x2; 1118 1119 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), 1120 MASKDWORD, g_itqt[gp]); 1121 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK); 1122 } 1123 1124 if (!notready) 1125 kfail = 
!!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG); 1126 1127 if (kfail) { 1128 rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), 1129 MASKDWORD, iqk_info->nb_txcfir[path] | 0x2); 1130 iqk_info->is_wb_txiqk[path] = false; 1131 } else { 1132 rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), 1133 MASKDWORD, 0x40000000); 1134 iqk_info->is_wb_txiqk[path] = true; 1135 } 1136 1137 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1138 "[IQK]S%x, kfail = 0x%x, 0x8%x38 = 0x%x\n", path, kfail, 1139 1 << path, iqk_info->nb_txcfir[path]); 1140 return kfail; 1141 } 1142 1143 static bool _iqk_5g_nbtxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 1144 u8 path) 1145 { 1146 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1147 bool kfail = false; 1148 bool notready; 1149 u8 gp; 1150 1151 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 1152 1153 for (gp = 0x0; gp < RTW8851B_TXK_GROUP_NR; gp++) { 1154 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, a_power_range[gp]); 1155 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, a_track_range[gp]); 1156 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, a_gain_bb[gp]); 1157 1158 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1); 1159 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x1); 1160 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G2, 0x0); 1161 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP, gp); 1162 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); 1163 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, MASKDWORD, a_itqt[gp]); 1164 1165 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK); 1166 iqk_info->nb_txcfir[path] = 1167 rtw89_phy_read32_mask(rtwdev, R_TXIQC, MASKDWORD) | 0x2; 1168 } 1169 1170 if (!notready) 1171 kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG); 1172 1173 if (kfail) { 1174 rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), 1175 MASKDWORD, 0x40000002); 1176 iqk_info->is_wb_rxiqk[path] = false; 1177 } else { 
1178 iqk_info->is_wb_rxiqk[path] = false; 1179 } 1180 1181 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1182 "[IQK]S%x, kfail = 0x%x, 0x8%x38 = 0x%x\n", path, kfail, 1183 1 << path, iqk_info->nb_txcfir[path]); 1184 return kfail; 1185 } 1186 1187 static bool _iqk_2g_nbtxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 1188 u8 path) 1189 { 1190 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1191 bool kfail = false; 1192 bool notready; 1193 u8 gp; 1194 1195 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 1196 1197 for (gp = 0x0; gp < RTW8851B_TXK_GROUP_NR; gp++) { 1198 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, g_power_range[gp]); 1199 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, g_track_range[gp]); 1200 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, g_gain_bb[gp]); 1201 1202 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, MASKDWORD, g_itqt[gp]); 1203 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1); 1204 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x1); 1205 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G2, 0x0); 1206 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP, gp); 1207 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); 1208 1209 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK); 1210 iqk_info->nb_txcfir[path] = 1211 rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), 1212 MASKDWORD) | 0x2; 1213 } 1214 1215 if (!notready) 1216 kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG); 1217 1218 if (kfail) { 1219 rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), 1220 MASKDWORD, 0x40000002); 1221 iqk_info->is_wb_rxiqk[path] = false; 1222 } else { 1223 iqk_info->is_wb_rxiqk[path] = false; 1224 } 1225 1226 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1227 "[IQK]S%x, kfail = 0x%x, 0x8%x38 = 0x%x\n", path, kfail, 1228 1 << path, iqk_info->nb_txcfir[path]); 1229 return kfail; 1230 } 1231 1232 static bool _iqk_2g_lok(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 1233 
u8 path) 1234 { 1235 static const u32 g_txbb[RTW8851B_LOK_GRAM] = { 1236 0x02, 0x06, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16, 0x17}; 1237 static const u32 g_itqt[RTW8851B_LOK_GRAM] = { 1238 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x12, 0x12, 0x12, 0x1b}; 1239 static const u32 g_wa[RTW8851B_LOK_GRAM] = { 1240 0x00, 0x04, 0x08, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16, 0x17}; 1241 bool fail = false; 1242 u8 i; 1243 1244 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 1245 1246 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTDBG, RR_LUTDBG_LOK, 0x0); 1247 rtw89_write_rf(rtwdev, RF_PATH_A, RR_TXIG, RR_TXIG_GR0, 0x0); 1248 rtw89_write_rf(rtwdev, RF_PATH_A, RR_TXIG, RR_TXIG_GR1, 0x6); 1249 1250 for (i = 0; i < RTW8851B_LOK_GRAM; i++) { 1251 rtw89_write_rf(rtwdev, RF_PATH_A, RR_TXIG, RR_TXIG_TG, g_txbb[i]); 1252 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RR_LUTWA_M1, g_wa[i]); 1253 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x1); 1254 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, B_KIP_IQP_IQSW, g_itqt[i]); 1255 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x021); 1256 rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 1257 0x00000109 | (1 << (4 + path))); 1258 fail |= _iqk_check_cal(rtwdev, path); 1259 1260 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); 1261 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, B_KIP_IQP_IQSW, g_itqt[i]); 1262 rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 1263 0x00000309 | (1 << (4 + path))); 1264 fail |= _iqk_check_cal(rtwdev, path); 1265 1266 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); 1267 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x0); 1268 1269 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1270 "[IQK]S0, i = %x, 0x8[19:15] = 0x%x,0x8[09:05] = 0x%x\n", i, 1271 rtw89_read_rf(rtwdev, RF_PATH_A, RR_DTXLOK, 0xf8000), 1272 rtw89_read_rf(rtwdev, RF_PATH_A, RR_DTXLOK, 0x003e0)); 1273 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1274 "[IQK]S0, i = %x, 0x9[19:16] = 0x%x,0x9[09:06] = 0x%x\n", 
i, 1275 rtw89_read_rf(rtwdev, RF_PATH_A, RR_RSV2, 0xf0000), 1276 rtw89_read_rf(rtwdev, RF_PATH_A, RR_RSV2, 0x003c0)); 1277 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1278 "[IQK]S0, i = %x, 0x58 = %x\n", i, 1279 rtw89_read_rf(rtwdev, RF_PATH_A, RR_TXMO, RFREG_MASK)); 1280 } 1281 1282 return fail; 1283 } 1284 1285 static bool _iqk_5g_lok(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 1286 u8 path) 1287 { 1288 static const u32 a_txbb[RTW8851B_LOK_GRAM] = { 1289 0x02, 0x06, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16, 0x17}; 1290 static const u32 a_itqt[RTW8851B_LOK_GRAM] = { 1291 0x09, 0x09, 0x09, 0x12, 0x12, 0x12, 0x1b, 0x1b, 0x1b, 0x1b}; 1292 static const u32 a_wa[RTW8851B_LOK_GRAM] = { 1293 0x80, 0x84, 0x88, 0x8c, 0x8e, 0x90, 0x92, 0x94, 0x96, 0x97}; 1294 bool fail = false; 1295 u8 i; 1296 1297 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 1298 1299 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTDBG, RR_LUTDBG_LOK, 0x0); 1300 rtw89_write_rf(rtwdev, RF_PATH_A, RR_TXIG, RR_TXIG_GR0, 0x0); 1301 rtw89_write_rf(rtwdev, RF_PATH_A, RR_TXIG, RR_TXIG_GR1, 0x7); 1302 1303 for (i = 0; i < RTW8851B_LOK_GRAM; i++) { 1304 rtw89_write_rf(rtwdev, RF_PATH_A, RR_TXIG, RR_TXIG_TG, a_txbb[i]); 1305 rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RR_LUTWA_M1, a_wa[i]); 1306 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x1); 1307 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, B_KIP_IQP_IQSW, a_itqt[i]); 1308 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x021); 1309 rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 1310 0x00000109 | (1 << (4 + path))); 1311 fail |= _iqk_check_cal(rtwdev, path); 1312 1313 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); 1314 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, B_KIP_IQP_IQSW, a_itqt[i]); 1315 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x021); 1316 rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 1317 0x00000309 | (1 << (4 + path))); 1318 fail |= _iqk_check_cal(rtwdev, path); 1319 1320 
rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); 1321 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x0); 1322 1323 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1324 "[IQK]S0, i = %x, 0x8[19:15] = 0x%x,0x8[09:05] = 0x%x\n", i, 1325 rtw89_read_rf(rtwdev, RF_PATH_A, RR_DTXLOK, 0xf8000), 1326 rtw89_read_rf(rtwdev, RF_PATH_A, RR_DTXLOK, 0x003e0)); 1327 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1328 "[IQK]S0, i = %x, 0x9[19:16] = 0x%x,0x9[09:06] = 0x%x\n", i, 1329 rtw89_read_rf(rtwdev, RF_PATH_A, RR_RSV2, 0xf0000), 1330 rtw89_read_rf(rtwdev, RF_PATH_A, RR_RSV2, 0x003c0)); 1331 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1332 "[IQK]S0, i = %x, 0x58 = %x\n", i, 1333 rtw89_read_rf(rtwdev, RF_PATH_A, RR_TXMO, RFREG_MASK)); 1334 } 1335 1336 return fail; 1337 } 1338 1339 static void _iqk_txk_setting(struct rtw89_dev *rtwdev, u8 path) 1340 { 1341 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1342 1343 switch (iqk_info->iqk_band[path]) { 1344 case RTW89_BAND_2G: 1345 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]RTW89_BAND_2G\n"); 1346 rtw89_rfk_parser(rtwdev, &rtw8851b_iqk_txk_2ghz_defs_tbl); 1347 break; 1348 case RTW89_BAND_5G: 1349 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]RTW89_BAND_5G\n"); 1350 rtw89_rfk_parser(rtwdev, &rtw8851b_iqk_txk_5ghz_defs_tbl); 1351 break; 1352 default: 1353 break; 1354 } 1355 } 1356 1357 #define IQK_LOK_RETRY 1 1358 1359 static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 1360 u8 path) 1361 { 1362 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1363 bool lok_is_fail; 1364 u8 i; 1365 1366 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 1367 1368 for (i = 0; i < IQK_LOK_RETRY; i++) { 1369 _iqk_txk_setting(rtwdev, path); 1370 if (iqk_info->iqk_band[path] == RTW89_BAND_2G) 1371 lok_is_fail = _iqk_2g_lok(rtwdev, phy_idx, path); 1372 else 1373 lok_is_fail = _iqk_5g_lok(rtwdev, phy_idx, path); 1374 1375 if (!lok_is_fail) 1376 break; 1377 } 1378 1379 if (iqk_info->is_nbiqk) { 1380 if (iqk_info->iqk_band[path] == 
RTW89_BAND_2G) 1381 iqk_info->iqk_tx_fail[0][path] = 1382 _iqk_2g_nbtxk(rtwdev, phy_idx, path); 1383 else 1384 iqk_info->iqk_tx_fail[0][path] = 1385 _iqk_5g_nbtxk(rtwdev, phy_idx, path); 1386 } else { 1387 if (iqk_info->iqk_band[path] == RTW89_BAND_2G) 1388 iqk_info->iqk_tx_fail[0][path] = 1389 _txk_2g_group_sel(rtwdev, phy_idx, path); 1390 else 1391 iqk_info->iqk_tx_fail[0][path] = 1392 _txk_5g_group_sel(rtwdev, phy_idx, path); 1393 } 1394 1395 _iqk_rxclk_setting(rtwdev, path); 1396 _iqk_rxk_setting(rtwdev, path); 1397 _adc_fifo_rst(rtwdev, phy_idx, path); 1398 1399 if (iqk_info->is_nbiqk) { 1400 if (iqk_info->iqk_band[path] == RTW89_BAND_2G) 1401 iqk_info->iqk_rx_fail[0][path] = 1402 _iqk_2g_nbrxk(rtwdev, phy_idx, path); 1403 else 1404 iqk_info->iqk_rx_fail[0][path] = 1405 _iqk_5g_nbrxk(rtwdev, phy_idx, path); 1406 } else { 1407 if (iqk_info->iqk_band[path] == RTW89_BAND_2G) 1408 iqk_info->iqk_rx_fail[0][path] = 1409 _rxk_2g_group_sel(rtwdev, phy_idx, path); 1410 else 1411 iqk_info->iqk_rx_fail[0][path] = 1412 _rxk_5g_group_sel(rtwdev, phy_idx, path); 1413 } 1414 } 1415 1416 static void _rfk_backup_bb_reg(struct rtw89_dev *rtwdev, 1417 u32 backup_bb_reg_val[]) 1418 { 1419 u32 i; 1420 1421 for (i = 0; i < BACKUP_BB_REGS_NR; i++) { 1422 backup_bb_reg_val[i] = 1423 rtw89_phy_read32_mask(rtwdev, rtw8851b_backup_bb_regs[i], 1424 MASKDWORD); 1425 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1426 "[RFK]backup bb reg : %x, value =%x\n", 1427 rtw8851b_backup_bb_regs[i], backup_bb_reg_val[i]); 1428 } 1429 } 1430 1431 static void _rfk_backup_rf_reg(struct rtw89_dev *rtwdev, 1432 u32 backup_rf_reg_val[], u8 rf_path) 1433 { 1434 u32 i; 1435 1436 for (i = 0; i < BACKUP_RF_REGS_NR; i++) { 1437 backup_rf_reg_val[i] = 1438 rtw89_read_rf(rtwdev, rf_path, 1439 rtw8851b_backup_rf_regs[i], RFREG_MASK); 1440 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1441 "[RFK]backup rf S%d reg : %x, value =%x\n", rf_path, 1442 rtw8851b_backup_rf_regs[i], backup_rf_reg_val[i]); 1443 } 1444 } 1445 1446 static void 
_rfk_restore_bb_reg(struct rtw89_dev *rtwdev, 1447 const u32 backup_bb_reg_val[]) 1448 { 1449 u32 i; 1450 1451 for (i = 0; i < BACKUP_BB_REGS_NR; i++) { 1452 rtw89_phy_write32_mask(rtwdev, rtw8851b_backup_bb_regs[i], 1453 MASKDWORD, backup_bb_reg_val[i]); 1454 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1455 "[RFK]restore bb reg : %x, value =%x\n", 1456 rtw8851b_backup_bb_regs[i], backup_bb_reg_val[i]); 1457 } 1458 } 1459 1460 static void _rfk_restore_rf_reg(struct rtw89_dev *rtwdev, 1461 const u32 backup_rf_reg_val[], u8 rf_path) 1462 { 1463 u32 i; 1464 1465 for (i = 0; i < BACKUP_RF_REGS_NR; i++) { 1466 rtw89_write_rf(rtwdev, rf_path, rtw8851b_backup_rf_regs[i], 1467 RFREG_MASK, backup_rf_reg_val[i]); 1468 1469 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1470 "[RFK]restore rf S%d reg: %x, value =%x\n", rf_path, 1471 rtw8851b_backup_rf_regs[i], backup_rf_reg_val[i]); 1472 } 1473 } 1474 1475 static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 1476 u8 path) 1477 { 1478 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 1479 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1480 u8 idx = 0; 1481 1482 iqk_info->iqk_band[path] = chan->band_type; 1483 iqk_info->iqk_bw[path] = chan->band_width; 1484 iqk_info->iqk_ch[path] = chan->channel; 1485 iqk_info->iqk_table_idx[path] = idx; 1486 1487 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d (PHY%d): / DBCC %s/ %s/ CH%d/ %s\n", 1488 path, phy, rtwdev->dbcc_en ? "on" : "off", 1489 iqk_info->iqk_band[path] == 0 ? "2G" : 1490 iqk_info->iqk_band[path] == 1 ? "5G" : "6G", 1491 iqk_info->iqk_ch[path], 1492 iqk_info->iqk_bw[path] == 0 ? "20M" : 1493 iqk_info->iqk_bw[path] == 1 ? 
"40M" : "80M"); 1494 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]times = 0x%x, ch =%x\n", 1495 iqk_info->iqk_times, idx); 1496 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, iqk_info->syn1to2= 0x%x\n", 1497 path, iqk_info->syn1to2); 1498 } 1499 1500 static void _iqk_start_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 1501 u8 path) 1502 { 1503 _iqk_by_path(rtwdev, phy_idx, path); 1504 } 1505 1506 static void _iqk_restore(struct rtw89_dev *rtwdev, u8 path) 1507 { 1508 bool fail; 1509 1510 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 1511 1512 rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00001219); 1513 fsleep(10); 1514 fail = _iqk_check_cal(rtwdev, path); 1515 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] restore fail=%d\n", fail); 1516 1517 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); 1518 rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000000); 1519 rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000); 1520 } 1521 1522 static void _iqk_afebb_restore(struct rtw89_dev *rtwdev, 1523 enum rtw89_phy_idx phy_idx, u8 path) 1524 { 1525 rtw89_rfk_parser(rtwdev, &rtw8851b_iqk_afebb_restore_defs_tbl); 1526 } 1527 1528 static void _iqk_preset(struct rtw89_dev *rtwdev, u8 path) 1529 { 1530 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 1531 1532 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0); 1533 rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080); 1534 rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x81ff010a); 1535 } 1536 1537 static void _iqk_macbb_setting(struct rtw89_dev *rtwdev, 1538 enum rtw89_phy_idx phy_idx, u8 path) 1539 { 1540 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 1541 1542 rtw89_rfk_parser(rtwdev, &rtw8851b_iqk_macbb_defs_tbl); 1543 } 1544 1545 static void _iqk_init(struct rtw89_dev *rtwdev) 1546 { 1547 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1548 u8 idx, path; 1549 1550 rtw89_phy_write32_mask(rtwdev, 
R_IQKINF, MASKDWORD, 0x0); 1551 1552 if (iqk_info->is_iqk_init) 1553 return; 1554 1555 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 1556 1557 iqk_info->is_iqk_init = true; 1558 iqk_info->is_nbiqk = false; 1559 iqk_info->iqk_fft_en = false; 1560 iqk_info->iqk_sram_en = false; 1561 iqk_info->iqk_cfir_en = false; 1562 iqk_info->iqk_xym_en = false; 1563 iqk_info->thermal_rek_en = false; 1564 iqk_info->iqk_times = 0x0; 1565 1566 for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) { 1567 iqk_info->iqk_channel[idx] = 0x0; 1568 for (path = 0; path < RF_PATH_NUM_8851B; path++) { 1569 iqk_info->lok_cor_fail[idx][path] = false; 1570 iqk_info->lok_fin_fail[idx][path] = false; 1571 iqk_info->iqk_tx_fail[idx][path] = false; 1572 iqk_info->iqk_rx_fail[idx][path] = false; 1573 iqk_info->iqk_table_idx[path] = 0x0; 1574 } 1575 } 1576 } 1577 1578 static void _doiqk(struct rtw89_dev *rtwdev, bool force, 1579 enum rtw89_phy_idx phy_idx, u8 path) 1580 { 1581 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1582 u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB); 1583 u32 backup_rf_val[RTW8851B_IQK_SS][BACKUP_RF_REGS_NR]; 1584 u32 backup_bb_val[BACKUP_BB_REGS_NR]; 1585 1586 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, 1587 BTC_WRFK_ONESHOT_START); 1588 1589 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1590 "[IQK]==========IQK strat!!!!!==========\n"); 1591 iqk_info->iqk_times++; 1592 iqk_info->kcount = 0; 1593 iqk_info->version = RTW8851B_IQK_VER; 1594 1595 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version); 1596 _iqk_get_ch_info(rtwdev, phy_idx, path); 1597 1598 _rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]); 1599 _rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path); 1600 _iqk_macbb_setting(rtwdev, phy_idx, path); 1601 _iqk_preset(rtwdev, path); 1602 _iqk_start_iqk(rtwdev, phy_idx, path); 1603 _iqk_restore(rtwdev, path); 1604 _iqk_afebb_restore(rtwdev, phy_idx, path); 1605 _rfk_restore_bb_reg(rtwdev, &backup_bb_val[0]); 1606 
_rfk_restore_rf_reg(rtwdev, &backup_rf_val[path][0], path); 1607 1608 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, 1609 BTC_WRFK_ONESHOT_STOP); 1610 } 1611 1612 static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force) 1613 { 1614 _doiqk(rtwdev, force, phy_idx, RF_PATH_A); 1615 } 1616 1617 static void _dpk_bkup_kip(struct rtw89_dev *rtwdev, const u32 *reg, 1618 u32 reg_bkup[][DPK_KIP_REG_NUM_8851B], u8 path) 1619 { 1620 u8 i; 1621 1622 for (i = 0; i < DPK_KIP_REG_NUM_8851B; i++) { 1623 reg_bkup[path][i] = 1624 rtw89_phy_read32_mask(rtwdev, reg[i] + (path << 8), MASKDWORD); 1625 1626 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Backup 0x%x = %x\n", 1627 reg[i] + (path << 8), reg_bkup[path][i]); 1628 } 1629 } 1630 1631 static void _dpk_bkup_rf(struct rtw89_dev *rtwdev, const u32 *rf_reg, 1632 u32 rf_bkup[][DPK_RF_REG_NUM_8851B], u8 path) 1633 { 1634 u8 i; 1635 1636 for (i = 0; i < DPK_RF_REG_NUM_8851B; i++) { 1637 rf_bkup[path][i] = rtw89_read_rf(rtwdev, path, rf_reg[i], RFREG_MASK); 1638 1639 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Backup RF S%d 0x%x = %x\n", 1640 path, rf_reg[i], rf_bkup[path][i]); 1641 } 1642 } 1643 1644 static void _dpk_reload_kip(struct rtw89_dev *rtwdev, const u32 *reg, 1645 u32 reg_bkup[][DPK_KIP_REG_NUM_8851B], u8 path) 1646 { 1647 u8 i; 1648 1649 for (i = 0; i < DPK_KIP_REG_NUM_8851B; i++) { 1650 rtw89_phy_write32_mask(rtwdev, reg[i] + (path << 8), MASKDWORD, 1651 reg_bkup[path][i]); 1652 1653 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1654 "[DPK] Reload 0x%x = %x\n", 1655 reg[i] + (path << 8), reg_bkup[path][i]); 1656 } 1657 } 1658 1659 static void _dpk_reload_rf(struct rtw89_dev *rtwdev, const u32 *rf_reg, 1660 u32 rf_bkup[][DPK_RF_REG_NUM_8851B], u8 path) 1661 { 1662 u8 i; 1663 1664 for (i = 0; i < DPK_RF_REG_NUM_8851B; i++) { 1665 rtw89_write_rf(rtwdev, path, rf_reg[i], RFREG_MASK, rf_bkup[path][i]); 1666 1667 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1668 "[DPK] Reload RF S%d 0x%x = %x\n", path, 1669 rf_reg[i], 
rf_bkup[path][i]); 1670 } 1671 } 1672 1673 static void _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 1674 enum rtw89_rf_path path, enum dpk_id id) 1675 { 1676 u16 dpk_cmd; 1677 u32 val; 1678 int ret; 1679 1680 dpk_cmd = ((id << 8) | (0x19 + path * 0x12)); 1681 rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, dpk_cmd); 1682 1683 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55, 1684 10, 20000, false, 1685 rtwdev, 0xbff8, MASKBYTE0); 1686 if (ret) 1687 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] one-shot 1 timeout\n"); 1688 1689 udelay(1); 1690 1691 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x8000, 1692 1, 2000, false, 1693 rtwdev, R_RPT_COM, MASKLWORD); 1694 if (ret) 1695 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] one-shot 2 timeout\n"); 1696 1697 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, MASKBYTE0, 0x0); 1698 1699 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1700 "[DPK] one-shot for %s = 0x%04x\n", 1701 id == 0x28 ? "KIP_PRESET" : 1702 id == 0x29 ? "DPK_TXAGC" : 1703 id == 0x2a ? "DPK_RXAGC" : 1704 id == 0x2b ? "SYNC" : 1705 id == 0x2c ? "GAIN_LOSS" : 1706 id == 0x2d ? "MDPK_IDL" : 1707 id == 0x2f ? "DPK_GAIN_NORM" : 1708 id == 0x31 ? "KIP_RESTORE" : 1709 id == 0x6 ? "LBK_RXIQK" : "Unknown id", 1710 dpk_cmd); 1711 } 1712 1713 static void _dpk_onoff(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, 1714 bool off) 1715 { 1716 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 1717 u8 kidx = dpk->cur_idx[path]; 1718 u8 off_reverse = off ? 0 : 1; 1719 u8 val; 1720 1721 val = dpk->is_dpk_enable * off_reverse * dpk->bp[path][kidx].path_ok; 1722 1723 rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2), 1724 0xf0000000, val); 1725 1726 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s !!!\n", path, 1727 kidx, val == 0 ? 
"disable" : "enable"); 1728 } 1729 1730 static void _dpk_init(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) 1731 { 1732 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 1733 1734 u8 kidx = dpk->cur_idx[path]; 1735 1736 dpk->bp[path][kidx].path_ok = 0; 1737 } 1738 1739 static void _dpk_information(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 1740 enum rtw89_rf_path path) 1741 { 1742 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 1743 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 1744 1745 u8 kidx = dpk->cur_idx[path]; 1746 1747 dpk->bp[path][kidx].band = chan->band_type; 1748 dpk->bp[path][kidx].ch = chan->band_width; 1749 dpk->bp[path][kidx].bw = chan->channel; 1750 1751 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1752 "[DPK] S%d[%d] (PHY%d): TSSI %s/ DBCC %s/ %s/ CH%d/ %s\n", 1753 path, dpk->cur_idx[path], phy, 1754 rtwdev->is_tssi_mode[path] ? "on" : "off", 1755 rtwdev->dbcc_en ? "on" : "off", 1756 dpk->bp[path][kidx].band == 0 ? "2G" : 1757 dpk->bp[path][kidx].band == 1 ? "5G" : "6G", 1758 dpk->bp[path][kidx].ch, 1759 dpk->bp[path][kidx].bw == 0 ? "20M" : 1760 dpk->bp[path][kidx].bw == 1 ? "40M" : 1761 dpk->bp[path][kidx].bw == 2 ? "80M" : "160M"); 1762 } 1763 1764 static void _dpk_rxagc_onoff(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, 1765 bool turn_on) 1766 { 1767 if (path == RF_PATH_A) 1768 rtw89_phy_write32_mask(rtwdev, R_P0_AGC_CTL, B_P0_AGC_EN, turn_on); 1769 else 1770 rtw89_phy_write32_mask(rtwdev, R_P1_AGC_CTL, B_P1_AGC_EN, turn_on); 1771 1772 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d RXAGC is %s\n", path, 1773 turn_on ? 
"turn_on" : "turn_off"); 1774 } 1775 1776 static void _dpk_bb_afe_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) 1777 { 1778 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(16 + path), 0x1); 1779 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(20 + path), 0x0); 1780 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(24 + path), 0x1); 1781 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(28 + path), 0x0); 1782 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), MASKDWORD, 0xd801dffd); 1783 1784 rtw89_rfk_parser(rtwdev, &rtw8851b_iqk_bb_afe_defs_tbl); 1785 1786 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(20 + path), 0x1); 1787 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(28 + path), 0x1); 1788 1789 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d BB/AFE setting\n", path); 1790 } 1791 1792 static void _dpk_bb_afe_restore(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) 1793 { 1794 rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG, 0x0); 1795 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(16 + path), 0x1); 1796 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(20 + path), 0x0); 1797 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(24 + path), 0x1); 1798 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(28 + path), 0x0); 1799 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), MASKDWORD, 0x00000000); 1800 rtw89_phy_write32_mask(rtwdev, R_P0_RXCK + (path << 13), B_P0_TXCK_ALL, 0x00); 1801 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(16 + path), 0x0); 1802 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(24 + path), 0x0); 1803 1804 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d BB/AFE restore\n", path); 1805 } 1806 1807 static void _dpk_tssi_pause(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, 1808 bool is_pause) 1809 { 1810 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13), 1811 B_P0_TSSI_TRK_EN, is_pause); 1812 1813 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d TSSI %s\n", path, 1814 is_pause ? 
"pause" : "resume"); 1815 } 1816 1817 static void _dpk_tpg_sel(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx) 1818 { 1819 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 1820 1821 if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80) { 1822 rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x0); 1823 rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0xffe0fa00); 1824 } else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40) { 1825 rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x2); 1826 rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0xff4009e0); 1827 } else { 1828 rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x1); 1829 rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0xf9f007d0); 1830 } 1831 1832 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] TPG Select for %s\n", 1833 dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80 ? "80M" : 1834 dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40 ? "40M" : "20M"); 1835 } 1836 1837 static void _dpk_txpwr_bb_force(struct rtw89_dev *rtwdev, 1838 enum rtw89_rf_path path, bool force) 1839 { 1840 rtw89_phy_write32_mask(rtwdev, R_TXPWRB + (path << 13), B_TXPWRB_ON, force); 1841 rtw89_phy_write32_mask(rtwdev, R_TXPWRB_H + (path << 13), B_TXPWRB_RDY, force); 1842 1843 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d txpwr_bb_force %s\n", 1844 path, force ? 
"on" : "off"); 1845 } 1846 1847 static void _dpk_kip_pwr_clk_onoff(struct rtw89_dev *rtwdev, bool turn_on) 1848 { 1849 if (turn_on) { 1850 rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080); 1851 rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x807f030a); 1852 } else { 1853 rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000000); 1854 rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000); 1855 rtw89_phy_write32_mask(rtwdev, R_DPK_WR, BIT(18), 0x1); 1856 } 1857 } 1858 1859 static void _dpk_kip_control_rfc(struct rtw89_dev *rtwdev, 1860 enum rtw89_rf_path path, bool ctrl_by_kip) 1861 { 1862 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), 1863 B_IQK_RFC_ON, ctrl_by_kip); 1864 1865 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] RFC is controlled by %s\n", 1866 ctrl_by_kip ? "KIP" : "BB"); 1867 } 1868 1869 static void _dpk_kip_preset(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 1870 enum rtw89_rf_path path, u8 kidx) 1871 { 1872 rtw89_phy_write32_mask(rtwdev, R_KIP_MOD, B_KIP_MOD, 1873 rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK)); 1874 rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2), 1875 B_DPD_SEL, 0x01); 1876 1877 _dpk_kip_control_rfc(rtwdev, path, true); 1878 _dpk_one_shot(rtwdev, phy, path, D_KIP_PRESET); 1879 } 1880 1881 static void _dpk_kip_restore(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 1882 enum rtw89_rf_path path) 1883 { 1884 _dpk_one_shot(rtwdev, phy, path, D_KIP_RESTORE); 1885 _dpk_kip_control_rfc(rtwdev, path, false); 1886 _dpk_txpwr_bb_force(rtwdev, path, false); 1887 1888 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d restore KIP\n", path); 1889 } 1890 1891 static void _dpk_kset_query(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) 1892 { 1893 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 1894 1895 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT + (path << 8), B_KIP_RPT_SEL, 0x10); 1896 1897 dpk->cur_k_set = 1898 rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), 
B_RPT_PER_KSET) - 1; 1899 } 1900 1901 static void _dpk_para_query(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx) 1902 { 1903 static const u32 reg[RTW89_DPK_BKUP_NUM][DPK_KSET_NUM] = { 1904 {0x8190, 0x8194, 0x8198, 0x81a4}, 1905 {0x81a8, 0x81c4, 0x81c8, 0x81e8} 1906 }; 1907 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 1908 u8 cur_k_set = dpk->cur_k_set; 1909 u32 para; 1910 1911 if (cur_k_set >= DPK_KSET_NUM) { 1912 rtw89_warn(rtwdev, "DPK cur_k_set = %d\n", cur_k_set); 1913 cur_k_set = 2; 1914 } 1915 1916 para = rtw89_phy_read32_mask(rtwdev, reg[kidx][cur_k_set] + (path << 8), 1917 MASKDWORD); 1918 1919 dpk->bp[path][kidx].txagc_dpk = (para >> 10) & 0x3f; 1920 dpk->bp[path][kidx].ther_dpk = (para >> 26) & 0x3f; 1921 1922 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1923 "[DPK] thermal/ txagc_RF (K%d) = 0x%x/ 0x%x\n", 1924 dpk->cur_k_set, dpk->bp[path][kidx].ther_dpk, 1925 dpk->bp[path][kidx].txagc_dpk); 1926 } 1927 1928 static bool _dpk_sync_check(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx) 1929 { 1930 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 1931 u8 corr_val, corr_idx, rxbb; 1932 u16 dc_i, dc_q; 1933 u8 rxbb_ov; 1934 1935 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0); 1936 1937 corr_idx = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORI); 1938 corr_val = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORV); 1939 dpk->corr_idx[path][kidx] = corr_idx; 1940 dpk->corr_val[path][kidx] = corr_val; 1941 1942 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x9); 1943 1944 dc_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI); 1945 dc_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCQ); 1946 1947 dc_i = abs(sign_extend32(dc_i, 11)); 1948 dc_q = abs(sign_extend32(dc_q, 11)); 1949 1950 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1951 "[DPK] S%d Corr_idx/ Corr_val /DC I/Q, = %d / %d / %d / %d\n", 1952 path, corr_idx, corr_val, dc_i, dc_q); 1953 1954 dpk->dc_i[path][kidx] = dc_i; 1955 dpk->dc_q[path][kidx] = 
dc_q; 1956 1957 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x8); 1958 rxbb = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_RXBB); 1959 1960 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x31); 1961 rxbb_ov = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_RXOV); 1962 1963 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1964 "[DPK] S%d RXBB/ RXAGC_done /RXBB_ovlmt = %d / %d / %d\n", 1965 path, rxbb, 1966 rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DONE), 1967 rxbb_ov); 1968 1969 if (dc_i > 200 || dc_q > 200 || corr_val < 170) 1970 return true; 1971 else 1972 return false; 1973 } 1974 1975 static void _dpk_kip_set_txagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 1976 enum rtw89_rf_path path, u8 dbm, 1977 bool set_from_bb) 1978 { 1979 if (set_from_bb) { 1980 dbm = clamp_t(u8, dbm, 7, 24); 1981 1982 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1983 "[DPK] set S%d txagc to %ddBm\n", path, dbm); 1984 rtw89_phy_write32_mask(rtwdev, R_TXPWRB + (path << 13), 1985 B_TXPWRB_VAL, dbm << 2); 1986 } 1987 1988 _dpk_one_shot(rtwdev, phy, path, D_TXAGC); 1989 _dpk_kset_query(rtwdev, path); 1990 } 1991 1992 static bool _dpk_kip_set_rxagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 1993 enum rtw89_rf_path path, u8 kidx) 1994 { 1995 _dpk_kip_control_rfc(rtwdev, path, false); 1996 rtw89_phy_write32_mask(rtwdev, R_KIP_MOD, B_KIP_MOD, 1997 rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK)); 1998 _dpk_kip_control_rfc(rtwdev, path, true); 1999 2000 _dpk_one_shot(rtwdev, phy, path, D_RXAGC); 2001 return _dpk_sync_check(rtwdev, path, kidx); 2002 } 2003 2004 static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2005 enum rtw89_rf_path path) 2006 { 2007 u32 rf_11, reg_81cc; 2008 u8 cur_rxbb; 2009 2010 rtw89_phy_write32_mask(rtwdev, R_DPD_V1 + (path << 8), B_DPD_LBK, 0x1); 2011 rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x1); 2012 2013 _dpk_kip_control_rfc(rtwdev, path, false); 2014 2015 cur_rxbb = rtw89_read_rf(rtwdev, 
path, RR_MOD, RR_MOD_RXB); 2016 rf_11 = rtw89_read_rf(rtwdev, path, RR_TXIG, RFREG_MASK); 2017 reg_81cc = rtw89_phy_read32_mask(rtwdev, R_KIP_IQP + (path << 8), 2018 B_KIP_IQP_SW); 2019 2020 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0); 2021 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x3); 2022 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0xd); 2023 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RXB, 0x1f); 2024 2025 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x12); 2026 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_SW, 0x3); 2027 2028 _dpk_kip_control_rfc(rtwdev, path, true); 2029 2030 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, MASKDWORD, 0x00250025); 2031 2032 _dpk_one_shot(rtwdev, phy, path, LBK_RXIQK); 2033 2034 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d LBK RXIQC = 0x%x\n", path, 2035 rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD)); 2036 2037 _dpk_kip_control_rfc(rtwdev, path, false); 2038 2039 rtw89_write_rf(rtwdev, path, RR_TXIG, RFREG_MASK, rf_11); 2040 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RXB, cur_rxbb); 2041 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_SW, reg_81cc); 2042 2043 rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x0); 2044 rtw89_phy_write32_mask(rtwdev, R_KPATH_CFG, B_KPATH_CFG_ED, 0x0); 2045 rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_DI, 0x1); 2046 2047 _dpk_kip_control_rfc(rtwdev, path, true); 2048 } 2049 2050 static void _dpk_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx) 2051 { 2052 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 2053 2054 if (dpk->bp[path][kidx].band == RTW89_BAND_2G) { 2055 rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, 0x50521); 2056 rtw89_write_rf(rtwdev, path, RR_MOD_V1, RR_MOD_MASK, RF_DPK); 2057 rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_ATTC, 0x0); 2058 rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_ATTR, 0x7); 2059 } else { 2060 
rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, 2061 0x50521 | BIT(rtwdev->dbcc_en)); 2062 rtw89_write_rf(rtwdev, path, RR_MOD_V1, RR_MOD_MASK, RF_DPK); 2063 rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RAA2_SATT, 0x3); 2064 } 2065 2066 rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_BW, 0x1); 2067 rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_TXBB, dpk->bp[path][kidx].bw + 1); 2068 rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_RXBB, 0x0); 2069 rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_EBW, 0x0); 2070 } 2071 2072 static void _dpk_bypass_rxiqc(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) 2073 { 2074 rtw89_phy_write32_mask(rtwdev, R_DPD_V1 + (path << 8), B_DPD_LBK, 0x1); 2075 rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD, 0x40000002); 2076 2077 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Bypass RXIQC\n"); 2078 } 2079 2080 static u16 _dpk_dgain_read(struct rtw89_dev *rtwdev) 2081 { 2082 u16 dgain; 2083 2084 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0); 2085 dgain = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI); 2086 2087 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain = 0x%x\n", dgain); 2088 2089 return dgain; 2090 } 2091 2092 static u8 _dpk_gainloss_read(struct rtw89_dev *rtwdev) 2093 { 2094 u8 result; 2095 2096 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x6); 2097 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x1); 2098 result = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_GL); 2099 2100 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] tmp GL = %d\n", result); 2101 2102 return result; 2103 } 2104 2105 static u8 _dpk_gainloss(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2106 enum rtw89_rf_path path, u8 kidx) 2107 { 2108 _dpk_one_shot(rtwdev, phy, path, D_GAIN_LOSS); 2109 _dpk_kip_set_txagc(rtwdev, phy, path, 0xff, false); 2110 2111 rtw89_phy_write32_mask(rtwdev, R_DPK_GL + (path << 8), B_DPK_GL_A1, 0xf078); 2112 rtw89_phy_write32_mask(rtwdev, R_DPK_GL + (path << 8), B_DPK_GL_A0, 0x0); 
2113 2114 return _dpk_gainloss_read(rtwdev); 2115 } 2116 2117 static u8 _dpk_pas_read(struct rtw89_dev *rtwdev, u8 is_check) 2118 { 2119 u32 val1_i = 0, val1_q = 0, val2_i = 0, val2_q = 0; 2120 u32 val1_sqrt_sum, val2_sqrt_sum; 2121 u8 i; 2122 2123 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKBYTE2, 0x06); 2124 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x0); 2125 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE2, 0x08); 2126 2127 if (is_check) { 2128 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x00); 2129 val1_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD); 2130 val1_i = abs(sign_extend32(val1_i, 11)); 2131 val1_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD); 2132 val1_q = abs(sign_extend32(val1_q, 11)); 2133 2134 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x1f); 2135 val2_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD); 2136 val2_i = abs(sign_extend32(val2_i, 11)); 2137 val2_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD); 2138 val2_q = abs(sign_extend32(val2_q, 11)); 2139 2140 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_delta = 0x%x\n", 2141 phy_div(val1_i * val1_i + val1_q * val1_q, 2142 val2_i * val2_i + val2_q * val2_q)); 2143 } else { 2144 for (i = 0; i < 32; i++) { 2145 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, i); 2146 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2147 "[DPK] PAS_Read[%02d]= 0x%08x\n", i, 2148 rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD)); 2149 } 2150 } 2151 2152 val1_sqrt_sum = val1_i * val1_i + val1_q * val1_q; 2153 val2_sqrt_sum = val2_i * val2_i + val2_q * val2_q; 2154 2155 if (val1_sqrt_sum < val2_sqrt_sum) 2156 return 2; 2157 else if (val1_sqrt_sum >= val2_sqrt_sum * 8 / 5) 2158 return 1; 2159 else 2160 return 0; 2161 } 2162 2163 static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2164 enum rtw89_rf_path path, u8 kidx, u8 init_xdbm, u8 loss_only) 2165 { 2166 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 2167 u8 tmp_dbm = init_xdbm, 
tmp_gl_idx = 0; 2168 u8 step = DPK_AGC_STEP_SYNC_DGAIN; 2169 u8 goout = 0, agc_cnt = 0; 2170 bool is_fail = false; 2171 int limit = 200; 2172 u8 tmp_rxbb; 2173 u16 dgain; 2174 2175 do { 2176 switch (step) { 2177 case DPK_AGC_STEP_SYNC_DGAIN: 2178 is_fail = _dpk_kip_set_rxagc(rtwdev, phy, path, kidx); 2179 2180 if (is_fail) { 2181 goout = 1; 2182 break; 2183 } 2184 2185 dgain = _dpk_dgain_read(rtwdev); 2186 2187 if (dgain > 0x5fc || dgain < 0x556) { 2188 _dpk_one_shot(rtwdev, phy, path, D_SYNC); 2189 dgain = _dpk_dgain_read(rtwdev); 2190 } 2191 2192 if (agc_cnt == 0) { 2193 if (dpk->bp[path][kidx].band == RTW89_BAND_2G) 2194 _dpk_bypass_rxiqc(rtwdev, path); 2195 else 2196 _dpk_lbk_rxiqk(rtwdev, phy, path); 2197 } 2198 step = DPK_AGC_STEP_GAIN_LOSS_IDX; 2199 break; 2200 2201 case DPK_AGC_STEP_GAIN_LOSS_IDX: 2202 tmp_gl_idx = _dpk_gainloss(rtwdev, phy, path, kidx); 2203 2204 if (_dpk_pas_read(rtwdev, true) == 2 && tmp_gl_idx > 0) 2205 step = DPK_AGC_STEP_GL_LT_CRITERION; 2206 else if ((tmp_gl_idx == 0 && _dpk_pas_read(rtwdev, true) == 1) || 2207 tmp_gl_idx >= 7) 2208 step = DPK_AGC_STEP_GL_GT_CRITERION; 2209 else if (tmp_gl_idx == 0) 2210 step = DPK_AGC_STEP_GL_LT_CRITERION; 2211 else 2212 step = DPK_AGC_STEP_SET_TX_GAIN; 2213 break; 2214 2215 case DPK_AGC_STEP_GL_GT_CRITERION: 2216 if (tmp_dbm <= 7) { 2217 goout = 1; 2218 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2219 "[DPK] Txagc@lower bound!!\n"); 2220 } else { 2221 tmp_dbm = max_t(u8, tmp_dbm - 3, 7); 2222 _dpk_kip_set_txagc(rtwdev, phy, path, tmp_dbm, true); 2223 } 2224 step = DPK_AGC_STEP_SYNC_DGAIN; 2225 agc_cnt++; 2226 break; 2227 2228 case DPK_AGC_STEP_GL_LT_CRITERION: 2229 if (tmp_dbm >= 24) { 2230 goout = 1; 2231 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2232 "[DPK] Txagc@upper bound!!\n"); 2233 } else { 2234 tmp_dbm = min_t(u8, tmp_dbm + 2, 24); 2235 _dpk_kip_set_txagc(rtwdev, phy, path, tmp_dbm, true); 2236 } 2237 step = DPK_AGC_STEP_SYNC_DGAIN; 2238 agc_cnt++; 2239 break; 2240 2241 case DPK_AGC_STEP_SET_TX_GAIN: 2242 
_dpk_kip_control_rfc(rtwdev, path, false); 2243 tmp_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_RXB); 2244 tmp_rxbb = min_t(u8, tmp_rxbb + tmp_gl_idx, 0x1f); 2245 2246 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RXB, tmp_rxbb); 2247 2248 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2249 "[DPK] Adjust RXBB (%+d) = 0x%x\n", 2250 tmp_gl_idx, tmp_rxbb); 2251 _dpk_kip_control_rfc(rtwdev, path, true); 2252 goout = 1; 2253 break; 2254 default: 2255 goout = 1; 2256 break; 2257 } 2258 } while (!goout && agc_cnt < 6 && limit-- > 0); 2259 2260 return is_fail; 2261 } 2262 2263 static void _dpk_set_mdpd_para(struct rtw89_dev *rtwdev, u8 order) 2264 { 2265 switch (order) { 2266 case 0: /* (5,3,1) */ 2267 rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, 0x0); 2268 rtw89_phy_write32_mask(rtwdev, R_DPK_IDL, B_DPK_IDL_SEL, 0x2); 2269 rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x4); 2270 rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_DMAN, 0x1); 2271 break; 2272 case 1: /* (5,3,0) */ 2273 rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, 0x1); 2274 rtw89_phy_write32_mask(rtwdev, R_DPK_IDL, B_DPK_IDL_SEL, 0x1); 2275 rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x0); 2276 rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_DMAN, 0x0); 2277 break; 2278 case 2: /* (5,0,0) */ 2279 rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, 0x2); 2280 rtw89_phy_write32_mask(rtwdev, R_DPK_IDL, B_DPK_IDL_SEL, 0x0); 2281 rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x0); 2282 rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_DMAN, 0x0); 2283 break; 2284 case 3: /* (7,3,1) */ 2285 rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, 0x3); 2286 rtw89_phy_write32_mask(rtwdev, R_DPK_IDL, B_DPK_IDL_SEL, 0x3); 2287 rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x4); 2288 rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_DMAN, 0x1); 2289 break; 2290 default: 2291 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2292 
"[DPK] Wrong MDPD order!!(0x%x)\n", order); 2293 break; 2294 } 2295 2296 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Set %s for IDL\n", 2297 order == 0x0 ? "(5,3,1)" : 2298 order == 0x1 ? "(5,3,0)" : 2299 order == 0x2 ? "(5,0,0)" : "(7,3,1)"); 2300 } 2301 2302 static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2303 enum rtw89_rf_path path, u8 kidx) 2304 { 2305 rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_MA, 0x1); 2306 2307 if (rtw89_phy_read32_mask(rtwdev, R_IDL_MPA, B_IDL_MD500) == 0x1) 2308 _dpk_set_mdpd_para(rtwdev, 0x2); 2309 else if (rtw89_phy_read32_mask(rtwdev, R_IDL_MPA, B_IDL_MD530) == 0x1) 2310 _dpk_set_mdpd_para(rtwdev, 0x1); 2311 else 2312 _dpk_set_mdpd_para(rtwdev, 0x0); 2313 2314 rtw89_phy_write32_mask(rtwdev, R_DPK_IDL, B_DPK_IDL, 0x0); 2315 fsleep(1000); 2316 2317 _dpk_one_shot(rtwdev, phy, path, D_MDPK_IDL); 2318 } 2319 2320 static u8 _dpk_order_convert(struct rtw89_dev *rtwdev) 2321 { 2322 u32 order; 2323 u8 val; 2324 2325 order = rtw89_phy_read32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP); 2326 2327 switch (order) { 2328 case 0: /* (5,3,1) */ 2329 val = 0x6; 2330 break; 2331 case 1: /* (5,3,0) */ 2332 val = 0x2; 2333 break; 2334 case 2: /* (5,0,0) */ 2335 val = 0x0; 2336 break; 2337 default: 2338 val = 0xff; 2339 break; 2340 } 2341 2342 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] convert MDPD order to 0x%x\n", val); 2343 2344 return val; 2345 } 2346 2347 static void _dpk_gain_normalize(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2348 enum rtw89_rf_path path, u8 kidx, bool is_execute) 2349 { 2350 static const u32 reg[RTW89_DPK_BKUP_NUM][DPK_KSET_NUM] = { 2351 {0x8190, 0x8194, 0x8198, 0x81a4}, 2352 {0x81a8, 0x81c4, 0x81c8, 0x81e8} 2353 }; 2354 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 2355 u8 cur_k_set = dpk->cur_k_set; 2356 2357 if (cur_k_set >= DPK_KSET_NUM) { 2358 rtw89_warn(rtwdev, "DPK cur_k_set = %d\n", cur_k_set); 2359 cur_k_set = 2; 2360 } 2361 2362 if (is_execute) { 2363 rtw89_phy_write32_mask(rtwdev, 
R_DPK_GN + (path << 8), 2364 B_DPK_GN_AG, 0x200); 2365 rtw89_phy_write32_mask(rtwdev, R_DPK_GN + (path << 8), 2366 B_DPK_GN_EN, 0x3); 2367 2368 _dpk_one_shot(rtwdev, phy, path, D_GAIN_NORM); 2369 } else { 2370 rtw89_phy_write32_mask(rtwdev, reg[kidx][cur_k_set] + (path << 8), 2371 0x0000007F, 0x5b); 2372 } 2373 2374 dpk->bp[path][kidx].gs = 2375 rtw89_phy_read32_mask(rtwdev, reg[kidx][cur_k_set] + (path << 8), 2376 0x0000007F); 2377 } 2378 2379 static void _dpk_on(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2380 enum rtw89_rf_path path, u8 kidx) 2381 { 2382 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 2383 2384 rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x1); 2385 rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x0); 2386 rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2), 2387 B_DPD_ORDER, _dpk_order_convert(rtwdev)); 2388 2389 dpk->bp[path][kidx].path_ok = 2390 dpk->bp[path][kidx].path_ok | BIT(dpk->cur_k_set); 2391 2392 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] path_ok = 0x%x\n", 2393 path, kidx, dpk->bp[path][kidx].path_ok); 2394 2395 rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2), 2396 B_DPD_MEN, dpk->bp[path][kidx].path_ok); 2397 2398 _dpk_gain_normalize(rtwdev, phy, path, kidx, false); 2399 } 2400 2401 static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2402 enum rtw89_rf_path path) 2403 { 2404 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 2405 u8 kidx = dpk->cur_idx[path]; 2406 u8 init_xdbm = 17; 2407 bool is_fail; 2408 2409 if (dpk->bp[path][kidx].band != RTW89_BAND_2G) 2410 init_xdbm = 15; 2411 2412 _dpk_kip_control_rfc(rtwdev, path, false); 2413 _rfk_rf_direct_cntrl(rtwdev, path, false); 2414 rtw89_write_rf(rtwdev, path, RR_BBDC, RFREG_MASK, 0x03ffd); 2415 2416 _dpk_rf_setting(rtwdev, path, kidx); 2417 _set_rx_dck(rtwdev, path, RF_DPK); 2418 2419 _dpk_kip_pwr_clk_onoff(rtwdev, true); 2420 _dpk_kip_preset(rtwdev, phy, path, 
kidx); 2421 _dpk_txpwr_bb_force(rtwdev, path, true); 2422 _dpk_kip_set_txagc(rtwdev, phy, path, init_xdbm, true); 2423 _dpk_tpg_sel(rtwdev, path, kidx); 2424 is_fail = _dpk_agc(rtwdev, phy, path, kidx, init_xdbm, false); 2425 if (is_fail) 2426 goto _error; 2427 2428 _dpk_idl_mpa(rtwdev, phy, path, kidx); 2429 _dpk_para_query(rtwdev, path, kidx); 2430 2431 _dpk_on(rtwdev, phy, path, kidx); 2432 _error: 2433 _dpk_kip_control_rfc(rtwdev, path, false); 2434 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RF_RX); 2435 2436 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d]_K%d %s\n", path, kidx, 2437 dpk->cur_k_set, is_fail ? "need Check" : "is Success"); 2438 2439 return is_fail; 2440 } 2441 2442 static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force, 2443 enum rtw89_phy_idx phy, u8 kpath) 2444 { 2445 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 2446 u32 kip_bkup[RF_PATH_NUM_8851B][DPK_KIP_REG_NUM_8851B] = {}; 2447 u32 rf_bkup[RF_PATH_NUM_8851B][DPK_RF_REG_NUM_8851B] = {}; 2448 bool is_fail; 2449 u8 path; 2450 2451 for (path = 0; path < RF_PATH_NUM_8851B; path++) 2452 dpk->cur_idx[path] = 0; 2453 2454 for (path = 0; path < RF_PATH_NUM_8851B; path++) { 2455 if (!(kpath & BIT(path))) 2456 continue; 2457 _dpk_bkup_kip(rtwdev, dpk_kip_reg, kip_bkup, path); 2458 _dpk_bkup_rf(rtwdev, dpk_rf_reg, rf_bkup, path); 2459 _dpk_information(rtwdev, phy, path); 2460 _dpk_init(rtwdev, path); 2461 2462 if (rtwdev->is_tssi_mode[path]) 2463 _dpk_tssi_pause(rtwdev, path, true); 2464 } 2465 2466 for (path = 0; path < RF_PATH_NUM_8851B; path++) { 2467 if (!(kpath & BIT(path))) 2468 continue; 2469 2470 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2471 "[DPK] ========= S%d[%d] DPK Start =========\n", 2472 path, dpk->cur_idx[path]); 2473 2474 _dpk_rxagc_onoff(rtwdev, path, false); 2475 _rfk_drf_direct_cntrl(rtwdev, path, false); 2476 _dpk_bb_afe_setting(rtwdev, path); 2477 2478 is_fail = _dpk_main(rtwdev, phy, path); 2479 _dpk_onoff(rtwdev, path, is_fail); 2480 } 2481 2482 for (path = 0; path < 
RF_PATH_NUM_8851B; path++) { 2483 if (!(kpath & BIT(path))) 2484 continue; 2485 2486 _dpk_kip_restore(rtwdev, phy, path); 2487 _dpk_reload_kip(rtwdev, dpk_kip_reg, kip_bkup, path); 2488 _dpk_reload_rf(rtwdev, dpk_rf_reg, rf_bkup, path); 2489 _dpk_bb_afe_restore(rtwdev, path); 2490 _dpk_rxagc_onoff(rtwdev, path, true); 2491 2492 if (rtwdev->is_tssi_mode[path]) 2493 _dpk_tssi_pause(rtwdev, path, false); 2494 } 2495 2496 _dpk_kip_pwr_clk_onoff(rtwdev, false); 2497 } 2498 2499 static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force) 2500 { 2501 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2502 "[DPK] ****** 8851B DPK Start (Ver: 0x%x, Cv: %d) ******\n", 2503 DPK_VER_8851B, rtwdev->hal.cv); 2504 2505 _dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy)); 2506 } 2507 2508 static void _dpk_track(struct rtw89_dev *rtwdev) 2509 { 2510 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 2511 s8 txagc_bb, txagc_bb_tp, txagc_ofst; 2512 s16 pwsf_tssi_ofst; 2513 s8 delta_ther = 0; 2514 u8 path, kidx; 2515 u8 txagc_rf; 2516 u8 cur_ther; 2517 2518 for (path = 0; path < RF_PATH_NUM_8851B; path++) { 2519 kidx = dpk->cur_idx[path]; 2520 2521 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, 2522 "[DPK_TRK] ================[S%d[%d] (CH %d)]================\n", 2523 path, kidx, dpk->bp[path][kidx].ch); 2524 2525 txagc_rf = rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13), 2526 B_TXAGC_RF); 2527 txagc_bb = rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13), 2528 MASKBYTE2); 2529 txagc_bb_tp = rtw89_phy_read32_mask(rtwdev, R_TXAGC_BTP + (path << 13), 2530 B_TXAGC_BTP); 2531 2532 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT + (path << 8), 2533 B_KIP_RPT_SEL, 0xf); 2534 cur_ther = rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), 2535 B_RPT_PER_TH); 2536 txagc_ofst = rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), 2537 B_RPT_PER_OF); 2538 pwsf_tssi_ofst = rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), 2539 B_RPT_PER_TSSI); 2540 pwsf_tssi_ofst = 
sign_extend32(pwsf_tssi_ofst, 12); 2541 2542 delta_ther = cur_ther - dpk->bp[path][kidx].ther_dpk; 2543 2544 delta_ther = delta_ther * 2 / 3; 2545 2546 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, 2547 "[DPK_TRK] extra delta_ther = %d (0x%x / 0x%x@k)\n", 2548 delta_ther, cur_ther, dpk->bp[path][kidx].ther_dpk); 2549 2550 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, 2551 "[DPK_TRK] delta_txagc = %d (0x%x / 0x%x@k)\n", 2552 txagc_rf - dpk->bp[path][kidx].txagc_dpk, 2553 txagc_rf, dpk->bp[path][kidx].txagc_dpk); 2554 2555 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, 2556 "[DPK_TRK] txagc_offset / pwsf_tssi_ofst = 0x%x / %+d\n", 2557 txagc_ofst, pwsf_tssi_ofst); 2558 2559 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, 2560 "[DPK_TRK] txagc_bb_tp / txagc_bb = 0x%x / 0x%x\n", 2561 txagc_bb_tp, txagc_bb); 2562 2563 if (rtw89_phy_read32_mask(rtwdev, R_IDL_MPA, B_IDL_DN) == 0x0 && 2564 txagc_rf != 0) { 2565 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, 2566 "[DPK_TRK] New pwsf = 0x%x\n", 0x78 - delta_ther); 2567 2568 rtw89_phy_write32_mask(rtwdev, 2569 R_DPD_BND + (path << 8) + (kidx << 2), 2570 0x07FC0000, 0x78 - delta_ther); 2571 } 2572 } 2573 } 2574 2575 static void _rck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) 2576 { 2577 u32 rf_reg5; 2578 u32 rck_val; 2579 u32 val; 2580 int ret; 2581 2582 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] ====== S%d RCK ======\n", path); 2583 2584 rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK); 2585 2586 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0); 2587 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX); 2588 2589 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF0x00 = 0x%05x\n", 2590 rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK)); 2591 2592 /* RCK trigger */ 2593 rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, 0x00240); 2594 2595 ret = read_poll_timeout_atomic(rtw89_read_rf, val, val, 2, 30, 2596 false, rtwdev, path, RR_RCKS, BIT(3)); 2597 2598 rck_val = rtw89_read_rf(rtwdev, path, RR_RCKC, RR_RCKC_CA); 2599 2600 
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] rck_val = 0x%x, ret = %d\n", 2601 rck_val, ret); 2602 2603 rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, rck_val); 2604 rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5); 2605 2606 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF 0x1b = 0x%x\n", 2607 rtw89_read_rf(rtwdev, path, RR_RCKC, RFREG_MASK)); 2608 } 2609 2610 static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2611 enum rtw89_rf_path path) 2612 { 2613 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 2614 enum rtw89_band band = chan->band_type; 2615 2616 rtw89_rfk_parser(rtwdev, &rtw8851b_tssi_sys_defs_tbl); 2617 2618 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, 2619 &rtw8851b_tssi_sys_a_defs_2g_tbl, 2620 &rtw8851b_tssi_sys_a_defs_5g_tbl); 2621 } 2622 2623 static void _tssi_ini_txpwr_ctrl_bb(struct rtw89_dev *rtwdev, 2624 enum rtw89_phy_idx phy, 2625 enum rtw89_rf_path path) 2626 { 2627 rtw89_rfk_parser(rtwdev, &rtw8851b_tssi_init_txpwr_defs_a_tbl); 2628 } 2629 2630 static void _tssi_ini_txpwr_ctrl_bb_he_tb(struct rtw89_dev *rtwdev, 2631 enum rtw89_phy_idx phy, 2632 enum rtw89_rf_path path) 2633 { 2634 rtw89_rfk_parser(rtwdev, &rtw8851b_tssi_init_txpwr_he_tb_defs_a_tbl); 2635 } 2636 2637 static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2638 enum rtw89_rf_path path) 2639 { 2640 rtw89_rfk_parser(rtwdev, &rtw8851b_tssi_dck_defs_a_tbl); 2641 } 2642 2643 static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2644 enum rtw89_rf_path path) 2645 { 2646 #define RTW8851B_TSSI_GET_VAL(ptr, idx) \ 2647 ({ \ 2648 s8 *__ptr = (ptr); \ 2649 u8 __idx = (idx), __i, __v; \ 2650 u32 __val = 0; \ 2651 for (__i = 0; __i < 4; __i++) { \ 2652 __v = (__ptr[__idx + __i]); \ 2653 __val |= (__v << (8 * __i)); \ 2654 } \ 2655 __val; \ 2656 }) 2657 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; 2658 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 
RTW89_SUB_ENTITY_0); 2659 u8 ch = chan->channel; 2660 u8 subband = chan->subband_type; 2661 const s8 *thm_up_a = NULL; 2662 const s8 *thm_down_a = NULL; 2663 u8 thermal = 0xff; 2664 s8 thm_ofst[64] = {0}; 2665 u32 tmp = 0; 2666 u8 i, j; 2667 2668 switch (subband) { 2669 default: 2670 case RTW89_CH_2G: 2671 thm_up_a = rtw89_8851b_trk_cfg.delta_swingidx_2ga_p; 2672 thm_down_a = rtw89_8851b_trk_cfg.delta_swingidx_2ga_n; 2673 break; 2674 case RTW89_CH_5G_BAND_1: 2675 thm_up_a = rtw89_8851b_trk_cfg.delta_swingidx_5ga_p[0]; 2676 thm_down_a = rtw89_8851b_trk_cfg.delta_swingidx_5ga_n[0]; 2677 break; 2678 case RTW89_CH_5G_BAND_3: 2679 thm_up_a = rtw89_8851b_trk_cfg.delta_swingidx_5ga_p[1]; 2680 thm_down_a = rtw89_8851b_trk_cfg.delta_swingidx_5ga_n[1]; 2681 break; 2682 case RTW89_CH_5G_BAND_4: 2683 thm_up_a = rtw89_8851b_trk_cfg.delta_swingidx_5ga_p[2]; 2684 thm_down_a = rtw89_8851b_trk_cfg.delta_swingidx_5ga_n[2]; 2685 break; 2686 } 2687 2688 if (path == RF_PATH_A) { 2689 thermal = tssi_info->thermal[RF_PATH_A]; 2690 2691 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 2692 "[TSSI] ch=%d thermal_pathA=0x%x\n", ch, thermal); 2693 2694 rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_DIS, 0x0); 2695 rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_TRK, 0x1); 2696 2697 if (thermal == 0xff) { 2698 rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, 32); 2699 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, 32); 2700 2701 for (i = 0; i < 64; i += 4) { 2702 rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, 0x0); 2703 2704 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 2705 "[TSSI] write 0x%x val=0x%08x\n", 2706 R_P0_TSSI_BASE + i, 0x0); 2707 } 2708 2709 } else { 2710 rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, 2711 thermal); 2712 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, 2713 thermal); 2714 2715 i = 0; 2716 for (j = 0; j < 32; j++) 2717 thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ? 
2718 -thm_down_a[i++] : 2719 -thm_down_a[DELTA_SWINGIDX_SIZE - 1]; 2720 2721 i = 1; 2722 for (j = 63; j >= 32; j--) 2723 thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ? 2724 thm_up_a[i++] : 2725 thm_up_a[DELTA_SWINGIDX_SIZE - 1]; 2726 2727 for (i = 0; i < 64; i += 4) { 2728 tmp = RTW8851B_TSSI_GET_VAL(thm_ofst, i); 2729 rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, tmp); 2730 2731 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 2732 "[TSSI] write 0x%x val=0x%08x\n", 2733 0x5c00 + i, tmp); 2734 } 2735 } 2736 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x1); 2737 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x0); 2738 } 2739 #undef RTW8851B_TSSI_GET_VAL 2740 } 2741 2742 static void _tssi_set_dac_gain_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2743 enum rtw89_rf_path path) 2744 { 2745 rtw89_rfk_parser(rtwdev, &rtw8851b_tssi_dac_gain_defs_a_tbl); 2746 } 2747 2748 static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2749 enum rtw89_rf_path path) 2750 { 2751 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 2752 enum rtw89_band band = chan->band_type; 2753 2754 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, 2755 &rtw8851b_tssi_slope_a_defs_2g_tbl, 2756 &rtw8851b_tssi_slope_a_defs_5g_tbl); 2757 } 2758 2759 static void _tssi_alignment_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2760 enum rtw89_rf_path path, bool all) 2761 { 2762 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 2763 enum rtw89_band band = chan->band_type; 2764 2765 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, 2766 &rtw8851b_tssi_align_a_2g_defs_tbl, 2767 &rtw8851b_tssi_align_a_5g_defs_tbl); 2768 } 2769 2770 static void _tssi_set_tssi_slope(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2771 enum rtw89_rf_path path) 2772 { 2773 rtw89_rfk_parser(rtwdev, &rtw8851b_tssi_slope_defs_a_tbl); 2774 } 2775 2776 static void _tssi_set_tssi_track(struct rtw89_dev *rtwdev, enum 
rtw89_phy_idx phy, 2777 enum rtw89_rf_path path) 2778 { 2779 rtw89_rfk_parser(rtwdev, &rtw8851b_tssi_track_defs_a_tbl); 2780 } 2781 2782 static void _tssi_set_txagc_offset_mv_avg(struct rtw89_dev *rtwdev, 2783 enum rtw89_phy_idx phy, 2784 enum rtw89_rf_path path) 2785 { 2786 rtw89_rfk_parser(rtwdev, &rtw8851b_tssi_mv_avg_defs_a_tbl); 2787 } 2788 2789 static void _tssi_enable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) 2790 { 2791 _tssi_set_tssi_track(rtwdev, phy, RF_PATH_A); 2792 _tssi_set_txagc_offset_mv_avg(rtwdev, phy, RF_PATH_A); 2793 2794 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_CLR, 0x0); 2795 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_EN, 0x0); 2796 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_EN, 0x1); 2797 rtw89_write_rf(rtwdev, RF_PATH_A, RR_TXGA_V1, RR_TXGA_V1_TRK_EN, 0x1); 2798 2799 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0); 2800 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_RFC, 0x3); 2801 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT, 0xc0); 2802 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0); 2803 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x1); 2804 2805 rtwdev->is_tssi_mode[RF_PATH_A] = true; 2806 } 2807 2808 static void _tssi_disable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) 2809 { 2810 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_EN, 0x0); 2811 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0); 2812 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x1); 2813 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0); 2814 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_CLR, 0x1); 2815 2816 rtwdev->is_tssi_mode[RF_PATH_A] = false; 2817 } 2818 2819 static u32 _tssi_get_cck_group(struct rtw89_dev *rtwdev, u8 ch) 2820 { 2821 switch (ch) { 2822 case 1 ... 2: 2823 return 0; 2824 case 3 ... 5: 2825 return 1; 2826 case 6 ... 
 8:
		return 2;
	case 9 ... 11:
		return 3;
	case 12 ... 13:
		return 4;
	case 14:
		return 5;
	}

	return 0;
}

/* Bit 31 flags a channel that falls between two calibration groups; the DE
 * value is then the average of the two neighbouring groups (IDX1 and IDX2).
 */
#define TSSI_EXTRA_GROUP_BIT (BIT(31))
#define TSSI_EXTRA_GROUP(idx) (TSSI_EXTRA_GROUP_BIT | (idx))
#define IS_TSSI_EXTRA_GROUP(group) ((group) & TSSI_EXTRA_GROUP_BIT)
#define TSSI_EXTRA_GET_GROUP_IDX1(group) ((group) & ~TSSI_EXTRA_GROUP_BIT)
#define TSSI_EXTRA_GET_GROUP_IDX2(group) (TSSI_EXTRA_GET_GROUP_IDX1(group) + 1)

/* Map a channel number to its OFDM/MCS TSSI DE group. Channels sitting
 * between two groups return TSSI_EXTRA_GROUP(n), meaning "interpolate
 * between group n and n+1". Unknown channels return group 0.
 */
static u32 _tssi_get_ofdm_group(struct rtw89_dev *rtwdev, u8 ch)
{
	switch (ch) {
	case 1 ... 2:
		return 0;
	case 3 ... 5:
		return 1;
	case 6 ... 8:
		return 2;
	case 9 ... 11:
		return 3;
	case 12 ... 14:
		return 4;
	case 36 ... 40:
		return 5;
	case 41 ... 43:
		return TSSI_EXTRA_GROUP(5);
	case 44 ... 48:
		return 6;
	case 49 ... 51:
		return TSSI_EXTRA_GROUP(6);
	case 52 ... 56:
		return 7;
	case 57 ... 59:
		return TSSI_EXTRA_GROUP(7);
	case 60 ... 64:
		return 8;
	case 100 ... 104:
		return 9;
	case 105 ... 107:
		return TSSI_EXTRA_GROUP(9);
	case 108 ... 112:
		return 10;
	case 113 ... 115:
		return TSSI_EXTRA_GROUP(10);
	case 116 ... 120:
		return 11;
	case 121 ... 123:
		return TSSI_EXTRA_GROUP(11);
	case 124 ... 128:
		return 12;
	case 129 ... 131:
		return TSSI_EXTRA_GROUP(12);
	case 132 ... 136:
		return 13;
	case 137 ... 139:
		return TSSI_EXTRA_GROUP(13);
	case 140 ... 144:
		return 14;
	case 149 ... 153:
		return 15;
	case 154 ... 156:
		return TSSI_EXTRA_GROUP(15);
	case 157 ... 161:
		return 16;
	case 162 ... 164:
		return TSSI_EXTRA_GROUP(16);
	case 165 ... 169:
		return 17;
	case 170 ... 172:
		return TSSI_EXTRA_GROUP(17);
	case 173 ... 177:
		return 18;
	}

	return 0;
}

/* Map a channel number to its (coarser) TSSI trim group; unknown channels
 * return group 0.
 */
static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch)
{
	switch (ch) {
	case 1 ... 8:
		return 0;
	case 9 ... 14:
		return 1;
	case 36 ... 48:
		return 2;
	case 52 ... 64:
		return 3;
	case 100 ... 112:
		return 4;
	case 116 ... 128:
		return 5;
	case 132 ... 144:
		return 6;
	case 149 ... 177:
		return 7;
	}

	return 0;
}

/* Look up the OFDM/MCS TSSI DE (delta-of-efuse) value for the current
 * channel. For "extra" groups the result is the average of the two
 * neighbouring group entries.
 */
static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			    enum rtw89_rf_path path)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	u32 gidx, gidx_1st, gidx_2nd;
	u8 ch = chan->channel;
	s8 de_1st;
	s8 de_2nd;
	s8 val;

	gidx = _tssi_get_ofdm_group(rtwdev, ch);

	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
		    "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n", path, gidx);

	if (IS_TSSI_EXTRA_GROUP(gidx)) {
		gidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(gidx);
		gidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(gidx);
		de_1st = tssi_info->tssi_mcs[path][gidx_1st];
		de_2nd = tssi_info->tssi_mcs[path][gidx_2nd];
		val = (de_1st + de_2nd) / 2;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
			    path, val, de_1st, de_2nd);
	} else {
		val = tssi_info->tssi_mcs[path][gidx];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
	}

	return val;
}

/* Look up the TSSI trim DE for the current channel, averaging the two
 * neighbouring trim groups for "extra" group indices.
 */
static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				 enum rtw89_rf_path path)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	u32 tgidx, tgidx_1st, tgidx_2nd;
	u8 ch = chan->channel;
	s8 tde_1st;
	s8 tde_2nd;
	s8 val;

	tgidx = _tssi_get_trim_group(rtwdev, ch);

	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
		    "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
		    path, tgidx);

	if (IS_TSSI_EXTRA_GROUP(tgidx)) {
		/* Channel sits between two trim groups: average both entries. */
		tgidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
		tgidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
		tde_1st = tssi_info->tssi_trim[path][tgidx_1st];
		tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd];
		val = (tde_1st + tde_2nd) / 2;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
			    path, val, tde_1st, tde_2nd);
	} else {
		val = tssi_info->tssi_trim[path][tgidx];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
			    path, val);
	}

	return val;
}

/* Combine efuse-derived DE values with trim offsets and program the
 * per-bandwidth TSSI DE fields (bits [21:12]) for CCK and MCS rates on
 * every TSSI path (only path A on 8851B).
 */
static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	u8 ch = chan->channel;
	u8 gidx;
	s8 ofdm_de;
	s8 trim_de;
	s32 val;
	u32 i;

	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n",
		    phy, ch);

	for (i = RF_PATH_A; i < RTW8851B_TSSI_PATH_NR; i++) {
		/* CCK: efuse DE plus trim, written to both long/short regs */
		gidx = _tssi_get_cck_group(rtwdev, ch);
		trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
		val = tssi_info->tssi_cck[i][gidx] + trim_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d cck[%d]=0x%x trim=0x%x\n",
			    i, gidx, tssi_info->tssi_cck[i][gidx], trim_de);

		rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_long[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_short[i], _TSSI_DE_MASK, val);

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] Set TSSI CCK DE 0x%x[21:12]=0x%x\n",
			    _tssi_de_cck_long[i],
			    rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i],
						  _TSSI_DE_MASK));

		/* OFDM/MCS: same DE value is applied to every bandwidth reg */
		ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i);
		trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
		val = ofdm_de + trim_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs=0x%x trim=0x%x\n",
			    i, ofdm_de, trim_de);

		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_20m[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_40m[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m_80m[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_5m[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_10m[i], _TSSI_DE_MASK, val);

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] Set TSSI MCS DE 0x%x[21:12]=0x%x\n",
			    _tssi_de_mcs_20m[i],
			    rtw89_phy_read32_mask(rtwdev, _tssi_de_mcs_20m[i],
						  _TSSI_DE_MASK));
	}
}

/* Dump the TSSI PA-K / alignment registers for the given path (debug aid;
 * path << 13 is the per-path register stride).
 */
static void _tssi_alimentk_dump_result(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[TSSI PA K]\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n"
		    "0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n",
		    R_TSSI_PA_K1 + (path << 13),
		    rtw89_phy_read32_mask(rtwdev, R_TSSI_PA_K1 + (path << 13), MASKDWORD),
		    R_TSSI_PA_K2 + (path << 13),
		    rtw89_phy_read32_mask(rtwdev, R_TSSI_PA_K2 + (path << 13), MASKDWORD),
		    R_P0_TSSI_ALIM1 + (path << 13),
		    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD),
		    R_P0_TSSI_ALIM3 + (path << 13),
		    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD),
		    R_TSSI_PA_K5 + (path << 13),
		    rtw89_phy_read32_mask(rtwdev, R_TSSI_PA_K5 + (path << 13), MASKDWORD),
		    R_P0_TSSI_ALIM2 + (path << 13),
		    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD),
		    R_P0_TSSI_ALIM4 + (path << 13),
		    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM4 +
					  (path << 13), MASKDWORD),
		    R_TSSI_PA_K8 + (path << 13),
		    rtw89_phy_read32_mask(rtwdev, R_TSSI_PA_K8 + (path << 13), MASKDWORD));
}

/* Restore previously saved TSSI alignment values for the band containing
 * the current channel, if an alignment run already completed for it, then
 * dump the resulting registers.
 */
static void _tssi_alimentk_done(struct rtw89_dev *rtwdev,
				enum rtw89_phy_idx phy, enum rtw89_rf_path path)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	u8 channel = chan->channel;
	u8 band;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "======>%s phy=%d path=%d\n", __func__, phy, path);

	/* Classify the channel into an alignment band; anything unknown
	 * falls back to 2G.
	 */
	if (channel >= 1 && channel <= 14)
		band = TSSI_ALIMK_2G;
	else if (channel >= 36 && channel <= 64)
		band = TSSI_ALIMK_5GL;
	else if (channel >= 100 && channel <= 144)
		band = TSSI_ALIMK_5GM;
	else if (channel >= 149 && channel <= 177)
		band = TSSI_ALIMK_5GH;
	else
		band = TSSI_ALIMK_2G;

	if (tssi_info->alignment_done[path][band]) {
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD,
				       tssi_info->alignment_value[path][band][0]);
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD,
				       tssi_info->alignment_value[path][band][1]);
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD,
				       tssi_info->alignment_value[path][band][2]);
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD,
				       tssi_info->alignment_value[path][band][3]);
	}

	_tssi_alimentk_dump_result(rtwdev, path);
}

/* Set the by-rate DPD (digital pre-distortion) CFIR offset. */
static void rtw8851b_by_rate_dpd(struct rtw89_dev *rtwdev)
{
	rtw89_write32_mask(rtwdev, R_AX_PWR_SWING_OTHER_CTRL0,
			   B_AX_CFIR_BY_RATE_OFF_MASK, 0x21861);
}

/* One-time DPK initialization. */
void rtw8851b_dpk_init(struct rtw89_dev *rtwdev)
{
	rtw8851b_by_rate_dpd(rtwdev);
}

/* Automatic amplitude control calibration (AACK). Puts the RF in RX mode,
 * then retries the calibration up to four times until all four VCO current
 * readbacks (ib[]) are non-zero, restoring RR_RSV1 afterwards.
 */
void rtw8851b_aack(struct rtw89_dev *rtwdev)
{
	u32 tmp05, ib[4];
	u32 tmp;
	int ret;
	int rek;
	int i;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]DO AACK\n");

	tmp05 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_RSV1, RFREG_MASK);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RR_MOD_MASK, 0x3);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RFREG_MASK, 0x0);

	for (rek = 0; rek < 4; rek++) {
		/* Kick the AACK engine (bit 0 toggled via 0x8201e -> 0x8201f) */
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_AACK, RFREG_MASK, 0x8201e);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_AACK, RFREG_MASK, 0x8201f);
		fsleep(100);

		/* Poll RF reg 0xd0 bit 16 for completion (up to 1 ms) */
		ret = read_poll_timeout_atomic(rtw89_read_rf, tmp, tmp,
					       1, 1000, false,
					       rtwdev, RF_PATH_A, 0xd0, BIT(16));
		if (ret)
			rtw89_warn(rtwdev, "[LCK]AACK timeout\n");

		rtw89_write_rf(rtwdev, RF_PATH_A, RR_VCI, RR_VCI_ON, 0x1);
		for (i = 0; i < 4; i++) {
			rtw89_write_rf(rtwdev, RF_PATH_A, RR_VCO, RR_VCO_SEL, i);
			ib[i] = rtw89_read_rf(rtwdev, RF_PATH_A, RR_IBD, RR_IBD_VAL);
		}
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_VCI, RR_VCI_ON, 0x0);

		if (ib[0] != 0 && ib[1] != 0 && ib[2] != 0 && ib[3] != 0)
			break;
	}

	if (rek != 0)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]AACK rek = %d\n", rek);

	rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RFREG_MASK, tmp05);
}

/* RC calibration entry point (single path on 8851B). */
void rtw8851b_rck(struct rtw89_dev *rtwdev)
{
	_rck(rtwdev, RF_PATH_A);
}

/* DAC calibration entry point. */
void rtw8851b_dack(struct rtw89_dev *rtwdev)
{
	_dac_cal(rtwdev, false);
}

/* IQ calibration entry point: notify BT-coex, halt scheduled TX and wait
 * for RX idle, run IQK, then resume TX and notify coex again.
 */
void rtw8851b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
	u32 tx_en;

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
	rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));

	_iqk_init(rtwdev);
	_iqk(rtwdev, phy_idx, false);

	rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
}

/* RX DC offset calibration entry point: same stop-TX/coex bracket as IQK. */
void
rtw8851b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
	u32 tx_en;

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START);
	rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));

	_rx_dck(rtwdev, phy_idx, false);

	rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP);
}

/* DPK (digital pre-distortion calibration) entry point: bracket with coex
 * notification and TX stop/resume, run with reload disabled.
 */
void rtw8851b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
	u32 tx_en;

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
	rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));

	rtwdev->dpk.is_dpk_enable = true;
	rtwdev->dpk.is_dpk_reload_en = false;
	_dpk(rtwdev, phy_idx, false);

	rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
}

/* Periodic DPK thermal tracking hook. */
void rtw8851b_dpk_track(struct rtw89_dev *rtwdev)
{
	_dpk_track(rtwdev);
}

/* Full TSSI bring-up: disable TSSI, run the per-path init sequence (system,
 * TX power control, DCK, thermal table, DAC gain, slope, alignment), then
 * re-enable TSSI and program the efuse-derived DE values. The hwtx_en
 * argument is accepted but unused here.
 */
void rtw8851b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en)
{
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_A);
	u8 i;

	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n", __func__, phy);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);

	_tssi_disable(rtwdev, phy);

	for (i = RF_PATH_A; i < RF_PATH_NUM_8851B; i++) {
		_tssi_set_sys(rtwdev, phy, i);
		_tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i);
		_tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i);
		_tssi_set_dck(rtwdev, phy, i);
		_tssi_set_tmeter_tbl(rtwdev, phy, i);
		_tssi_set_dac_gain_tbl(rtwdev, phy, i);
		_tssi_slope_cal_org(rtwdev, phy, i);
		_tssi_alignment_default(rtwdev, phy, i, true);
		_tssi_set_tssi_slope(rtwdev, phy, i);
	}

	_tssi_enable(rtwdev, phy);
	_tssi_set_efuse_to_de(rtwdev, phy);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}

/* Lightweight TSSI reconfiguration used during scan: only the channel-
 * dependent pieces (system, thermal table, slope, alignment) are redone.
 */
void rtw8851b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	u8 channel = chan->channel;
	u32 i;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "======>%s phy=%d channel=%d\n", __func__, phy, channel);

	_tssi_disable(rtwdev, phy);

	for (i = RF_PATH_A; i < RF_PATH_NUM_8851B; i++) {
		_tssi_set_sys(rtwdev, phy, i);
		_tssi_set_tmeter_tbl(rtwdev, phy, i);
		_tssi_slope_cal_org(rtwdev, phy, i);
		_tssi_alignment_default(rtwdev, phy, i, true);
	}

	_tssi_enable(rtwdev, phy);
	_tssi_set_efuse_to_de(rtwdev, phy);
}

/* Restore the default TX AGC offset at scan end. With enable=true (scan
 * start) this is a no-op; at scan end it resets B_P0_TSSI_OFT to 0xc0,
 * pulses OFT_EN, and restores saved alignment values.
 */
static void rtw8851b_tssi_default_txagc(struct rtw89_dev *rtwdev,
					enum rtw89_phy_idx phy, bool enable)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	u8 channel = chan->channel;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "======> %s ch=%d\n",
		    __func__, channel);

	if (enable)
		return;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "======>%s 1 SCAN_END Set 0x5818[7:0]=0x%x\n",
		    __func__,
		    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT));

	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT, 0xc0);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x1);

	_tssi_alimentk_done(rtwdev, phy, RF_PATH_A);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "======>%s 2 SCAN_END Set 0x5818[7:0]=0x%x\n",
		    __func__,
		    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_TRK,
					  B_P0_TSSI_OFT));

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "======> %s SCAN_END\n", __func__);
}

/* Scan notification hook: apply the scan-start / scan-end TX AGC default. */
void rtw8851b_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
			       enum rtw89_phy_idx phy_idx)
{
	if (scan_start)
		rtw8851b_tssi_default_txagc(rtwdev, phy_idx, true);
	else
		rtw8851b_tssi_default_txagc(rtwdev, phy_idx, false);
}

/* Program the RF bandwidth field of RF reg 0x18 (dav selects the CFGCH
 * vs CFGCH_V1 copy). 5/10 MHz widths are configured as 20 MHz; unknown
 * widths only log and leave the BW field cleared.
 */
static void _bw_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
			enum rtw89_bandwidth bw, bool dav)
{
	u32 reg18_addr = dav ? RR_CFGCH : RR_CFGCH_V1;
	u32 rf_reg18;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===> %s\n", __func__);

	rf_reg18 = rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK);
	if (rf_reg18 == INV_RF_DATA) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RFK]Invalid RF_0x18 for Path-%d\n", path);
		return;
	}
	rf_reg18 &= ~RR_CFGCH_BW;

	switch (bw) {
	case RTW89_CHANNEL_WIDTH_5:
	case RTW89_CHANNEL_WIDTH_10:
	case RTW89_CHANNEL_WIDTH_20:
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_20M);
		break;
	case RTW89_CHANNEL_WIDTH_40:
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_40M);
		break;
	case RTW89_CHANNEL_WIDTH_80:
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_80M);
		break;
	default:
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]Fail to set CH\n");
	}

	/* Clear control bits that must not carry over, then force BW2 set */
	rf_reg18 &= ~(RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH | RR_CFGCH_BCN |
		      RR_CFGCH_BW2) & RFREG_MASK;
	rf_reg18 |= RR_CFGCH_BW2;
	rtw89_write_rf(rtwdev, path, reg18_addr, RFREG_MASK, rf_reg18);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK] set %x at path%d, %x =0x%x\n",
		    bw, path, reg18_addr,
		    rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK));
}

/* Apply the bandwidth to both copies (dav true/false) of RF reg 0x18;
 * phy is unused since 8851B has a single path.
 */
static void _ctrl_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		     enum rtw89_bandwidth bw)
{
	_bw_setting(rtwdev, RF_PATH_A, bw, true);
	_bw_setting(rtwdev, RF_PATH_A, bw, false);
}

/* Write RF reg 0x18 (channel config) with the LDO forced on, and poll the
 * LPF busy flag until the synthesizer settles. Returns true on timeout.
 */
static bool _set_s0_arfc18(struct rtw89_dev *rtwdev, u32 val)
{
	u32 bak;
	u32 tmp;
	int ret;

	bak = rtw89_read_rf(rtwdev, RF_PATH_A, RR_LDO, RFREG_MASK);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_LDO, RR_LDO_SEL, 0x1);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK, val);

	ret = read_poll_timeout_atomic(rtw89_read_rf, tmp, tmp == 0, 1, 1000,
				       false, rtwdev, RF_PATH_A, RR_LPF, RR_LPF_BUSY);
	if (ret)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]LCK timeout\n");

	rtw89_write_rf(rtwdev, RF_PATH_A, RR_LDO, RFREG_MASK, bak);

	return !!ret;
}

/* Synthesizer lock check with escalating recovery: first reset the SYN MMD,
 * then re-write RF 0x18, and finally power-cycle the synthesizer — each step
 * only taken while RR_SYNFB_LK still reads unlocked.
 */
static void _lck_check(struct rtw89_dev *rtwdev)
{
	u32 tmp;

	if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]SYN MMD reset\n");

		rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_EN, 0x1);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_SYN, 0x0);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_SYN, 0x1);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_EN, 0x0);
	}

	udelay(10);

	if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]re-set RF 0x18\n");

		rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1);
		tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
		_set_s0_arfc18(rtwdev, tmp);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0);
	}

	if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]SYN off/on\n");

		/* Read-back-and-rewrite of POW/SX, presumably to refresh the
		 * register state before the power cycle — TODO confirm intent.
		 */
		tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_POW, RFREG_MASK);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RFREG_MASK, tmp);
		tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_SX, RFREG_MASK);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_SX, RFREG_MASK, tmp);

		/* Power-cycle the synthesizer (SYN off then on) under LUT
		 * modify mode, then retrigger the lock via RF 0x18.
		 */
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_SYNLUT, RR_SYNLUT_MOD, 0x1);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x0);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x3);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_SYNLUT, RR_SYNLUT_MOD, 0x0);

		rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1);
		tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
		_set_s0_arfc18(rtwdev, tmp);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0);

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]0xb2=%x, 0xc5=%x\n",
			    rtw89_read_rf(rtwdev, RF_PATH_A, RR_VCO, RFREG_MASK),
			    rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RFREG_MASK));
	}
}

/* Write RF 0x18 and, if the write completed without timing out, verify the
 * synthesizer actually locked (running recovery if not).
 */
static void _set_ch(struct rtw89_dev *rtwdev, u32 val)
{
	bool timeout;

	timeout = _set_s0_arfc18(rtwdev, val);
	if (!timeout)
		_lck_check(rtwdev);
}

/* Program the channel number and band bits of RF reg 0x18 (dav selects
 * which copy). The primary (path A, dav) write goes through _set_ch() so
 * the synthesizer lock is verified; other writes go straight to the RF reg.
 * Finally RR_LCKST_BIN is toggled 0->1.
 */
static void _ch_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
			u8 central_ch, bool dav)
{
	u32 reg18_addr = dav ? RR_CFGCH : RR_CFGCH_V1;
	bool is_2g_ch = central_ch <= 14;
	u32 rf_reg18;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===> %s\n", __func__);

	rf_reg18 = rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK);
	rf_reg18 &= ~(RR_CFGCH_BAND1 | RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH |
		      RR_CFGCH_BCN | RR_CFGCH_BAND0 | RR_CFGCH_CH);
	rf_reg18 |= FIELD_PREP(RR_CFGCH_CH, central_ch);

	if (!is_2g_ch)
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_5G) |
			    FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_5G);

	rf_reg18 &= ~(RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH | RR_CFGCH_BCN |
		      RR_CFGCH_BW2) & RFREG_MASK;
	rf_reg18 |= RR_CFGCH_BW2;

	if (path == RF_PATH_A && dav)
		_set_ch(rtwdev, rf_reg18);
	else
		rtw89_write_rf(rtwdev, path, reg18_addr, RFREG_MASK, rf_reg18);

	rtw89_write_rf(rtwdev, path, RR_LCKST, RR_LCKST_BIN, 0);
	rtw89_write_rf(rtwdev, path, RR_LCKST, RR_LCKST_BIN, 1);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[RFK]CH: %d for Path-%d, reg0x%x = 0x%x\n",
		    central_ch, path, reg18_addr,
		    rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK));
}

/* Apply the channel to both copies (dav true/false) of RF reg 0x18. */
static void _ctrl_ch(struct rtw89_dev *rtwdev, u8 central_ch)
{
	_ch_setting(rtwdev, RF_PATH_A, central_ch, true);
	_ch_setting(rtwdev, RF_PATH_A, central_ch, false);
}

/* Set the RX baseband filter bandwidth via the LUT write interface; widths
 * other than 20/40/80 MHz fall back to the widest setting (0x3).
 */
static void _set_rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_bandwidth bw,
			 enum rtw89_rf_path path)
{
	rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x1);
	rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M2, 0x12);

	if (bw == RTW89_CHANNEL_WIDTH_20)
		rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x1b);
	else if (bw == RTW89_CHANNEL_WIDTH_40)
		rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x13);
	else if (bw == RTW89_CHANNEL_WIDTH_80)
		rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0xb);
	else
		rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x3);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK] set S%d RXBB BW 0x3F = 0x%x\n", path,
		    rtw89_read_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB));

	/* Close the LUT write window again */
	rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x0);
}

/* Apply the RX BB bandwidth to every path enabled for this phy. */
static void _rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		     enum rtw89_bandwidth bw)
{
	u8 kpath, path;

	kpath = _kpath(rtwdev, phy);

	for (path = 0; path < RF_PATH_NUM_8851B; path++) {
		if (!(kpath & BIT(path)))
			continue;

		_set_rxbb_bw(rtwdev, bw, path);
	}
}

/* Combined channel + bandwidth RF programming. The band argument is
 * accepted for interface symmetry but unused here (the 2G/5G decision is
 * derived from the channel number in _ch_setting()).
 */
static void rtw8851b_ctrl_bw_ch(struct rtw89_dev *rtwdev,
				enum rtw89_phy_idx phy, u8 central_ch,
				enum rtw89_band band, enum rtw89_bandwidth bw)
{
	_ctrl_ch(rtwdev, central_ch);
	_ctrl_bw(rtwdev, phy, bw);
	_rxbb_bw(rtwdev, phy, bw);
}

/* Chip-ops entry point: program the RF side for the given channel. */
void rtw8851b_set_channel_rf(struct rtw89_dev *rtwdev,
			     const struct rtw89_chan *chan,
			     enum rtw89_phy_idx phy_idx)
{
	rtw8851b_ctrl_bw_ch(rtwdev, phy_idx, chan->channel, chan->band_type,
			    chan->band_width);
}