1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* Copyright(c) 2019-2022 Realtek Corporation 3 */ 4 5 #include "coex.h" 6 #include "debug.h" 7 #include "phy.h" 8 #include "reg.h" 9 #include "rtw8852c.h" 10 #include "rtw8852c_rfk.h" 11 #include "rtw8852c_rfk_table.h" 12 #include "rtw8852c_table.h" 13 14 #define _TSSI_DE_MASK GENMASK(21, 12) 15 static const u32 _tssi_de_cck_long[RF_PATH_NUM_8852C] = {0x5858, 0x7858}; 16 static const u32 _tssi_de_cck_short[RF_PATH_NUM_8852C] = {0x5860, 0x7860}; 17 static const u32 _tssi_de_mcs_20m[RF_PATH_NUM_8852C] = {0x5838, 0x7838}; 18 static const u32 _tssi_de_mcs_40m[RF_PATH_NUM_8852C] = {0x5840, 0x7840}; 19 static const u32 _tssi_de_mcs_80m[RF_PATH_NUM_8852C] = {0x5848, 0x7848}; 20 static const u32 _tssi_de_mcs_80m_80m[RF_PATH_NUM_8852C] = {0x5850, 0x7850}; 21 static const u32 _tssi_de_mcs_5m[RF_PATH_NUM_8852C] = {0x5828, 0x7828}; 22 static const u32 _tssi_de_mcs_10m[RF_PATH_NUM_8852C] = {0x5830, 0x7830}; 23 24 static const u32 rtw8852c_backup_bb_regs[] = { 25 0x8120, 0xc0d4, 0xc0d8, 0xc0e8, 0x8220, 0xc1d4, 0xc1d8, 0xc1e8 26 }; 27 28 static const u32 rtw8852c_backup_rf_regs[] = { 29 0xdf, 0x8f, 0x97, 0xa3, 0x5, 0x10005 30 }; 31 32 #define BACKUP_BB_REGS_NR ARRAY_SIZE(rtw8852c_backup_bb_regs) 33 #define BACKUP_RF_REGS_NR ARRAY_SIZE(rtw8852c_backup_rf_regs) 34 35 #define RXK_GROUP_NR 4 36 static const u32 _rxk_a6_idxrxgain[RXK_GROUP_NR] = {0x190, 0x196, 0x290, 0x316}; 37 static const u32 _rxk_a6_idxattc2[RXK_GROUP_NR] = {0x00, 0x0, 0x00, 0x00}; 38 static const u32 _rxk_a_idxrxgain[RXK_GROUP_NR] = {0x190, 0x198, 0x310, 0x318}; 39 static const u32 _rxk_a_idxattc2[RXK_GROUP_NR] = {0x00, 0x00, 0x00, 0x00}; 40 static const u32 _rxk_g_idxrxgain[RXK_GROUP_NR] = {0x252, 0x26c, 0x350, 0x360}; 41 static const u32 _rxk_g_idxattc2[RXK_GROUP_NR] = {0x00, 0x07, 0x00, 0x3}; 42 43 #define TXK_GROUP_NR 3 44 static const u32 _txk_a6_power_range[TXK_GROUP_NR] = {0x0, 0x0, 0x0}; 45 static const u32 _txk_a6_track_range[TXK_GROUP_NR] = {0x6, 0x7, 0x7}; 46 static const u32 _txk_a6_gain_bb[TXK_GROUP_NR] = {0x12, 0x09, 0x0e}; 47 static const u32 _txk_a6_itqt[TXK_GROUP_NR] = {0x12, 0x12, 0x12}; 48 static const u32 _txk_a_power_range[TXK_GROUP_NR] = {0x0, 0x0, 0x0}; 49 static const u32 _txk_a_track_range[TXK_GROUP_NR] = {0x5, 0x6, 0x7}; 50 static const u32 _txk_a_gain_bb[TXK_GROUP_NR] = {0x12, 0x09, 0x0e}; 51 static const u32 _txk_a_itqt[TXK_GROUP_NR] = {0x12, 0x12, 0x12}; 52 static const u32 _txk_g_power_range[TXK_GROUP_NR] = {0x0, 0x0, 0x0}; 53 static const u32 _txk_g_track_range[TXK_GROUP_NR] = {0x5, 0x6, 0x6}; 54 static const u32 _txk_g_gain_bb[TXK_GROUP_NR] = {0x0e, 0x0a, 0x0e}; 55 static const u32 _txk_g_itqt[TXK_GROUP_NR] = { 0x12, 0x12, 0x12}; 56 57 static const u32 dpk_par_regs[RTW89_DPK_RF_PATH][4] = { 58 {0x8190, 0x8194, 0x8198, 0x81a4}, 59 {0x81a8, 0x81c4, 0x81c8, 0x81e8}, 60 }; 61 62 static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) 63 { 64 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]dbcc_en: %x, PHY%d\n", 65 rtwdev->dbcc_en, phy_idx); 66 67 if (!rtwdev->dbcc_en) 68 return RF_AB; 69 70 if (phy_idx == RTW89_PHY_0) 71 return RF_A; 72 else 73 return RF_B; 74 } 75 76 static void _rfk_backup_bb_reg(struct rtw89_dev *rtwdev, u32 backup_bb_reg_val[]) 77 { 78 u32 i; 79 80 for (i = 0; i < BACKUP_BB_REGS_NR; i++) { 81 backup_bb_reg_val[i] = 82 rtw89_phy_read32_mask(rtwdev, rtw8852c_backup_bb_regs[i], 83 MASKDWORD); 84 rtw89_debug(rtwdev, RTW89_DBG_RFK, 85 "[IQK]backup bb reg : %x, value =%x\n", 86 rtw8852c_backup_bb_regs[i], backup_bb_reg_val[i]); 87 } 88 } 89 90 static void _rfk_backup_rf_reg(struct rtw89_dev *rtwdev, u32 backup_rf_reg_val[], 91 u8 rf_path) 92 { 93 u32 i; 94 95 for (i = 0; i < BACKUP_RF_REGS_NR; i++) { 96 backup_rf_reg_val[i] = 97 rtw89_read_rf(rtwdev, rf_path, 98 rtw8852c_backup_rf_regs[i], RFREG_MASK); 99 rtw89_debug(rtwdev, RTW89_DBG_RFK, 100 "[IQK]backup rf S%d reg : %x, value =%x\n", rf_path, 101 rtw8852c_backup_rf_regs[i], backup_rf_reg_val[i]); 102 } 103 } 104 105 static void _rfk_restore_bb_reg(struct rtw89_dev *rtwdev, u32 backup_bb_reg_val[]) 106 { 107 u32 i; 108 109 for (i = 0; i < BACKUP_BB_REGS_NR; i++) { 110 rtw89_phy_write32_mask(rtwdev, rtw8852c_backup_bb_regs[i], 111 MASKDWORD, backup_bb_reg_val[i]); 112 rtw89_debug(rtwdev, RTW89_DBG_RFK, 113 "[IQK]restore bb reg : %x, value =%x\n", 114 rtw8852c_backup_bb_regs[i], backup_bb_reg_val[i]); 115 } 116 } 117 118 static void _rfk_restore_rf_reg(struct rtw89_dev *rtwdev, u32 backup_rf_reg_val[], 119 u8 rf_path) 120 { 121 u32 i; 122 123 for (i = 0; i < BACKUP_RF_REGS_NR; i++) { 124 rtw89_write_rf(rtwdev, rf_path, rtw8852c_backup_rf_regs[i], 125 RFREG_MASK, backup_rf_reg_val[i]); 126 127 rtw89_debug(rtwdev, RTW89_DBG_RFK, 128 "[IQK]restore rf S%d reg: %x, value =%x\n", rf_path, 129 rtw8852c_backup_rf_regs[i], backup_rf_reg_val[i]); 130 } 131 } 132 133 static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath) 134 { 135 u8 path; 136 u32 rf_mode; 137 int ret; 138 139 for (path = 0; path < RF_PATH_MAX; path++) { 140 if (!(kpath & BIT(path))) 141 continue; 142 143 ret = read_poll_timeout_atomic(rtw89_read_rf, rf_mode, rf_mode != 2, 144 2, 5000, false, rtwdev, path, 0x00, 145 RR_MOD_MASK); 146 rtw89_debug(rtwdev, RTW89_DBG_RFK, 147 "[RFK] Wait S%d to Rx mode!! (ret = %d)\n", 148 path, ret); 149 } 150 } 151 152 static void _dack_dump(struct rtw89_dev *rtwdev) 153 { 154 struct rtw89_dack_info *dack = &rtwdev->dack; 155 u8 i; 156 u8 t; 157 158 rtw89_debug(rtwdev, RTW89_DBG_RFK, 159 "[DACK]S0 ADC_DCK ic = 0x%x, qc = 0x%x\n", 160 dack->addck_d[0][0], dack->addck_d[0][1]); 161 rtw89_debug(rtwdev, RTW89_DBG_RFK, 162 "[DACK]S1 ADC_DCK ic = 0x%x, qc = 0x%x\n", 163 dack->addck_d[1][0], dack->addck_d[1][1]); 164 rtw89_debug(rtwdev, RTW89_DBG_RFK, 165 "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n", 166 dack->dadck_d[0][0], dack->dadck_d[0][1]); 167 rtw89_debug(rtwdev, RTW89_DBG_RFK, 168 "[DACK]S1 DAC_DCK ic = 0x%x, qc = 0x%x\n", 169 dack->dadck_d[1][0], dack->dadck_d[1][1]); 170 171 rtw89_debug(rtwdev, RTW89_DBG_RFK, 172 "[DACK]S0 biask ic = 0x%x, qc = 0x%x\n", 173 dack->biask_d[0][0], dack->biask_d[0][1]); 174 rtw89_debug(rtwdev, RTW89_DBG_RFK, 175 "[DACK]S1 biask ic = 0x%x, qc = 0x%x\n", 176 dack->biask_d[1][0], dack->biask_d[1][1]); 177 178 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK ic:\n"); 179 for (i = 0; i < RTW89_DACK_MSBK_NR; i++) { 180 t = dack->msbk_d[0][0][i]; 181 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t); 182 } 183 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK qc:\n"); 184 for (i = 0; i < RTW89_DACK_MSBK_NR; i++) { 185 t = dack->msbk_d[0][1][i]; 186 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t); 187 } 188 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK ic:\n"); 189 for (i = 0; i < RTW89_DACK_MSBK_NR; i++) { 190 t = dack->msbk_d[1][0][i]; 191 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t); 192 } 193 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK qc:\n"); 194 for (i = 0; i < RTW89_DACK_MSBK_NR; i++) { 195 t = dack->msbk_d[1][1][i]; 196 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t); 197 } 198 } 199 200 static void _addck_backup(struct rtw89_dev *rtwdev) 201 { 202 struct rtw89_dack_info *dack = &rtwdev->dack; 203 204 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x0); 205 dack->addck_d[0][0] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, 206 B_ADDCKR0_A0); 207 dack->addck_d[0][1] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, 208 B_ADDCKR0_A1); 209 210 rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x0); 211 dack->addck_d[1][0] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR1, 212 B_ADDCKR1_A0); 213 dack->addck_d[1][1] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR1, 214 B_ADDCKR1_A1); 215 } 216 217 static void _addck_reload(struct rtw89_dev *rtwdev) 218 { 219 struct rtw89_dack_info *dack = &rtwdev->dack; 220 221 rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL1, 222 dack->addck_d[0][0]); 223 rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL0, 224 dack->addck_d[0][1]); 225 rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RLS, 0x3); 226 rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RL1, 227 dack->addck_d[1][0]); 228 rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RL0, 229 dack->addck_d[1][1]); 230 rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RLS, 0x3); 231 } 232 233 static void _dack_backup_s0(struct rtw89_dev *rtwdev) 234 { 235 struct rtw89_dack_info *dack = &rtwdev->dack; 236 u8 i; 237 238 rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1); 239 for (i = 0; i < RTW89_DACK_MSBK_NR; i++) { 240 rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_V, i); 241 dack->msbk_d[0][0][i] = rtw89_phy_read32_mask(rtwdev, 242 R_DACK_S0P2, 243 B_DACK_S0M0); 244 rtw89_phy_write32_mask(rtwdev, R_DCOF8, B_DCOF8_V, i); 245 dack->msbk_d[0][1][i] = rtw89_phy_read32_mask(rtwdev, 246 R_DACK_S0P3, 247 B_DACK_S0M1); 248 } 249 dack->biask_d[0][0] = rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS00, 250 B_DACK_BIAS00); 251 dack->biask_d[0][1] = rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS01, 252 B_DACK_BIAS01); 253 dack->dadck_d[0][0] = rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK00, 254 B_DACK_DADCK00); 255 dack->dadck_d[0][1] = rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK01, 256 B_DACK_DADCK01); 257 } 258 259 static void _dack_backup_s1(struct rtw89_dev *rtwdev) 260 { 261 struct rtw89_dack_info *dack = &rtwdev->dack; 262 u8 i; 263 264 rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1); 265 for (i = 0; i < RTW89_DACK_MSBK_NR; i++) { 266 rtw89_phy_write32_mask(rtwdev, R_DACK10, B_DACK10, i); 267 dack->msbk_d[1][0][i] = rtw89_phy_read32_mask(rtwdev, 268 R_DACK10S, 269 B_DACK10S); 270 rtw89_phy_write32_mask(rtwdev, R_DACK11, B_DACK11, i); 271 dack->msbk_d[1][1][i] = rtw89_phy_read32_mask(rtwdev, 272 R_DACK11S, 273 B_DACK11S); 274 } 275 dack->biask_d[1][0] = rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS10, 276 B_DACK_BIAS10); 277 dack->biask_d[1][1] = rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS11, 278 B_DACK_BIAS11); 279 dack->dadck_d[1][0] = rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK10, 280 B_DACK_DADCK10); 281 dack->dadck_d[1][1] = rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK11, 282 B_DACK_DADCK11); 283 } 284 285 static void _dack_reload_by_path(struct rtw89_dev *rtwdev, 286 enum rtw89_rf_path path, u8 index) 287 { 288 struct rtw89_dack_info *dack = &rtwdev->dack; 289 u32 idx_offset, path_offset; 290 u32 val32, offset, addr; 291 u8 i; 292 293 idx_offset = (index == 0 ? 0 : 0x14); 294 path_offset = (path == RF_PATH_A ? 0 : 0x28); 295 offset = idx_offset + path_offset; 296 297 rtw89_rfk_parser(rtwdev, &rtw8852c_dack_reload_defs_tbl); 298 299 /* msbk_d: 15/14/13/12 */ 300 val32 = 0x0; 301 for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++) 302 val32 |= dack->msbk_d[path][index][i + 12] << (i * 8); 303 addr = 0xc200 + offset; 304 rtw89_phy_write32(rtwdev, addr, val32); 305 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", addr, 306 rtw89_phy_read32_mask(rtwdev, addr, MASKDWORD)); 307 308 /* msbk_d: 11/10/9/8 */ 309 val32 = 0x0; 310 for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++) 311 val32 |= dack->msbk_d[path][index][i + 8] << (i * 8); 312 addr = 0xc204 + offset; 313 rtw89_phy_write32(rtwdev, addr, val32); 314 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", addr, 315 rtw89_phy_read32_mask(rtwdev, addr, MASKDWORD)); 316 317 /* msbk_d: 7/6/5/4 */ 318 val32 = 0x0; 319 for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++) 320 val32 |= dack->msbk_d[path][index][i + 4] << (i * 8); 321 addr = 0xc208 + offset; 322 rtw89_phy_write32(rtwdev, addr, val32); 323 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", addr, 324 rtw89_phy_read32_mask(rtwdev, addr, MASKDWORD)); 325 326 /* msbk_d: 3/2/1/0 */ 327 val32 = 0x0; 328 for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++) 329 val32 |= dack->msbk_d[path][index][i] << (i * 8); 330 addr = 0xc20c + offset; 331 rtw89_phy_write32(rtwdev, addr, val32); 332 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", addr, 333 rtw89_phy_read32_mask(rtwdev, addr, MASKDWORD)); 334 335 /* dadak_d/biask_d */ 336 val32 = (dack->biask_d[path][index] << 22) | 337 (dack->dadck_d[path][index] << 14); 338 addr = 0xc210 + offset; 339 rtw89_phy_write32(rtwdev, addr, val32); 340 rtw89_phy_write32_set(rtwdev, addr, BIT(1)); 341 } 342 343 static void _dack_reload(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) 344 { 345 u8 i; 346 347 for (i = 0; i < 2; i++) 348 _dack_reload_by_path(rtwdev, path, i); 349 } 350 351 static void _addck(struct rtw89_dev *rtwdev) 352 { 353 struct rtw89_dack_info *dack = &rtwdev->dack; 354 u32 val; 355 int ret; 356 357 /* S0 */ 358 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_RST, 0x1); 359 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_EN, 0x1); 360 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_EN, 0x0); 361 fsleep(1); 362 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x1); 363 364 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 365 1, 10000, false, rtwdev, 0xc0fc, BIT(0)); 366 if (ret) { 367 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADDCK timeout\n"); 368 dack->addck_timeout[0] = true; 369 } 370 371 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_RST, 0x0); 372 373 /* S1 */ 374 rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_RST, 0x1); 375 rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_EN, 0x1); 376 rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_EN, 0x0); 377 udelay(1); 378 rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x1); 379 380 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 381 1, 10000, false, rtwdev, 0xc1fc, BIT(0)); 382 if (ret) { 383 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADDCK timeout\n"); 384 dack->addck_timeout[0] = true; 385 } 386 rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_RST, 0x0); 387 } 388 389 static void _dack_reset(struct rtw89_dev *rtwdev, u8 path) 390 { 391 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, 392 &rtw8852c_dack_reset_defs_a_tbl, 393 &rtw8852c_dack_reset_defs_b_tbl); 394 } 395 396 enum adc_ck { 397 ADC_NA = 0, 398 ADC_480M = 1, 399 ADC_960M = 2, 400 ADC_1920M = 3, 401 }; 402 403 enum dac_ck { 404 DAC_40M = 0, 405 DAC_80M = 1, 406 DAC_120M = 2, 407 DAC_160M = 3, 408 DAC_240M = 4, 409 DAC_320M = 5, 410 DAC_480M = 6, 411 DAC_960M = 7, 412 }; 413 414 enum rf_mode { 415 RF_SHUT_DOWN = 0x0, 416 RF_STANDBY = 0x1, 417 RF_TX = 0x2, 418 RF_RX = 0x3, 419 RF_TXIQK = 0x4, 420 RF_DPK = 0x5, 421 RF_RXK1 = 0x6, 422 RF_RXK2 = 0x7, 423 }; 424 425 static void rtw8852c_txck_force(struct rtw89_dev *rtwdev, u8 path, bool force, 426 enum dac_ck ck) 427 { 428 rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_ON, 0x0); 429 430 if (!force) 431 return; 432 433 rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_VAL, ck); 434 rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_ON, 0x1); 435 } 436 437 static void rtw8852c_rxck_force(struct rtw89_dev *rtwdev, u8 path, bool force, 438 enum adc_ck ck) 439 { 440 rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_ON, 0x0); 441 442 if (!force) 443 return; 444 445 rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_VAL, ck); 446 rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_ON, 0x1); 447 } 448 449 static bool _check_dack_done(struct rtw89_dev *rtwdev, bool s0) 450 { 451 if (s0) { 452 if (rtw89_phy_read32_mask(rtwdev, R_DACK_S0P0, B_DACK_S0P0_OK) == 0 || 453 rtw89_phy_read32_mask(rtwdev, R_DACK_S0P1, B_DACK_S0P1_OK) == 0 || 454 rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0P2_OK) == 0 || 455 rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0P3_OK) == 0) 456 return false; 457 } else { 458 if (rtw89_phy_read32_mask(rtwdev, R_DACK_S1P0, B_DACK_S1P0_OK) == 0 || 459 rtw89_phy_read32_mask(rtwdev, R_DACK_S1P1, B_DACK_S1P1_OK) == 0 || 460 rtw89_phy_read32_mask(rtwdev, R_DACK_S1P2, B_DACK_S1P2_OK) == 0 || 461 rtw89_phy_read32_mask(rtwdev, R_DACK_S1P3, B_DACK_S1P3_OK) == 0) 462 return false; 463 } 464 465 return true; 466 } 467 468 static void _dack_s0(struct rtw89_dev *rtwdev) 469 { 470 struct rtw89_dack_info *dack = &rtwdev->dack; 471 bool done; 472 int ret; 473 474 rtw8852c_txck_force(rtwdev, RF_PATH_A, true, DAC_160M); 475 rtw89_rfk_parser(rtwdev, &rtw8852c_dack_defs_s0_tbl); 476 477 _dack_reset(rtwdev, RF_PATH_A); 478 479 rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_S, 0x1); 480 ret = read_poll_timeout_atomic(_check_dack_done, done, done, 481 1, 10000, false, rtwdev, true); 482 if (ret) { 483 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DACK timeout\n"); 484 dack->msbk_timeout[0] = true; 485 } 486 rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_S, 0x0); 487 rtw8852c_txck_force(rtwdev, RF_PATH_A, false, DAC_960M); 488 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S0 DADCK\n"); 489 490 _dack_backup_s0(rtwdev); 491 _dack_reload(rtwdev, RF_PATH_A); 492 rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0); 493 } 494 495 static void _dack_s1(struct rtw89_dev *rtwdev) 496 { 497 struct rtw89_dack_info *dack = &rtwdev->dack; 498 bool done; 499 int ret; 500 501 rtw8852c_txck_force(rtwdev, RF_PATH_B, true, DAC_160M); 502 rtw89_rfk_parser(rtwdev, &rtw8852c_dack_defs_s1_tbl); 503 504 _dack_reset(rtwdev, RF_PATH_B); 505 506 rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_EN, 0x1); 507 ret = read_poll_timeout_atomic(_check_dack_done, done, done, 508 1, 10000, false, rtwdev, false); 509 if (ret) { 510 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 DACK timeout\n"); 511 dack->msbk_timeout[0] = true; 512 } 513 rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_EN, 0x0); 514 rtw8852c_txck_force(rtwdev, RF_PATH_B, false, DAC_960M); 515 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S1 DADCK\n"); 516 517 _dack_backup_s1(rtwdev); 518 _dack_reload(rtwdev, RF_PATH_B); 519 rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0); 520 } 521 522 static void _dack(struct rtw89_dev *rtwdev) 523 { 524 _dack_s0(rtwdev); 525 _dack_s1(rtwdev); 526 } 527 528 static void _drck(struct rtw89_dev *rtwdev) 529 { 530 u32 val; 531 int ret; 532 533 rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_EN, 0x1); 534 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 535 1, 10000, false, rtwdev, 0xc0c8, BIT(3)); 536 if (ret) 537 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DRCK timeout\n"); 538 539 rtw89_rfk_parser(rtwdev, &rtw8852c_drck_defs_tbl); 540 541 val = rtw89_phy_read32_mask(rtwdev, R_DRCK_RES, B_DRCK_RES); 542 rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_IDLE, 0x0); 543 rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_VAL, val); 544 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0xc0c4 = 0x%x\n", 545 rtw89_phy_read32_mask(rtwdev, R_DRCK, MASKDWORD)); 546 } 547 548 static void _dac_cal(struct rtw89_dev *rtwdev, bool force) 549 { 550 struct rtw89_dack_info *dack = &rtwdev->dack; 551 u32 rf0_0, rf1_0; 552 u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, RF_AB); 553 554 dack->dack_done = false; 555 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK b\n"); 556 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK start!!!\n"); 557 rf0_0 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK); 558 rf1_0 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK); 559 _drck(rtwdev); 560 561 rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x0); 562 rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x0); 563 rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x337e1); 564 rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, 0x337e1); 565 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_START); 566 _addck(rtwdev); 567 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_STOP); 568 569 _addck_backup(rtwdev); 570 _addck_reload(rtwdev); 571 rtw89_write_rf(rtwdev, RF_PATH_A, RR_MODOPT, RFREG_MASK, 0x0); 572 rtw89_write_rf(rtwdev, RF_PATH_B, RR_MODOPT, RFREG_MASK, 0x0); 573 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_START); 574 _dack(rtwdev); 575 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_STOP); 576 577 _dack_dump(rtwdev); 578 dack->dack_done = true; 579 rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, rf0_0); 580 rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, rf1_0); 581 rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x1); 582 rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x1); 583 dack->dack_cnt++; 584 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK finish!!!\n"); 585 } 586 587 #define RTW8852C_NCTL_VER 0xd 588 #define RTW8852C_IQK_VER 0x2a 589 #define RTW8852C_IQK_SS 2 590 #define RTW8852C_IQK_THR_REK 8 591 #define RTW8852C_IQK_CFIR_GROUP_NR 4 592 593 enum rtw8852c_iqk_type { 594 ID_TXAGC, 595 ID_G_FLOK_COARSE, 596 ID_A_FLOK_COARSE, 597 ID_G_FLOK_FINE, 598 ID_A_FLOK_FINE, 599 ID_FLOK_VBUFFER, 600 ID_TXK, 601 ID_RXAGC, 602 ID_RXK, 603 ID_NBTXK, 604 ID_NBRXK, 605 }; 606 607 static void rtw8852c_disable_rxagc(struct rtw89_dev *rtwdev, u8 path, u8 en_rxgac) 608 { 609 if (path == RF_PATH_A) 610 rtw89_phy_write32_mask(rtwdev, R_P0_AGC_CTL, B_P0_AGC_EN, en_rxgac); 611 else 612 rtw89_phy_write32_mask(rtwdev, R_P1_AGC_CTL, B_P1_AGC_EN, en_rxgac); 613 } 614 615 static void _iqk_rxk_setting(struct rtw89_dev *rtwdev, u8 path) 616 { 617 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 618 619 if (path == RF_PATH_A) 620 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0101); 621 else 622 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0202); 623 624 switch (iqk_info->iqk_bw[path]) { 625 case RTW89_CHANNEL_WIDTH_20: 626 case RTW89_CHANNEL_WIDTH_40: 627 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_DPD_GDIS, 0x1); 628 rtw8852c_rxck_force(rtwdev, path, true, ADC_480M); 629 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_ACK_VAL, 0x0); 630 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 + (path << 8), B_P0_CFCH_BW0, 0x3); 631 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 + (path << 8), B_P0_CFCH_BW1, 0xf); 632 rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_CKT, 0x1); 633 rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG, 0x1); 634 break; 635 case RTW89_CHANNEL_WIDTH_80: 636 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_DPD_GDIS, 0x1); 637 rtw8852c_rxck_force(rtwdev, path, true, ADC_960M); 638 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_ACK_VAL, 0x1); 639 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 + (path << 8), B_P0_CFCH_BW0, 0x2); 640 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 + (path << 8), B_P0_CFCH_BW1, 0xd); 641 rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_CKT, 0x1); 642 rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG, 0x1); 643 break; 644 case RTW89_CHANNEL_WIDTH_160: 645 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_DPD_GDIS, 0x1); 646 rtw8852c_rxck_force(rtwdev, path, true, ADC_1920M); 647 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_ACK_VAL, 0x2); 648 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 + (path << 8), B_P0_CFCH_BW0, 0x1); 649 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 + (path << 8), B_P0_CFCH_BW1, 0xb); 650 rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_CKT, 0x1); 651 rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG, 0x1); 652 break; 653 default: 654 break; 655 } 656 657 rtw89_rfk_parser(rtwdev, &rtw8852c_iqk_rxk_cfg_defs_tbl); 658 659 if (path == RF_PATH_A) 660 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x1101); 661 else 662 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x2202); 663 } 664 665 static bool _iqk_check_cal(struct rtw89_dev *rtwdev, u8 path, u8 ktype) 666 { 667 u32 tmp; 668 u32 val; 669 int ret; 670 671 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55, 672 1, 8200, false, rtwdev, 0xbff8, MASKBYTE0); 673 if (ret) 674 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]IQK timeout!!!\n"); 675 676 rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, MASKBYTE0); 677 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, ret=%d\n", path, ret); 678 tmp = rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD); 679 rtw89_debug(rtwdev, RTW89_DBG_RFK, 680 "[IQK]S%x, type= %x, 0x8008 = 0x%x\n", path, ktype, tmp); 681 682 return false; 683 } 684 685 static bool _iqk_one_shot(struct rtw89_dev *rtwdev, 686 enum rtw89_phy_idx phy_idx, u8 path, u8 ktype) 687 { 688 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 689 u32 addr_rfc_ctl = R_UPD_CLK + (path << 13); 690 u32 iqk_cmd; 691 bool fail; 692 693 switch (ktype) { 694 case ID_TXAGC: 695 iqk_cmd = 0x008 | (1 << (4 + path)) | (path << 1); 696 break; 697 case ID_A_FLOK_COARSE: 698 rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1); 699 iqk_cmd = 0x008 | (1 << (4 + path)); 700 break; 701 case ID_G_FLOK_COARSE: 702 rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1); 703 iqk_cmd = 0x108 | (1 << (4 + path)); 704 break; 705 case ID_A_FLOK_FINE: 706 rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1); 707 iqk_cmd = 0x508 | (1 << (4 + path)); 708 break; 709 case ID_G_FLOK_FINE: 710 rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1); 711 iqk_cmd = 0x208 | (1 << (4 + path)); 712 break; 713 case ID_FLOK_VBUFFER: 714 rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1); 715 iqk_cmd = 0x308 | (1 << (4 + path)); 716 break; 717 case ID_TXK: 718 rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x0); 719 iqk_cmd = 0x008 | (1 << (4 + path)) | ((0x8 + iqk_info->iqk_bw[path]) << 8); 720 break; 721 case ID_RXAGC: 722 iqk_cmd = 0x508 | (1 << (4 + path)) | (path << 1); 723 break; 724 case ID_RXK: 725 rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1); 726 iqk_cmd = 0x008 | (1 << (4 + path)) | ((0xc + iqk_info->iqk_bw[path]) << 8); 727 break; 728 case ID_NBTXK: 729 rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x0); 730 iqk_cmd = 0x408 | (1 << (4 + path)); 731 break; 732 case ID_NBRXK: 733 rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1); 734 iqk_cmd = 0x608 | (1 << (4 + path)); 735 break; 736 default: 737 return false; 738 } 739 740 rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, iqk_cmd + 1); 741 fsleep(15); 742 fail = _iqk_check_cal(rtwdev, path, ktype); 743 rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x0); 744 745 return fail; 746 } 747 748 static bool _rxk_group_sel(struct rtw89_dev *rtwdev, 749 enum rtw89_phy_idx phy_idx, u8 path) 750 { 751 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 752 bool fail; 753 u32 tmp; 754 u32 bkrf0; 755 u8 gp; 756 757 bkrf0 = rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_NBW); 758 if (path == RF_PATH_B) { 759 rtw89_write_rf(rtwdev, RF_PATH_B, RR_IQKPLL, RR_IQKPLL_MOD, 0x3); 760 tmp = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CHTR, RR_CHTR_MOD); 761 rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV4, RR_RSV4_AGH, tmp); 762 tmp = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CHTR, RR_CHTR_TXRX); 763 rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV4, RR_RSV4_PLLCH, tmp); 764 } 765 766 switch (iqk_info->iqk_band[path]) { 767 case RTW89_BAND_2G: 768 default: 769 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc); 770 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0); 771 rtw89_write_rf(rtwdev, path, RR_RXG, RR_RXG_IQKMOD, 0x9); 772 break; 773 case RTW89_BAND_5G: 774 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc); 775 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0); 776 rtw89_write_rf(rtwdev, path, RR_RXAE, RR_RXAE_IQKMOD, 0x8); 777 break; 778 case RTW89_BAND_6G: 779 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc); 780 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0); 781 rtw89_write_rf(rtwdev, path, RR_RXAE, RR_RXAE_IQKMOD, 0x9); 782 break; 783 } 784 785 fsleep(10); 786 787 for (gp = 0; gp < RXK_GROUP_NR; gp++) { 788 switch (iqk_info->iqk_band[path]) { 789 case RTW89_BAND_2G: 790 default: 791 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, 792 _rxk_g_idxrxgain[gp]); 793 rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_VOBUF, 794 _rxk_g_idxattc2[gp]); 795 break; 796 case RTW89_BAND_5G: 797 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, 798 _rxk_a_idxrxgain[gp]); 799 rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_IATT, 800 _rxk_a_idxattc2[gp]); 801 break; 802 case RTW89_BAND_6G: 803 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, 804 _rxk_a6_idxrxgain[gp]); 805 rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_IATT, 806 _rxk_a6_idxattc2[gp]); 807 break; 808 } 809 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), 810 B_CFIR_LUT_SEL, 0x1); 811 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), 812 B_CFIR_LUT_SET, 0x0); 813 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), 814 B_CFIR_LUT_GP_V1, gp); 815 fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK); 816 } 817 818 if (path == RF_PATH_B) 819 rtw89_write_rf(rtwdev, path, RR_IQKPLL, RR_IQKPLL_MOD, 0x0); 820 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, bkrf0); 821 822 if (fail) { 823 iqk_info->nb_rxcfir[path] = 0x40000002; 824 iqk_info->is_wb_rxiqk[path] = false; 825 } else { 826 iqk_info->nb_rxcfir[path] = 0x40000000; 827 iqk_info->is_wb_rxiqk[path] = true; 828 } 829 830 return false; 831 } 832 833 static bool _iqk_nbrxk(struct rtw89_dev *rtwdev, 834 enum rtw89_phy_idx phy_idx, u8 path) 835 { 836 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 837 bool fail; 838 u32 tmp; 839 u32 bkrf0; 840 u8 gp = 0x2; 841 842 bkrf0 = rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_NBW); 843 if (path == RF_PATH_B) { 844 rtw89_write_rf(rtwdev, RF_PATH_B, RR_IQKPLL, RR_IQKPLL_MOD, 0x3); 845 tmp = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CHTR, RR_CHTR_MOD); 846 rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV4, RR_RSV4_AGH, tmp); 847 tmp = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CHTR, RR_CHTR_TXRX); 848 rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV4, RR_RSV4_PLLCH, tmp); 849 } 850 851 switch (iqk_info->iqk_band[path]) { 852 case RTW89_BAND_2G: 853 default: 854 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc); 855 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0); 856 rtw89_write_rf(rtwdev, path, RR_RXG, RR_RXG_IQKMOD, 0x9); 857 break; 858 case RTW89_BAND_5G: 859 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc); 860 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0); 861 rtw89_write_rf(rtwdev, path, RR_RXAE, RR_RXAE_IQKMOD, 0x8); 862 break; 863 case RTW89_BAND_6G: 864 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc); 865 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0); 866 rtw89_write_rf(rtwdev, path, RR_RXAE, RR_RXAE_IQKMOD, 0x9); 867 break; 868 } 869 870 fsleep(10); 871 872 switch (iqk_info->iqk_band[path]) { 873 case RTW89_BAND_2G: 874 default: 875 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, _rxk_g_idxrxgain[gp]); 876 rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_VOBUF, _rxk_g_idxattc2[gp]); 877 break; 878 case RTW89_BAND_5G: 879 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, _rxk_a_idxrxgain[gp]); 880 rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_IATT, _rxk_a_idxattc2[gp]); 881 break; 882 case RTW89_BAND_6G: 883 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, _rxk_a6_idxrxgain[gp]); 884 rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_IATT, _rxk_a6_idxattc2[gp]); 885 break; 886 } 887 888 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL, 0x1); 889 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SET, 0x0); 890 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP_V1, gp); 891 fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK); 892 893 if (path == RF_PATH_B) 894 rtw89_write_rf(rtwdev, path, RR_IQKPLL, RR_IQKPLL_MOD, 0x0); 895 896 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, bkrf0); 897 898 if (fail) 899 iqk_info->nb_rxcfir[path] = 900 rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), 901 MASKDWORD) | 0x2; 902 else 903 iqk_info->nb_rxcfir[path] = 0x40000002; 904 905 iqk_info->is_wb_rxiqk[path] = false; 906 return fail; 907 } 908 909 static bool _txk_group_sel(struct rtw89_dev *rtwdev, 910 enum rtw89_phy_idx phy_idx, u8 path) 911 { 912 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 913 bool fail; 914 u8 gp; 915 916 for (gp = 0; gp < TXK_GROUP_NR; gp++) { 917 switch (iqk_info->iqk_band[path]) { 918 case RTW89_BAND_2G: 919 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 920 _txk_g_power_range[gp]); 921 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 922 _txk_g_track_range[gp]); 923 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 924 _txk_g_gain_bb[gp]); 925 rtw89_phy_write32_mask(rtwdev, 926 R_KIP_IQP + (path << 8), 927 MASKDWORD, _txk_g_itqt[gp]); 928 break; 929 case RTW89_BAND_5G: 930 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 931 _txk_a_power_range[gp]); 932 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 933 _txk_a_track_range[gp]); 934 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 935 _txk_a_gain_bb[gp]); 936 rtw89_phy_write32_mask(rtwdev, 937 R_KIP_IQP + (path << 8), 938 MASKDWORD, _txk_a_itqt[gp]); 939 break; 940 case RTW89_BAND_6G: 941 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 942 _txk_a6_power_range[gp]); 943 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 944 _txk_a6_track_range[gp]); 945 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 946 _txk_a6_gain_bb[gp]); 947 rtw89_phy_write32_mask(rtwdev, 948 R_KIP_IQP + (path << 8), 949 MASKDWORD, _txk_a6_itqt[gp]); 950 break; 951 default: 952 break; 953 } 954 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), 955 B_CFIR_LUT_SEL, 0x1); 956 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), 957 B_CFIR_LUT_SET, 0x1); 958 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), 959 B_CFIR_LUT_G2, 0x0); 960 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), 961 B_CFIR_LUT_GP, gp + 1); 962 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x00b); 963 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); 964 fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK); 965 } 966 967 if (fail) { 968 iqk_info->nb_txcfir[path] = 0x40000002; 969 iqk_info->is_wb_txiqk[path] = false; 970 } else { 971 iqk_info->nb_txcfir[path] = 0x40000000; 972 iqk_info->is_wb_txiqk[path] = true; 973 } 974 975 return fail; 976 } 977 978 static bool _iqk_nbtxk(struct rtw89_dev *rtwdev, 979 enum rtw89_phy_idx phy_idx, u8 path) 980 { 981 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 982 bool fail; 983 u8 gp = 0x2; 984 985 switch (iqk_info->iqk_band[path]) { 986 case RTW89_BAND_2G: 987 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, _txk_g_power_range[gp]); 988 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, _txk_g_track_range[gp]); 989 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, _txk_g_gain_bb[gp]); 990 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), 991 MASKDWORD, _txk_g_itqt[gp]); 992 break; 993 case RTW89_BAND_5G: 994 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, _txk_a_power_range[gp]); 995 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, _txk_a_track_range[gp]); 996 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, _txk_a_gain_bb[gp]); 997 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), 998 MASKDWORD, _txk_a_itqt[gp]); 999 break; 1000 case RTW89_BAND_6G: 1001 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, _txk_a6_power_range[gp]); 1002 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, _txk_a6_track_range[gp]); 1003 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, _txk_a6_gain_bb[gp]); 1004 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), 1005 MASKDWORD, _txk_a6_itqt[gp]); 1006 break; 1007 default: 1008 break; 1009 } 1010 1011 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL, 0x1); 1012 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SET, 0x1); 1013 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G2, 0x0); 1014 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, gp + 1); 1015 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x00b); 1016 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); 1017 fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK); 1018 1019 if (!fail) 1020 iqk_info->nb_txcfir[path] = 1021 rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), 1022 MASKDWORD) | 0x2; 1023 else 1024 iqk_info->nb_txcfir[path] = 0x40000002; 1025 1026 iqk_info->is_wb_txiqk[path] = false; 1027 1028 return fail; 1029 } 1030 1031 static bool _lok_finetune_check(struct rtw89_dev *rtwdev, u8 path) 1032 { 1033 struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc; 1034 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1035 u8 idx = rfk_mcc->table_idx; 1036 bool is_fail1, is_fail2; 1037 u32 val; 1038 u32 core_i; 1039 u32 core_q; 1040 u32 vbuff_i; 1041 u32 vbuff_q; 1042 1043 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 1044 val = rtw89_read_rf(rtwdev, path, RR_TXMO, RFREG_MASK); 1045 core_i = FIELD_GET(RR_TXMO_COI, val); 1046 core_q = FIELD_GET(RR_TXMO_COQ, val); 1047 1048 if (core_i < 0x2 || core_i > 0x1d || core_q < 0x2 || core_q > 0x1d) 1049 is_fail1 = true; 1050 else 1051 is_fail1 = false; 1052 1053 iqk_info->lok_idac[idx][path] = val; 1054 1055 val = rtw89_read_rf(rtwdev, path, RR_LOKVB, RFREG_MASK); 1056 vbuff_i = FIELD_GET(RR_LOKVB_COI, val); 1057 vbuff_q = FIELD_GET(RR_LOKVB_COQ, val); 1058 1059 if (vbuff_i < 0x2 || vbuff_i > 0x3d || vbuff_q < 0x2 || vbuff_q > 0x3d) 1060 is_fail2 = true; 1061 else 1062 is_fail2 = false; 1063 1064 iqk_info->lok_vbuf[idx][path] = val; 1065 1066 return is_fail1 || is_fail2; 1067 } 1068 1069 static bool _iqk_lok(struct rtw89_dev *rtwdev, 1070 enum rtw89_phy_idx phy_idx, u8 path) 1071 { 1072 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1073 u8 tmp_id = 0x0; 1074 bool fail = false; 1075 bool tmp = false; 1076 1077 /* Step 0: Init RF gain & tone idx= 8.25Mhz */ 1078 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, IQK_DF4_TXT_8_25MHZ); 1079 1080 /* Step 1 START: _lok_coarse_fine_wi_swap */ 1081 switch (iqk_info->iqk_band[path]) { 1082 case RTW89_BAND_2G: 1083 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6); 1084 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), 1085 B_KIP_IQP_IQSW, 0x9); 1086 tmp_id = ID_G_FLOK_COARSE; 1087 break; 1088 case RTW89_BAND_5G: 1089 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6); 1090 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), 1091 B_KIP_IQP_IQSW, 0x9); 1092 tmp_id = ID_A_FLOK_COARSE; 1093 break; 1094 case RTW89_BAND_6G: 1095 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6); 1096 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), 1097 B_KIP_IQP_IQSW, 0x9); 1098 tmp_id = ID_A_FLOK_COARSE; 1099 break; 1100 default: 1101 break; 1102 } 1103 tmp = _iqk_one_shot(rtwdev, phy_idx, path, tmp_id); 1104 iqk_info->lok_cor_fail[0][path] = tmp; 1105 1106 /* Step 2 */ 1107 switch (iqk_info->iqk_band[path]) { 1108 case RTW89_BAND_2G: 1109 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12); 1110 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), 1111 B_KIP_IQP_IQSW, 0x1b); 1112 break; 1113 case RTW89_BAND_5G: 1114 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12); 1115 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), 1116 B_KIP_IQP_IQSW, 0x1b); 1117 break; 1118 case RTW89_BAND_6G: 1119 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12); 1120 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), 1121 B_KIP_IQP_IQSW, 0x1b); 1122 break; 1123 default: 1124 break; 1125 } 1126 tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_VBUFFER); 1127 1128 /* Step 3 */ 1129 switch (iqk_info->iqk_band[path]) { 1130 case RTW89_BAND_2G: 1131 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6); 1132 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), 1133 B_KIP_IQP_IQSW, 0x9); 1134 tmp_id = ID_G_FLOK_FINE; 1135 break; 1136 case RTW89_BAND_5G: 1137 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6); 1138 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), 1139 B_KIP_IQP_IQSW, 0x9); 1140 tmp_id = ID_A_FLOK_FINE; 1141 break; 1142 case RTW89_BAND_6G: 1143 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6); 1144 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), 1145 B_KIP_IQP_IQSW, 0x9); 1146 tmp_id = ID_A_FLOK_FINE; 1147 break; 1148 default: 1149 break; 1150 } 1151 tmp = _iqk_one_shot(rtwdev, phy_idx, path, tmp_id); 1152 iqk_info->lok_fin_fail[0][path] = tmp; 1153 1154 /* Step 4 large rf gain */ 1155 switch (iqk_info->iqk_band[path]) { 1156 case RTW89_BAND_2G: 1157 default: 1158 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12); 1159 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), 1160 B_KIP_IQP_IQSW, 0x1b); 1161 break; 1162 case RTW89_BAND_5G: 1163 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12); 1164 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), 1165 B_KIP_IQP_IQSW, 0x1b); 1166 break; 1167 case RTW89_BAND_6G: 1168 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12); 1169 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), 1170 B_KIP_IQP_IQSW, 0x1b); 1171 break; 1172 } 1173 tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_VBUFFER); 1174 fail = _lok_finetune_check(rtwdev, path); 1175 1176 return fail; 1177 } 1178 1179 static void _iqk_txk_setting(struct rtw89_dev *rtwdev, u8 path) 1180 { 1181 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1182 1183 switch (iqk_info->iqk_band[path]) { 1184 case RTW89_BAND_2G: 1185 default: 1186 rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT2, 0x0); 1187 rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1, 0x0); 1188 rtw89_write_rf(rtwdev, path, RR_TXG2, RR_TXG2_ATT0, 0x1); 1189 rtw89_write_rf(rtwdev, path, RR_TXA2, RR_TXA2_LDO, 0xf); 1190 rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0); 1191 rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1); 1192 rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, 1193 0x403e0 | iqk_info->syn1to2); 1194 fsleep(10); 1195 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0); 1196 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x6); 1197 break; 1198 case RTW89_BAND_5G: 1199 rtw89_write_rf(rtwdev, path, RR_TXATANK, RR_TXATANK_LBSW2, 0x0); 1200 rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXAS, 0x1); 1201 rtw89_write_rf(rtwdev, path, RR_TXA2, RR_TXA2_LDO, 0xf); 1202 rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0); 1203 rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1); 1204 rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, 1205 0x403e0 | iqk_info->syn1to2); 1206 fsleep(10); 1207 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0); 1208 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x6); 1209 break; 1210 case RTW89_BAND_6G: 1211 rtw89_write_rf(rtwdev, path, RR_TXATANK, RR_TXATANK_LBSW2, 0x0); 1212 rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXAS, 0x1); 1213 rtw89_write_rf(rtwdev, path, RR_TXA2, RR_TXA2_LDO, 0xf); 1214 rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0); 1215 rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1); 1216 rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, 1217 0x403e0 | iqk_info->syn1to2); 1218 fsleep(10); 1219 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0); 1220 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x6); 1221 break; 1222 } 1223 } 1224 1225 static void _iqk_info_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 1226 u8 path) 1227 { 1228 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1229 u32 tmp; 1230 bool flag; 1231 1232 iqk_info->thermal[path] = 1233 ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]); 1234 iqk_info->thermal_rek_en = false; 1235 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_thermal = %d\n", path, 1236 iqk_info->thermal[path]); 1237 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_LOK_COR_fail= %d\n", path, 1238 iqk_info->lok_cor_fail[0][path]); 1239 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_LOK_FIN_fail= %d\n", path, 1240 iqk_info->lok_fin_fail[0][path]); 1241 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_TXIQK_fail = %d\n", path, 1242 iqk_info->iqk_tx_fail[0][path]); 1243 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_RXIQK_fail= %d,\n", path, 1244 iqk_info->iqk_rx_fail[0][path]); 1245 1246 flag = iqk_info->lok_cor_fail[0][path]; 1247 rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FCOR << (path * 4), flag); 1248 flag = iqk_info->lok_fin_fail[0][path]; 1249 rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FFIN << (path * 4), flag); 1250 flag = iqk_info->iqk_tx_fail[0][path]; 1251 rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FTX << (path * 4), flag); 1252 flag = iqk_info->iqk_rx_fail[0][path]; 1253 rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_F_RX << (path * 4), flag); 1254 1255 tmp = rtw89_phy_read32_mask(rtwdev, R_IQK_RES + (path << 8), MASKDWORD); 1256 iqk_info->bp_iqkenable[path] = tmp; 1257 tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD); 1258 iqk_info->bp_txkresult[path] = tmp; 1259 tmp = rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD); 1260 iqk_info->bp_rxkresult[path] = tmp; 1261 1262 rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_KCNT, 1263 iqk_info->iqk_times); 1264 1265 tmp = rtw89_phy_read32_mask(rtwdev, R_IQKINF, B_IQKINF_FAIL << (path * 4)); 1266 if (tmp != 0x0) 1267 iqk_info->iqk_fail_cnt++; 1268 rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_FCNT << (path * 4), 1269 iqk_info->iqk_fail_cnt); 1270 } 1271 1272 static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path) 1273 { 1274 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1275 1276 _iqk_txk_setting(rtwdev, path); 1277 iqk_info->lok_fail[path] = _iqk_lok(rtwdev, phy_idx, path); 1278 1279 if (iqk_info->is_nbiqk) 1280 iqk_info->iqk_tx_fail[0][path] = _iqk_nbtxk(rtwdev, phy_idx, path); 1281 else 1282 iqk_info->iqk_tx_fail[0][path] = _txk_group_sel(rtwdev, phy_idx, path); 1283 1284 _iqk_rxk_setting(rtwdev, path); 1285 if (iqk_info->is_nbiqk) 1286 iqk_info->iqk_rx_fail[0][path] = _iqk_nbrxk(rtwdev, phy_idx, path); 1287 else 1288 iqk_info->iqk_rx_fail[0][path] = _rxk_group_sel(rtwdev, phy_idx, path); 1289 1290 _iqk_info_iqk(rtwdev, phy_idx, path); 1291 } 1292 1293 static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, 1294 enum rtw89_phy_idx phy, u8 path) 1295 { 1296 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 1297 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1298 1299 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 1300 1301 iqk_info->iqk_band[path] = chan->band_type; 1302 iqk_info->iqk_bw[path] = chan->band_width; 1303 iqk_info->iqk_ch[path] = chan->channel; 1304 1305 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1306 "[IQK]iqk_info->iqk_band[%x] = 0x%x\n", path, 1307 iqk_info->iqk_band[path]); 1308 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_info->iqk_bw[%x] = 0x%x\n", 1309 path, iqk_info->iqk_bw[path]); 1310 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_info->iqk_ch[%x] = 0x%x\n", 1311 path, iqk_info->iqk_ch[path]); 1312 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1313 "[IQK]S%d (PHY%d): / DBCC %s/ %s/ CH%d/ %s\n", path, phy, 1314 rtwdev->dbcc_en ? "on" : "off", 1315 iqk_info->iqk_band[path] == 0 ? "2G" : 1316 iqk_info->iqk_band[path] == 1 ? "5G" : "6G", 1317 iqk_info->iqk_ch[path], 1318 iqk_info->iqk_bw[path] == 0 ? "20M" : 1319 iqk_info->iqk_bw[path] == 1 ? "40M" : "80M"); 1320 if (!rtwdev->dbcc_en) 1321 iqk_info->syn1to2 = 0x1; 1322 else 1323 iqk_info->syn1to2 = 0x3; 1324 1325 rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_VER, RTW8852C_IQK_VER); 1326 rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_BAND << (path * 16), 1327 iqk_info->iqk_band[path]); 1328 rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_BW << (path * 16), 1329 iqk_info->iqk_bw[path]); 1330 rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_CH << (path * 16), 1331 iqk_info->iqk_ch[path]); 1332 1333 rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_NCTLV, RTW8852C_NCTL_VER); 1334 } 1335 1336 static void _iqk_start_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 1337 u8 path) 1338 { 1339 _iqk_by_path(rtwdev, phy_idx, path); 1340 } 1341 1342 static void _iqk_restore(struct rtw89_dev *rtwdev, u8 path) 1343 { 1344 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1345 bool fail; 1346 1347 rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD, 1348 iqk_info->nb_txcfir[path]); 1349 rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD, 1350 iqk_info->nb_rxcfir[path]); 1351 rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 1352 0x00001219 + (path << 4)); 1353 fsleep(200); 1354 fail = _iqk_check_cal(rtwdev, path, 0x12); 1355 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] restore fail = %x\n", fail); 1356 1357 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); 1358 rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000000); 1359 rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000); 1360 1361 rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0); 1362 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX); 1363 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1); 1364 } 1365 1366 static void _iqk_afebb_restore(struct rtw89_dev *rtwdev, 1367 enum rtw89_phy_idx phy_idx, u8 path) 1368 { 1369 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, 1370 &rtw8852c_iqk_afebb_restore_defs_a_tbl, 1371 &rtw8852c_iqk_afebb_restore_defs_b_tbl); 1372 1373 rtw8852c_disable_rxagc(rtwdev, path, 0x1); 1374 } 1375 1376 static void _iqk_preset(struct rtw89_dev *rtwdev, u8 path) 1377 { 1378 struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc; 1379 u8 idx = 0; 1380 1381 idx = rfk_mcc->table_idx; 1382 rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), B_COEF_SEL_IQC, idx); 1383 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3, idx); 1384 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0); 1385 rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080); 1386 rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x81ff010a); 1387 } 1388 1389 static void _iqk_macbb_setting(struct rtw89_dev *rtwdev, 1390 enum rtw89_phy_idx phy_idx, u8 path) 1391 { 1392 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===> %s\n", __func__); 1393 1394 /* 01_BB_AFE_for DPK_S0_20210820 */ 1395 rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0); 1396 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A0 << path, 0x1); 1397 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x0); 1398 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A2 << path, 0x1); 1399 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x0); 1400 1401 /* disable rxgac */ 1402 rtw8852c_disable_rxagc(rtwdev, path, 0x0); 1403 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), MASKDWORD, 0xf801fffd); 1404 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), B_DPD_DIS, 0x1); 1405 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), B_DAC_VAL, 0x1); 1406 1407 rtw8852c_txck_force(rtwdev, path, true, DAC_960M); 1408 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), B_DPD_GDIS, 0x1); 1409 1410 rtw8852c_rxck_force(rtwdev, path, true, ADC_1920M); 1411 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), B_ACK_VAL, 0x2); 1412 1413 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, 0x1); 1414 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, 0xb); 1415 rtw89_phy_write32_mask(rtwdev, R_P0_NRBW | (path << 13), B_P0_NRBW_DBG, 0x1); 1416 rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x1f); 1417 rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x13); 1418 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0001); 1419 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0041); 1420 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x1); 1421 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x1); 1422 } 1423 1424 static void _rck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) 1425 { 1426 u32 rf_reg5, rck_val = 0; 1427 u32 val; 1428 int ret; 1429 1430 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] ====== S%d RCK ======\n", path); 1431 1432 rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK); 1433 1434 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0); 1435 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX); 1436 1437 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF0x00 = 0x%x\n", 1438 rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK)); 1439 1440 /* RCK trigger */ 1441 rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, 0x00240); 1442 1443 ret = read_poll_timeout_atomic(rtw89_read_rf, val, val, 2, 20, 1444 false, rtwdev, path, 0x1c, BIT(3)); 1445 if (ret) 1446 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RCK timeout\n"); 1447 1448 rck_val = rtw89_read_rf(rtwdev, path, RR_RCKC, RR_RCKC_CA); 1449 rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, rck_val); 1450 1451 rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5); 1452 1453 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1454 "[RCK] RF 0x1b / 0x1c = 0x%x / 0x%x\n", 1455 rtw89_read_rf(rtwdev, path, RR_RCKC, RFREG_MASK), 1456 rtw89_read_rf(rtwdev, path, RR_RCKS, RFREG_MASK)); 1457 } 1458 1459 static void _iqk_init(struct rtw89_dev *rtwdev) 1460 { 1461 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1462 u8 ch, path; 1463 1464 rtw89_phy_write32_clr(rtwdev, R_IQKINF, MASKDWORD); 1465 if (iqk_info->is_iqk_init) 1466 return; 1467 1468 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 1469 iqk_info->is_iqk_init = true; 1470 iqk_info->is_nbiqk = false; 1471 iqk_info->iqk_fft_en = false; 1472 iqk_info->iqk_sram_en = false; 1473 iqk_info->iqk_cfir_en = false; 1474 iqk_info->iqk_xym_en = false; 1475 iqk_info->thermal_rek_en = false; 1476 iqk_info->iqk_times = 0x0; 1477 1478 for (ch = 0; ch < RTW89_IQK_CHS_NR; ch++) { 1479 iqk_info->iqk_channel[ch] = 0x0; 1480 for (path = 0; path < RTW8852C_IQK_SS; path++) { 1481 iqk_info->lok_cor_fail[ch][path] = false; 1482 iqk_info->lok_fin_fail[ch][path] = false; 1483 iqk_info->iqk_tx_fail[ch][path] = false; 1484 iqk_info->iqk_rx_fail[ch][path] = false; 1485 iqk_info->iqk_mcc_ch[ch][path] = 0x0; 1486 iqk_info->iqk_table_idx[path] = 0x0; 1487 } 1488 } 1489 } 1490 1491 static void _doiqk(struct rtw89_dev *rtwdev, bool force, 1492 enum rtw89_phy_idx phy_idx, u8 path) 1493 { 1494 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1495 u32 backup_bb_val[BACKUP_BB_REGS_NR]; 1496 u32 backup_rf_val[RTW8852C_IQK_SS][BACKUP_RF_REGS_NR]; 1497 u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB); 1498 1499 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START); 1500 1501 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1502 "[IQK]==========IQK strat!!!!!==========\n"); 1503 iqk_info->iqk_times++; 1504 iqk_info->kcount = 0; 1505 iqk_info->version = RTW8852C_IQK_VER; 1506 1507 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version); 1508 _iqk_get_ch_info(rtwdev, phy_idx, path); 1509 _rfk_backup_bb_reg(rtwdev, backup_bb_val); 1510 _rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path); 1511 _iqk_macbb_setting(rtwdev, phy_idx, path); 1512 _iqk_preset(rtwdev, path); 1513 _iqk_start_iqk(rtwdev, phy_idx, path); 1514 _iqk_restore(rtwdev, path); 1515 _iqk_afebb_restore(rtwdev, phy_idx, path); 1516 _rfk_restore_bb_reg(rtwdev, backup_bb_val); 1517 _rfk_restore_rf_reg(rtwdev, backup_rf_val[path], path); 1518 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP); 1519 } 1520 1521 static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force) 1522 { 1523 switch (_kpath(rtwdev, phy_idx)) { 1524 case RF_A: 1525 _doiqk(rtwdev, force, phy_idx, RF_PATH_A); 1526 break; 1527 case RF_B: 1528 _doiqk(rtwdev, force, phy_idx, RF_PATH_B); 1529 break; 1530 case RF_AB: 1531 _doiqk(rtwdev, force, phy_idx, RF_PATH_A); 1532 _doiqk(rtwdev, force, phy_idx, RF_PATH_B); 1533 break; 1534 default: 1535 break; 1536 } 1537 } 1538 1539 static void _rx_dck_toggle(struct rtw89_dev *rtwdev, u8 path) 1540 { 1541 int ret; 1542 u32 val; 1543 1544 rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0); 1545 rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x1); 1546 1547 ret = read_poll_timeout_atomic(rtw89_read_rf, val, val, 1548 2, 2000, false, rtwdev, path, 1549 RR_DCK1, RR_DCK1_DONE); 1550 if (ret) 1551 rtw89_warn(rtwdev, "[RX_DCK] S%d RXDCK timeout\n", path); 1552 else 1553 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RX_DCK] S%d RXDCK finish\n", path); 1554 1555 rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0); 1556 } 1557 1558 static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 path, 1559 bool is_afe) 1560 { 1561 u8 res; 1562 1563 rtw89_write_rf(rtwdev, path, RR_DCK1, RR_DCK1_CLR, 0x0); 1564 1565 _rx_dck_toggle(rtwdev, path); 1566 if (rtw89_read_rf(rtwdev, path, RR_DCKC, RR_DCKC_CHK) == 0) 1567 return; 1568 res = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_DONE); 1569 if (res > 1) { 1570 rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_IDAC, res); 1571 _rx_dck_toggle(rtwdev, path); 1572 rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_IDAC, 0x1); 1573 } 1574 } 1575 1576 #define RTW8852C_RF_REL_VERSION 34 1577 #define RTW8852C_DPK_VER 0x10 1578 #define RTW8852C_DPK_TH_AVG_NUM 4 1579 #define RTW8852C_DPK_RF_PATH 2 1580 #define RTW8852C_DPK_KIP_REG_NUM 5 1581 #define RTW8852C_DPK_RXSRAM_DBG 0 1582 1583 enum rtw8852c_dpk_id { 1584 LBK_RXIQK = 0x06, 1585 SYNC = 0x10, 1586 MDPK_IDL = 0x11, 1587 MDPK_MPA = 0x12, 1588 GAIN_LOSS = 0x13, 1589 GAIN_CAL = 0x14, 1590 DPK_RXAGC = 0x15, 1591 KIP_PRESET = 0x16, 1592 KIP_RESTORE = 0x17, 1593 DPK_TXAGC = 0x19, 1594 D_KIP_PRESET = 0x28, 1595 D_TXAGC = 0x29, 1596 D_RXAGC = 0x2a, 1597 D_SYNC = 0x2b, 1598 D_GAIN_LOSS = 0x2c, 1599 D_MDPK_IDL = 0x2d, 1600 D_GAIN_NORM = 0x2f, 1601 D_KIP_THERMAL = 0x30, 1602 D_KIP_RESTORE = 0x31 1603 }; 1604 1605 #define DPK_TXAGC_LOWER 0x2e 1606 #define DPK_TXAGC_UPPER 0x3f 1607 #define DPK_TXAGC_INVAL 0xff 1608 1609 enum dpk_agc_step { 1610 DPK_AGC_STEP_SYNC_DGAIN, 1611 DPK_AGC_STEP_GAIN_LOSS_IDX, 1612 DPK_AGC_STEP_GL_GT_CRITERION, 1613 DPK_AGC_STEP_GL_LT_CRITERION, 1614 DPK_AGC_STEP_SET_TX_GAIN, 1615 }; 1616 1617 static void _rf_direct_cntrl(struct rtw89_dev *rtwdev, 1618 enum rtw89_rf_path path, bool is_bybb) 1619 { 1620 if (is_bybb) 1621 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1); 1622 else 1623 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0); 1624 } 1625 1626 static void _dpk_onoff(struct rtw89_dev *rtwdev, 1627 enum rtw89_rf_path path, bool off); 1628 1629 static void _dpk_bkup_kip(struct rtw89_dev *rtwdev, const u32 reg[], 1630 u32 reg_bkup[][RTW8852C_DPK_KIP_REG_NUM], u8 path) 1631 { 1632 u8 i; 1633 1634 for (i = 0; i < RTW8852C_DPK_KIP_REG_NUM; i++) { 1635 reg_bkup[path][i] = 1636 rtw89_phy_read32_mask(rtwdev, reg[i] + (path << 8), MASKDWORD); 1637 1638 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Backup 0x%x = %x\n", 1639 reg[i] + (path << 8), reg_bkup[path][i]); 1640 } 1641 } 1642 1643 static void _dpk_reload_kip(struct rtw89_dev *rtwdev, const u32 reg[], 1644 u32 reg_bkup[][RTW8852C_DPK_KIP_REG_NUM], u8 path) 1645 { 1646 u8 i; 1647 1648 for (i = 0; i < RTW8852C_DPK_KIP_REG_NUM; i++) { 1649 rtw89_phy_write32_mask(rtwdev, reg[i] + (path << 8), 1650 MASKDWORD, reg_bkup[path][i]); 1651 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Reload 0x%x = %x\n", 1652 reg[i] + (path << 8), reg_bkup[path][i]); 1653 } 1654 } 1655 1656 static u8 _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 1657 enum rtw89_rf_path path, enum rtw8852c_dpk_id id) 1658 { 1659 u16 dpk_cmd; 1660 u32 val; 1661 int ret; 1662 1663 dpk_cmd = (u16)((id << 8) | (0x19 + path * 0x12)); 1664 1665 rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, dpk_cmd); 1666 1667 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55, 1668 10, 20000, false, rtwdev, 0xbff8, MASKBYTE0); 1669 udelay(10); 1670 rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, MASKBYTE0); 1671 1672 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1673 "[DPK] one-shot for %s = 0x%x (ret=%d)\n", 1674 id == 0x06 ? "LBK_RXIQK" : 1675 id == 0x10 ? "SYNC" : 1676 id == 0x11 ? "MDPK_IDL" : 1677 id == 0x12 ? "MDPK_MPA" : 1678 id == 0x13 ? "GAIN_LOSS" : "PWR_CAL", 1679 dpk_cmd, ret); 1680 1681 if (ret) { 1682 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1683 "[DPK] one-shot over 20ms!!!!\n"); 1684 return 1; 1685 } 1686 1687 return 0; 1688 } 1689 1690 static void _dpk_information(struct rtw89_dev *rtwdev, 1691 enum rtw89_phy_idx phy, 1692 enum rtw89_rf_path path) 1693 { 1694 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 1695 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 1696 1697 u8 kidx = dpk->cur_idx[path]; 1698 1699 dpk->bp[path][kidx].band = chan->band_type; 1700 dpk->bp[path][kidx].ch = chan->channel; 1701 dpk->bp[path][kidx].bw = chan->band_width; 1702 1703 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1704 "[DPK] S%d[%d] (PHY%d): TSSI %s/ DBCC %s/ %s/ CH%d/ %s\n", 1705 path, dpk->cur_idx[path], phy, 1706 rtwdev->is_tssi_mode[path] ? "on" : "off", 1707 rtwdev->dbcc_en ? "on" : "off", 1708 dpk->bp[path][kidx].band == 0 ? "2G" : 1709 dpk->bp[path][kidx].band == 1 ? "5G" : "6G", 1710 dpk->bp[path][kidx].ch, 1711 dpk->bp[path][kidx].bw == 0 ? "20M" : 1712 dpk->bp[path][kidx].bw == 1 ? "40M" : "80M"); 1713 } 1714 1715 static void _dpk_bb_afe_setting(struct rtw89_dev *rtwdev, 1716 enum rtw89_phy_idx phy, 1717 enum rtw89_rf_path path, u8 kpath) 1718 { 1719 /*1. Keep ADC_fifo reset*/ 1720 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A0 << path, 0x1); 1721 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x0); 1722 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A2 << path, 0x1); 1723 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x0); 1724 1725 /*2. BB for IQK DBG mode*/ 1726 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), MASKDWORD, 0xd801dffd); 1727 1728 /*3.Set DAC clk*/ 1729 rtw8852c_txck_force(rtwdev, path, true, DAC_960M); 1730 1731 /*4. Set ADC clk*/ 1732 rtw8852c_rxck_force(rtwdev, path, true, ADC_1920M); 1733 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 + (path << 8), B_P0_CFCH_BW0, 0x1); 1734 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 + (path << 8), B_P0_CFCH_BW1, 0xb); 1735 rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13), 1736 B_P0_NRBW_DBG, 0x1); 1737 rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, MASKBYTE3, 0x1f); 1738 rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, MASKBYTE3, 0x13); 1739 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, MASKHWORD, 0x0001); 1740 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, MASKHWORD, 0x0041); 1741 1742 /*5. ADDA fifo rst*/ 1743 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x1); 1744 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x1); 1745 1746 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d BB/AFE setting\n", path); 1747 } 1748 1749 static void _dpk_bb_afe_restore(struct rtw89_dev *rtwdev, u8 path) 1750 { 1751 rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13), 1752 B_P0_NRBW_DBG, 0x0); 1753 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A0 << path, 0x1); 1754 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x0); 1755 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A2 << path, 0x1); 1756 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x0); 1757 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), MASKDWORD, 0x00000000); 1758 rtw89_phy_write32_mask(rtwdev, R_P0_RXCK + (path << 13), B_P0_TXCK_ALL, 0x00); 1759 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A0 << path, 0x0); 1760 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A2 << path, 0x0); 1761 1762 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d BB/AFE restore\n", path); 1763 } 1764 1765 static void _dpk_tssi_pause(struct rtw89_dev *rtwdev, 1766 enum rtw89_rf_path path, bool is_pause) 1767 { 1768 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13), 1769 B_P0_TSSI_TRK_EN, is_pause); 1770 1771 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d TSSI %s\n", path, 1772 is_pause ? "pause" : "resume"); 1773 } 1774 1775 static void _dpk_kip_control_rfc(struct rtw89_dev *rtwdev, u8 path, bool ctrl_by_kip) 1776 { 1777 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_IQK_RFC_ON, ctrl_by_kip); 1778 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] RFC is controlled by %s\n", 1779 ctrl_by_kip ? "KIP" : "BB"); 1780 } 1781 1782 static void _dpk_txpwr_bb_force(struct rtw89_dev *rtwdev, u8 path, bool force) 1783 { 1784 rtw89_phy_write32_mask(rtwdev, R_TXPWRB + (path << 13), B_TXPWRB_ON, force); 1785 rtw89_phy_write32_mask(rtwdev, R_TXPWRB_H + (path << 13), B_TXPWRB_RDY, force); 1786 1787 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d txpwr_bb_force %s\n", 1788 path, force ? "on" : "off"); 1789 } 1790 1791 static void _dpk_kip_restore(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 1792 enum rtw89_rf_path path) 1793 { 1794 _dpk_one_shot(rtwdev, phy, path, D_KIP_RESTORE); 1795 _dpk_kip_control_rfc(rtwdev, path, false); 1796 _dpk_txpwr_bb_force(rtwdev, path, false); 1797 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d restore KIP\n", path); 1798 } 1799 1800 static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev, 1801 enum rtw89_phy_idx phy, 1802 enum rtw89_rf_path path) 1803 { 1804 #define RX_TONE_IDX 0x00250025 /* Q.2 9.25MHz */ 1805 u8 cur_rxbb; 1806 u32 rf_11, reg_81cc; 1807 1808 rtw89_phy_write32_mask(rtwdev, R_DPD_V1 + (path << 8), B_DPD_LBK, 0x1); 1809 rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x1); 1810 1811 _dpk_kip_control_rfc(rtwdev, path, false); 1812 1813 cur_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB); 1814 rf_11 = rtw89_read_rf(rtwdev, path, RR_TXIG, RFREG_MASK); 1815 reg_81cc = rtw89_phy_read32_mask(rtwdev, R_KIP_IQP + (path << 8), 1816 B_KIP_IQP_SW); 1817 1818 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0); 1819 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x3); 1820 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0xd); 1821 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB, 0x1f); 1822 1823 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x12); 1824 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_SW, 0x3); 1825 1826 _dpk_kip_control_rfc(rtwdev, path, true); 1827 1828 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, MASKDWORD, RX_TONE_IDX); 1829 1830 _dpk_one_shot(rtwdev, phy, path, LBK_RXIQK); 1831 1832 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d LBK RXIQC = 0x%x\n", path, 1833 rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD)); 1834 1835 _dpk_kip_control_rfc(rtwdev, path, false); 1836 1837 rtw89_write_rf(rtwdev, path, RR_TXIG, RFREG_MASK, rf_11); 1838 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB, cur_rxbb); 1839 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_SW, reg_81cc); 1840 1841 rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x0); 1842 rtw89_phy_write32_mask(rtwdev, R_KPATH_CFG, B_KPATH_CFG_ED, 0x0); 1843 rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_DI, 0x1); 1844 1845 _dpk_kip_control_rfc(rtwdev, path, true); 1846 } 1847 1848 static void _dpk_rf_setting(struct rtw89_dev *rtwdev, u8 gain, 1849 enum rtw89_rf_path path, u8 kidx) 1850 { 1851 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 1852 1853 if (dpk->bp[path][kidx].band == RTW89_BAND_2G) { 1854 rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, 1855 0x50121 | BIT(rtwdev->dbcc_en)); 1856 rtw89_write_rf(rtwdev, path, RR_MOD_V1, RR_MOD_MASK, RF_DPK); 1857 rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_ATTC, 0x2); 1858 rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_ATTR, 0x4); 1859 rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1); 1860 rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1); 1861 1862 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1863 "[DPK] RF 0x0/0x83/0x9e/0x1a/0xdf/0x1001a = 0x%x/ 0x%x/ 0x%x/ 0x%x/ 0x%x/ 0x%x\n", 1864 rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK), 1865 rtw89_read_rf(rtwdev, path, RR_RXBB, RFREG_MASK), 1866 rtw89_read_rf(rtwdev, path, RR_TIA, RFREG_MASK), 1867 rtw89_read_rf(rtwdev, path, RR_BTC, RFREG_MASK), 1868 rtw89_read_rf(rtwdev, path, RR_LUTDBG, RFREG_MASK), 1869 rtw89_read_rf(rtwdev, path, 0x1001a, RFREG_MASK)); 1870 } else { 1871 rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, 1872 0x50101 | BIT(rtwdev->dbcc_en)); 1873 rtw89_write_rf(rtwdev, path, RR_MOD_V1, RR_MOD_MASK, RF_DPK); 1874 1875 if (dpk->bp[path][kidx].band == RTW89_BAND_6G && dpk->bp[path][kidx].ch >= 161) { 1876 rtw89_write_rf(rtwdev, path, RR_IQGEN, RR_IQGEN_BIAS, 0x8); 1877 rtw89_write_rf(rtwdev, path, RR_LOGEN, RR_LOGEN_RPT, 0xd); 1878 } else { 1879 rtw89_write_rf(rtwdev, path, RR_LOGEN, RR_LOGEN_RPT, 0xd); 1880 } 1881 1882 rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_ATT, 0x0); 1883 rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT2, 0x3); 1884 rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1); 1885 rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1); 1886 1887 if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_160) 1888 rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_EBW, 0x0); 1889 } 1890 } 1891 1892 static void _dpk_tpg_sel(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx) 1893 { 1894 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 1895 1896 if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_160) { 1897 rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x3); 1898 rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0x0180ff30); 1899 } else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80) { 1900 rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x0); 1901 rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0xffe0fa00); 1902 } else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40) { 1903 rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x2); 1904 rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0xff4009e0); 1905 } else { 1906 rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x1); 1907 rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0xf9f007d0); 1908 } 1909 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] TPG_Select for %s\n", 1910 dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_160 ? "160M" : 1911 dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80 ? "80M" : 1912 dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40 ? "40M" : "20M"); 1913 } 1914 1915 static bool _dpk_sync_check(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx) 1916 { 1917 #define DPK_SYNC_TH_DC_I 200 1918 #define DPK_SYNC_TH_DC_Q 200 1919 #define DPK_SYNC_TH_CORR 170 1920 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 1921 u16 dc_i, dc_q; 1922 u8 corr_val, corr_idx, rxbb; 1923 u8 rxbb_ov; 1924 1925 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0); 1926 1927 corr_idx = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORI); 1928 corr_val = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORV); 1929 1930 dpk->corr_idx[path][kidx] = corr_idx; 1931 dpk->corr_val[path][kidx] = corr_val; 1932 1933 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x9); 1934 1935 dc_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI); 1936 dc_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCQ); 1937 1938 dc_i = abs(sign_extend32(dc_i, 11)); 1939 dc_q = abs(sign_extend32(dc_q, 11)); 1940 1941 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1942 "[DPK] S%d Corr_idx/ Corr_val /DC I/Q, = %d / %d / %d / %d\n", 1943 path, corr_idx, corr_val, dc_i, dc_q); 1944 1945 dpk->dc_i[path][kidx] = dc_i; 1946 dpk->dc_q[path][kidx] = dc_q; 1947 1948 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x8); 1949 rxbb = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_RXBB); 1950 1951 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x31); 1952 rxbb_ov = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_RXOV); 1953 1954 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1955 "[DPK] S%d RXBB/ RXAGC_done /RXBB_ovlmt = %d / %d / %d\n", 1956 path, rxbb, 1957 rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DONE), 1958 rxbb_ov); 1959 1960 if (dc_i > DPK_SYNC_TH_DC_I || dc_q > DPK_SYNC_TH_DC_Q || 1961 corr_val < DPK_SYNC_TH_CORR) 1962 return true; 1963 else 1964 return false; 1965 } 1966 1967 static u16 _dpk_dgain_read(struct rtw89_dev *rtwdev) 1968 { 1969 u16 dgain = 0x0; 1970 1971 rtw89_phy_write32_clr(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL); 1972 1973 dgain = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI); 1974 1975 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain = 0x%x (%d)\n", dgain, dgain); 1976 1977 return dgain; 1978 } 1979 1980 static u8 _dpk_gainloss_read(struct rtw89_dev *rtwdev) 1981 { 1982 u8 result; 1983 1984 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x6); 1985 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x1); 1986 1987 result = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_GL); 1988 1989 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] tmp GL = %d\n", result); 1990 1991 return result; 1992 } 1993 1994 static void _dpk_kset_query(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) 1995 { 1996 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 1997 1998 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT + (path << 8), B_KIP_RPT_SEL, 0x10); 1999 dpk->cur_k_set = 2000 rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), 0xE0000000) - 1; 2001 } 2002 2003 static void _dpk_kip_set_txagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2004 enum rtw89_rf_path path, u8 dbm, bool set_from_bb) 2005 { 2006 if (set_from_bb) { 2007 dbm = clamp_t(u8, dbm, 7, 24); 2008 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] set S%d txagc to %ddBm\n", path, dbm); 2009 rtw89_phy_write32_mask(rtwdev, R_TXPWRB + (path << 13), B_TXPWRB_VAL, dbm << 2); 2010 } 2011 _dpk_one_shot(rtwdev, phy, path, D_TXAGC); 2012 _dpk_kset_query(rtwdev, path); 2013 } 2014 2015 static u8 _dpk_gainloss(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2016 enum rtw89_rf_path path, u8 kidx) 2017 { 2018 _dpk_one_shot(rtwdev, phy, path, D_GAIN_LOSS); 2019 _dpk_kip_set_txagc(rtwdev, phy, path, 0xff, false); 2020 2021 rtw89_phy_write32_mask(rtwdev, R_DPK_GL + (path << 8), B_DPK_GL_A1, 0x0); 2022 rtw89_phy_write32_mask(rtwdev, R_DPK_GL + (path << 8), B_DPK_GL_A0, 0x0); 2023 2024 return _dpk_gainloss_read(rtwdev); 2025 } 2026 2027 static bool _dpk_pas_read(struct rtw89_dev *rtwdev, bool is_check) 2028 { 2029 u32 val1_i = 0, val1_q = 0, val2_i = 0, val2_q = 0; 2030 u8 i; 2031 2032 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKBYTE2, 0x06); 2033 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x0); 2034 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE2, 0x08); 2035 2036 if (is_check) { 2037 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x00); 2038 val1_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD); 2039 val1_i = abs(sign_extend32(val1_i, 11)); 2040 val1_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD); 2041 val1_q = abs(sign_extend32(val1_q, 11)); 2042 2043 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x1f); 2044 val2_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD); 2045 val2_i = abs(sign_extend32(val2_i, 11)); 2046 val2_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD); 2047 val2_q = abs(sign_extend32(val2_q, 11)); 2048 2049 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_delta = 0x%x\n", 2050 phy_div(val1_i * val1_i + val1_q * val1_q, 2051 val2_i * val2_i + val2_q * val2_q)); 2052 } else { 2053 for (i = 0; i < 32; i++) { 2054 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, i); 2055 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_Read[%02d]= 0x%08x\n", i, 2056 rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD)); 2057 } 2058 } 2059 2060 if (val1_i * val1_i + val1_q * val1_q >= (val2_i * val2_i + val2_q * val2_q) * 8 / 5) 2061 return true; 2062 else 2063 return false; 2064 } 2065 2066 static bool _dpk_kip_set_rxagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2067 enum rtw89_rf_path path, u8 kidx) 2068 { 2069 _dpk_one_shot(rtwdev, phy, path, D_RXAGC); 2070 2071 return _dpk_sync_check(rtwdev, path, kidx); 2072 } 2073 2074 static void _dpk_read_rxsram(struct rtw89_dev *rtwdev) 2075 { 2076 u32 addr; 2077 2078 rtw89_rfk_parser(rtwdev, &rtw8852c_read_rxsram_pre_defs_tbl); 2079 2080 for (addr = 0; addr < 0x200; addr++) { 2081 rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 0x00010000 | addr); 2082 2083 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] RXSRAM[%03d] = 0x%07x\n", addr, 2084 rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD)); 2085 } 2086 2087 rtw89_rfk_parser(rtwdev, &rtw8852c_read_rxsram_post_defs_tbl); 2088 } 2089 2090 static void _dpk_bypass_rxiqc(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) 2091 { 2092 rtw89_phy_write32_mask(rtwdev, R_DPD_V1 + (path << 8), B_DPD_LBK, 0x1); 2093 rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD, 0x40000002); 2094 2095 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Bypass RXIQC\n"); 2096 } 2097 2098 static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2099 enum rtw89_rf_path path, u8 kidx, u8 init_xdbm, u8 loss_only) 2100 { 2101 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 2102 u8 step = DPK_AGC_STEP_SYNC_DGAIN; 2103 u8 tmp_dbm = init_xdbm, tmp_gl_idx = 0; 2104 u8 tmp_rxbb; 2105 u8 goout = 0, agc_cnt = 0; 2106 u16 dgain = 0; 2107 bool is_fail = false; 2108 int limit = 200; 2109 2110 do { 2111 switch (step) { 2112 case DPK_AGC_STEP_SYNC_DGAIN: 2113 is_fail = _dpk_kip_set_rxagc(rtwdev, phy, path, kidx); 2114 2115 if (RTW8852C_DPK_RXSRAM_DBG) 2116 _dpk_read_rxsram(rtwdev); 2117 2118 if (is_fail) { 2119 goout = 1; 2120 break; 2121 } 2122 2123 dgain = _dpk_dgain_read(rtwdev); 2124 2125 if (dgain > 0x5fc || dgain < 0x556) { 2126 _dpk_one_shot(rtwdev, phy, path, D_SYNC); 2127 dgain = _dpk_dgain_read(rtwdev); 2128 } 2129 2130 if (agc_cnt == 0) { 2131 if (dpk->bp[path][kidx].band == RTW89_BAND_2G) 2132 _dpk_bypass_rxiqc(rtwdev, path); 2133 else 2134 _dpk_lbk_rxiqk(rtwdev, phy, path); 2135 } 2136 step = DPK_AGC_STEP_GAIN_LOSS_IDX; 2137 break; 2138 2139 case DPK_AGC_STEP_GAIN_LOSS_IDX: 2140 tmp_gl_idx = _dpk_gainloss(rtwdev, phy, path, kidx); 2141 2142 if ((tmp_gl_idx == 0 && _dpk_pas_read(rtwdev, true)) || 2143 tmp_gl_idx >= 7) 2144 step = DPK_AGC_STEP_GL_GT_CRITERION; 2145 else if (tmp_gl_idx == 0) 2146 step = DPK_AGC_STEP_GL_LT_CRITERION; 2147 else 2148 step = DPK_AGC_STEP_SET_TX_GAIN; 2149 break; 2150 2151 case DPK_AGC_STEP_GL_GT_CRITERION: 2152 if (tmp_dbm <= 7) { 2153 goout = 1; 2154 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Txagc@lower bound!!\n"); 2155 } else { 2156 tmp_dbm = max_t(u8, tmp_dbm - 3, 7); 2157 _dpk_kip_set_txagc(rtwdev, phy, path, tmp_dbm, true); 2158 } 2159 step = DPK_AGC_STEP_SYNC_DGAIN; 2160 agc_cnt++; 2161 break; 2162 2163 case DPK_AGC_STEP_GL_LT_CRITERION: 2164 if (tmp_dbm >= 24) { 2165 goout = 1; 2166 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Txagc@upper bound!!\n"); 2167 } else { 2168 tmp_dbm = min_t(u8, tmp_dbm + 2, 24); 2169 _dpk_kip_set_txagc(rtwdev, phy, path, tmp_dbm, true); 2170 } 2171 step = DPK_AGC_STEP_SYNC_DGAIN; 2172 agc_cnt++; 2173 break; 2174 2175 case DPK_AGC_STEP_SET_TX_GAIN: 2176 _dpk_kip_control_rfc(rtwdev, path, false); 2177 tmp_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB); 2178 if (tmp_rxbb + tmp_gl_idx > 0x1f) 2179 tmp_rxbb = 0x1f; 2180 else 2181 tmp_rxbb = tmp_rxbb + tmp_gl_idx; 2182 2183 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB, tmp_rxbb); 2184 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Adjust RXBB (%+d) = 0x%x\n", 2185 tmp_gl_idx, tmp_rxbb); 2186 _dpk_kip_control_rfc(rtwdev, path, true); 2187 goout = 1; 2188 break; 2189 default: 2190 goout = 1; 2191 break; 2192 } 2193 } while (!goout && agc_cnt < 6 && --limit > 0); 2194 2195 if (limit <= 0) 2196 rtw89_warn(rtwdev, "[DPK] exceed loop limit\n"); 2197 2198 return is_fail; 2199 } 2200 2201 static void _dpk_set_mdpd_para(struct rtw89_dev *rtwdev, u8 order) 2202 { 2203 static const struct rtw89_rfk_tbl *order_tbls[] = { 2204 &rtw8852c_dpk_mdpd_order0_defs_tbl, 2205 &rtw8852c_dpk_mdpd_order1_defs_tbl, 2206 &rtw8852c_dpk_mdpd_order2_defs_tbl, 2207 &rtw8852c_dpk_mdpd_order3_defs_tbl, 2208 }; 2209 2210 if (order >= ARRAY_SIZE(order_tbls)) { 2211 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Wrong MDPD order!!(0x%x)\n", order); 2212 return; 2213 } 2214 2215 rtw89_rfk_parser(rtwdev, order_tbls[order]); 2216 2217 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Set %s for IDL\n", 2218 order == 0x0 ? "(5,3,1)" : 2219 order == 0x1 ? "(5,3,0)" : 2220 order == 0x2 ? "(5,0,0)" : "(7,3,1)"); 2221 } 2222 2223 static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2224 enum rtw89_rf_path path, u8 kidx) 2225 { 2226 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 2227 u8 cnt; 2228 u8 ov_flag; 2229 u32 dpk_sync; 2230 2231 rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_MA, 0x1); 2232 2233 if (rtw89_phy_read32_mask(rtwdev, R_DPK_MPA, B_DPK_MPA_T2) == 0x1) 2234 _dpk_set_mdpd_para(rtwdev, 0x2); 2235 else if (rtw89_phy_read32_mask(rtwdev, R_DPK_MPA, B_DPK_MPA_T1) == 0x1) 2236 _dpk_set_mdpd_para(rtwdev, 0x1); 2237 else if (rtw89_phy_read32_mask(rtwdev, R_DPK_MPA, B_DPK_MPA_T0) == 0x1) 2238 _dpk_set_mdpd_para(rtwdev, 0x0); 2239 else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_5 || 2240 dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_10 || 2241 dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_20) 2242 _dpk_set_mdpd_para(rtwdev, 0x2); 2243 else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40 || 2244 dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80) 2245 _dpk_set_mdpd_para(rtwdev, 0x1); 2246 else 2247 _dpk_set_mdpd_para(rtwdev, 0x0); 2248 2249 rtw89_phy_write32_mask(rtwdev, R_DPK_IDL, B_DPK_IDL, 0x0); 2250 fsleep(1000); 2251 2252 _dpk_one_shot(rtwdev, phy, path, D_MDPK_IDL); 2253 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0); 2254 dpk_sync = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD); 2255 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] dpk_sync = 0x%x\n", dpk_sync); 2256 2257 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0xf); 2258 ov_flag = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_SYNERR); 2259 for (cnt = 0; cnt < 5 && ov_flag == 0x1; cnt++) { 2260 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] ReK due to MDPK ov!!!\n"); 2261 _dpk_one_shot(rtwdev, phy, path, D_MDPK_IDL); 2262 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0xf); 2263 ov_flag = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_SYNERR); 2264 } 2265 2266 if (ov_flag) { 2267 _dpk_set_mdpd_para(rtwdev, 0x2); 2268 _dpk_one_shot(rtwdev, phy, path, D_MDPK_IDL); 2269 } 2270 } 2271 2272 static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2273 enum rtw89_rf_path path) 2274 { 2275 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 2276 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 2277 bool is_reload = false; 2278 u8 idx, cur_band, cur_ch; 2279 2280 cur_band = chan->band_type; 2281 cur_ch = chan->channel; 2282 2283 for (idx = 0; idx < RTW89_DPK_BKUP_NUM; idx++) { 2284 if (cur_band != dpk->bp[path][idx].band || 2285 cur_ch != dpk->bp[path][idx].ch) 2286 continue; 2287 2288 rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), 2289 B_COEF_SEL_MDPD, idx); 2290 dpk->cur_idx[path] = idx; 2291 is_reload = true; 2292 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2293 "[DPK] reload S%d[%d] success\n", path, idx); 2294 } 2295 2296 return is_reload; 2297 } 2298 2299 static void _dpk_kip_pwr_clk_onoff(struct rtw89_dev *rtwdev, bool turn_on) 2300 { 2301 rtw89_rfk_parser(rtwdev, turn_on ? &rtw8852c_dpk_kip_pwr_clk_on_defs_tbl : 2302 &rtw8852c_dpk_kip_pwr_clk_off_defs_tbl); 2303 } 2304 2305 static void _dpk_kip_preset_8852c(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2306 enum rtw89_rf_path path, u8 kidx) 2307 { 2308 rtw89_phy_write32_mask(rtwdev, R_KIP_MOD, B_KIP_MOD, 2309 rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK)); 2310 2311 if (rtwdev->hal.cv == CHIP_CAV) 2312 rtw89_phy_write32_mask(rtwdev, 2313 R_DPD_CH0A + (path << 8) + (kidx << 2), 2314 B_DPD_SEL, 0x01); 2315 else 2316 rtw89_phy_write32_mask(rtwdev, 2317 R_DPD_CH0A + (path << 8) + (kidx << 2), 2318 B_DPD_SEL, 0x0c); 2319 2320 _dpk_kip_control_rfc(rtwdev, path, true); 2321 rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), B_COEF_SEL_MDPD, kidx); 2322 2323 _dpk_one_shot(rtwdev, phy, path, D_KIP_PRESET); 2324 } 2325 2326 static void _dpk_para_query(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx) 2327 { 2328 #define _DPK_PARA_TXAGC GENMASK(15, 10) 2329 #define _DPK_PARA_THER GENMASK(31, 26) 2330 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 2331 u32 para; 2332 2333 para = rtw89_phy_read32_mask(rtwdev, dpk_par_regs[kidx][dpk->cur_k_set] + (path << 8), 2334 MASKDWORD); 2335 2336 dpk->bp[path][kidx].txagc_dpk = FIELD_GET(_DPK_PARA_TXAGC, para); 2337 dpk->bp[path][kidx].ther_dpk = FIELD_GET(_DPK_PARA_THER, para); 2338 2339 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] thermal/ txagc_RF (K%d) = 0x%x/ 0x%x\n", 2340 dpk->cur_k_set, dpk->bp[path][kidx].ther_dpk, dpk->bp[path][kidx].txagc_dpk); 2341 } 2342 2343 static void _dpk_gain_normalize_8852c(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2344 enum rtw89_rf_path path, u8 kidx, bool is_execute) 2345 { 2346 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 2347 2348 if (is_execute) { 2349 rtw89_phy_write32_mask(rtwdev, R_DPK_GN + (path << 8), B_DPK_GN_AG, 0x200); 2350 rtw89_phy_write32_mask(rtwdev, R_DPK_GN + (path << 8), B_DPK_GN_EN, 0x3); 2351 2352 _dpk_one_shot(rtwdev, phy, path, D_GAIN_NORM); 2353 } else { 2354 rtw89_phy_write32_mask(rtwdev, dpk_par_regs[kidx][dpk->cur_k_set] + (path << 8), 2355 0x0000007F, 0x5b); 2356 } 2357 dpk->bp[path][kidx].gs = 2358 rtw89_phy_read32_mask(rtwdev, dpk_par_regs[kidx][dpk->cur_k_set] + (path << 8), 2359 0x0000007F); 2360 } 2361 2362 static u8 _dpk_order_convert(struct rtw89_dev *rtwdev) 2363 { 2364 u32 val32 = rtw89_phy_read32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP); 2365 u8 val; 2366 2367 switch (val32) { 2368 case 0: 2369 val = 0x6; 2370 break; 2371 case 1: 2372 val = 0x2; 2373 break; 2374 case 2: 2375 val = 0x0; 2376 break; 2377 case 3: 2378 val = 0x7; 2379 break; 2380 default: 2381 val = 0xff; 2382 break; 2383 } 2384 2385 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] convert MDPD order to 0x%x\n", val); 2386 2387 return val; 2388 } 2389 2390 static void _dpk_on(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2391 enum rtw89_rf_path path, u8 kidx) 2392 { 2393 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 2394 2395 rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x1); 2396 rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x0); 2397 rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2), 2398 B_DPD_ORDER, _dpk_order_convert(rtwdev)); 2399 2400 dpk->bp[path][kidx].mdpd_en = BIT(dpk->cur_k_set); 2401 dpk->bp[path][kidx].path_ok = true; 2402 2403 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] path_ok = 0x%x\n", 2404 path, kidx, dpk->bp[path][kidx].mdpd_en); 2405 2406 rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2), 2407 B_DPD_MEN, dpk->bp[path][kidx].mdpd_en); 2408 2409 _dpk_gain_normalize_8852c(rtwdev, phy, path, kidx, false); 2410 } 2411 2412 static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2413 enum rtw89_rf_path path, u8 gain) 2414 { 2415 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 2416 u8 kidx = dpk->cur_idx[path]; 2417 u8 init_xdbm = 15; 2418 bool is_fail; 2419 2420 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2421 "[DPK] ========= S%d[%d] DPK Start =========\n", path, kidx); 2422 _dpk_kip_control_rfc(rtwdev, path, false); 2423 _rf_direct_cntrl(rtwdev, path, false); 2424 rtw89_write_rf(rtwdev, path, RR_BBDC, RFREG_MASK, 0x03ffd); 2425 _dpk_rf_setting(rtwdev, gain, path, kidx); 2426 _set_rx_dck(rtwdev, phy, path, false); 2427 _dpk_kip_pwr_clk_onoff(rtwdev, true); 2428 _dpk_kip_preset_8852c(rtwdev, phy, path, kidx); 2429 _dpk_txpwr_bb_force(rtwdev, path, true); 2430 _dpk_kip_set_txagc(rtwdev, phy, path, init_xdbm, true); 2431 _dpk_tpg_sel(rtwdev, path, kidx); 2432 2433 is_fail = _dpk_agc(rtwdev, phy, path, kidx, init_xdbm, false); 2434 if (is_fail) 2435 goto _error; 2436 2437 _dpk_idl_mpa(rtwdev, phy, path, kidx); 2438 _dpk_para_query(rtwdev, path, kidx); 2439 _dpk_on(rtwdev, phy, path, kidx); 2440 2441 _error: 2442 _dpk_kip_control_rfc(rtwdev, path, false); 2443 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RF_RX); 2444 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d]_K%d %s\n", path, kidx, 2445 dpk->cur_k_set, is_fail ? "need Check" : "is Success"); 2446 2447 return is_fail; 2448 } 2449 2450 static void _dpk_init(struct rtw89_dev *rtwdev, u8 path) 2451 { 2452 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 2453 u8 kidx = dpk->cur_idx[path]; 2454 2455 dpk->bp[path][kidx].path_ok = false; 2456 } 2457 2458 static void _dpk_drf_direct_cntrl(struct rtw89_dev *rtwdev, u8 path, bool is_bybb) 2459 { 2460 if (is_bybb) 2461 rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x1); 2462 else 2463 rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0); 2464 } 2465 2466 static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force, 2467 enum rtw89_phy_idx phy, u8 kpath) 2468 { 2469 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 2470 static const u32 kip_reg[] = {0x813c, 0x8124, 0x8120, 0xc0d4, 0xc0d8}; 2471 u32 backup_rf_val[RTW8852C_DPK_RF_PATH][BACKUP_RF_REGS_NR]; 2472 u32 kip_bkup[RTW8852C_DPK_RF_PATH][RTW8852C_DPK_KIP_REG_NUM] = {}; 2473 u8 path; 2474 bool is_fail = true, reloaded[RTW8852C_DPK_RF_PATH] = {false}; 2475 2476 if (dpk->is_dpk_reload_en) { 2477 for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) { 2478 if (!(kpath & BIT(path))) 2479 continue; 2480 2481 reloaded[path] = _dpk_reload_check(rtwdev, phy, path); 2482 if (!reloaded[path] && dpk->bp[path][0].ch != 0) 2483 dpk->cur_idx[path] = !dpk->cur_idx[path]; 2484 else 2485 _dpk_onoff(rtwdev, path, false); 2486 } 2487 } else { 2488 for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) 2489 dpk->cur_idx[path] = 0; 2490 } 2491 2492 for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) { 2493 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2494 "[DPK] ========= S%d[%d] DPK Init =========\n", 2495 path, dpk->cur_idx[path]); 2496 _dpk_bkup_kip(rtwdev, kip_reg, kip_bkup, path); 2497 _rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path); 2498 _dpk_information(rtwdev, phy, path); 2499 _dpk_init(rtwdev, path); 2500 if (rtwdev->is_tssi_mode[path]) 2501 _dpk_tssi_pause(rtwdev, path, true); 2502 } 2503 2504 for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) { 2505 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2506 "[DPK] ========= S%d[%d] DPK Start =========\n", 2507 path, dpk->cur_idx[path]); 2508 rtw8852c_disable_rxagc(rtwdev, path, 0x0); 2509 _dpk_drf_direct_cntrl(rtwdev, path, false); 2510 _dpk_bb_afe_setting(rtwdev, phy, path, kpath); 2511 is_fail = _dpk_main(rtwdev, phy, path, 1); 2512 _dpk_onoff(rtwdev, path, is_fail); 2513 } 2514 2515 for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) { 2516 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2517 "[DPK] ========= S%d[%d] DPK Restore =========\n", 2518 path, dpk->cur_idx[path]); 2519 _dpk_kip_restore(rtwdev, phy, path); 2520 _dpk_reload_kip(rtwdev, kip_reg, kip_bkup, path); 2521 _rfk_restore_rf_reg(rtwdev, backup_rf_val[path], path); 2522 _dpk_bb_afe_restore(rtwdev, path); 2523 rtw8852c_disable_rxagc(rtwdev, path, 0x1); 2524 if (rtwdev->is_tssi_mode[path]) 2525 _dpk_tssi_pause(rtwdev, path, false); 2526 } 2527 2528 _dpk_kip_pwr_clk_onoff(rtwdev, false); 2529 } 2530 2531 static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) 2532 { 2533 struct rtw89_fem_info *fem = &rtwdev->fem; 2534 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 2535 u8 band = chan->band_type; 2536 2537 if (rtwdev->hal.cv == CHIP_CAV && band != RTW89_BAND_2G) { 2538 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK due to CAV & not 2G!!\n"); 2539 return true; 2540 } else if (fem->epa_2g && band == RTW89_BAND_2G) { 2541 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK due to 2G_ext_PA exist!!\n"); 2542 return true; 2543 } else if (fem->epa_5g && band == RTW89_BAND_5G) { 2544 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK due to 5G_ext_PA exist!!\n"); 2545 return true; 2546 } else if (fem->epa_6g && band == RTW89_BAND_6G) { 2547 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK due to 6G_ext_PA exist!!\n"); 2548 return true; 2549 } 2550 2551 return false; 2552 } 2553 2554 static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) 2555 { 2556 u8 path, kpath; 2557 2558 kpath = _kpath(rtwdev, phy); 2559 2560 for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) { 2561 if (kpath & BIT(path)) 2562 _dpk_onoff(rtwdev, path, true); 2563 } 2564 } 2565 2566 static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force) 2567 { 2568 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2569 "[DPK] ****** DPK Start (Ver: 0x%x, Cv: %d, RF_para: %d) ******\n", 2570 RTW8852C_DPK_VER, rtwdev->hal.cv, 2571 RTW8852C_RF_REL_VERSION); 2572 2573 if (_dpk_bypass_check(rtwdev, phy)) 2574 _dpk_force_bypass(rtwdev, phy); 2575 else 2576 _dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy)); 2577 2578 if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_DCKC, RR_DCKC_CHK) == 0x1) 2579 rtw8852c_rx_dck(rtwdev, phy, false); 2580 } 2581 2582 static void _dpk_onoff(struct rtw89_dev *rtwdev, 2583 enum rtw89_rf_path path, bool off) 2584 { 2585 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 2586 u8 val, kidx = dpk->cur_idx[path]; 2587 2588 val = dpk->is_dpk_enable && !off && dpk->bp[path][kidx].path_ok ? 2589 dpk->bp[path][kidx].mdpd_en : 0; 2590 2591 rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2), 2592 B_DPD_MEN, val); 2593 2594 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s !!!\n", path, 2595 kidx, dpk->is_dpk_enable && !off ? "enable" : "disable"); 2596 } 2597 2598 static void _dpk_track(struct rtw89_dev *rtwdev) 2599 { 2600 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 2601 u8 path, kidx; 2602 u8 txagc_rf = 0; 2603 s8 txagc_bb = 0, txagc_bb_tp = 0, txagc_ofst = 0; 2604 u8 cur_ther; 2605 s8 delta_ther = 0; 2606 s16 pwsf_tssi_ofst; 2607 2608 for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) { 2609 kidx = dpk->cur_idx[path]; 2610 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, 2611 "[DPK_TRK] ================[S%d[%d] (CH %d)]================\n", 2612 path, kidx, dpk->bp[path][kidx].ch); 2613 2614 txagc_rf = 2615 rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13), 0x0000003f); 2616 txagc_bb = 2617 rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13), MASKBYTE2); 2618 txagc_bb_tp = 2619 rtw89_phy_read32_mask(rtwdev, R_TXAGC_BTP + (path << 13), B_TXAGC_BTP); 2620 2621 /* report from KIP */ 2622 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT + (path << 8), B_KIP_RPT_SEL, 0xf); 2623 cur_ther = 2624 rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), B_RPT_PER_TH); 2625 txagc_ofst = 2626 rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), B_RPT_PER_OF); 2627 pwsf_tssi_ofst = 2628 rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), B_RPT_PER_TSSI); 2629 pwsf_tssi_ofst = sign_extend32(pwsf_tssi_ofst, 12); 2630 2631 cur_ther = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]); 2632 2633 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, 2634 "[DPK_TRK] thermal now = %d\n", cur_ther); 2635 2636 if (dpk->bp[path][kidx].ch != 0 && cur_ther != 0) 2637 delta_ther = dpk->bp[path][kidx].ther_dpk - cur_ther; 2638 2639 delta_ther = delta_ther * 1 / 2; 2640 2641 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, 2642 "[DPK_TRK] extra delta_ther = %d (0x%x / 0x%x@k)\n", 2643 delta_ther, cur_ther, dpk->bp[path][kidx].ther_dpk); 2644 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, 2645 "[DPK_TRK] delta_txagc = %d (0x%x / 0x%x@k)\n", 2646 txagc_rf - dpk->bp[path][kidx].txagc_dpk, txagc_rf, 2647 dpk->bp[path][kidx].txagc_dpk); 2648 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, 2649 "[DPK_TRK] txagc_offset / pwsf_tssi_ofst = 0x%x / %+d\n", 2650 txagc_ofst, pwsf_tssi_ofst); 2651 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, 2652 "[DPK_TRK] txagc_bb_tp / txagc_bb = 0x%x / 0x%x\n", 2653 txagc_bb_tp, txagc_bb); 2654 2655 if (rtw89_phy_read32_mask(rtwdev, R_DPK_WR, B_DPK_WR_ST) == 0x0 && 2656 txagc_rf != 0 && rtwdev->hal.cv == CHIP_CAV) { 2657 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, 2658 "[DPK_TRK] New pwsf = 0x%x\n", 0x78 - delta_ther); 2659 2660 rtw89_phy_write32_mask(rtwdev, R_DPD_BND + (path << 8) + (kidx << 2), 2661 0x07FC0000, 0x78 - delta_ther); 2662 } 2663 } 2664 } 2665 2666 static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2667 enum rtw89_rf_path path) 2668 { 2669 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 2670 enum rtw89_band band = chan->band_type; 2671 2672 rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_sys_defs_tbl); 2673 2674 if (path == RF_PATH_A) 2675 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, 2676 &rtw8852c_tssi_sys_defs_2g_a_tbl, 2677 &rtw8852c_tssi_sys_defs_5g_a_tbl); 2678 else 2679 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, 2680 &rtw8852c_tssi_sys_defs_2g_b_tbl, 2681 &rtw8852c_tssi_sys_defs_5g_b_tbl); 2682 } 2683 2684 static void _tssi_ini_txpwr_ctrl_bb(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2685 enum rtw89_rf_path path) 2686 { 2687 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, 2688 &rtw8852c_tssi_txpwr_ctrl_bb_defs_a_tbl, 2689 &rtw8852c_tssi_txpwr_ctrl_bb_defs_b_tbl); 2690 } 2691 2692 static void _tssi_ini_txpwr_ctrl_bb_he_tb(struct rtw89_dev *rtwdev, 2693 enum rtw89_phy_idx phy, 2694 enum rtw89_rf_path path) 2695 { 2696 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, 2697 &rtw8852c_tssi_txpwr_ctrl_bb_he_tb_defs_a_tbl, 2698 &rtw8852c_tssi_txpwr_ctrl_bb_he_tb_defs_b_tbl); 2699 } 2700 2701 static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2702 enum rtw89_rf_path path) 2703 { 2704 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 2705 enum rtw89_band band = chan->band_type; 2706 2707 if (path == RF_PATH_A) { 2708 rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_dck_defs_a_tbl); 2709 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, 2710 &rtw8852c_tssi_dck_defs_2g_a_tbl, 2711 &rtw8852c_tssi_dck_defs_5g_a_tbl); 2712 } else { 2713 rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_dck_defs_b_tbl); 2714 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, 2715 &rtw8852c_tssi_dck_defs_2g_b_tbl, 2716 &rtw8852c_tssi_dck_defs_5g_b_tbl); 2717 } 2718 } 2719 2720 static void _tssi_set_bbgain_split(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2721 enum rtw89_rf_path path) 2722 { 2723 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, 2724 &rtw8852c_tssi_set_bbgain_split_a_tbl, 2725 &rtw8852c_tssi_set_bbgain_split_b_tbl); 2726 } 2727 2728 static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2729 enum rtw89_rf_path path) 2730 { 2731 #define RTW8852C_TSSI_GET_VAL(ptr, idx) \ 2732 ({ \ 2733 s8 *__ptr = (ptr); \ 2734 u8 __idx = (idx), __i, __v; \ 2735 u32 __val = 0; \ 2736 for (__i = 0; __i < 4; __i++) { \ 2737 __v = (__ptr[__idx + __i]); \ 2738 __val |= (__v << (8 * __i)); \ 2739 } \ 2740 __val; \ 2741 }) 2742 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; 2743 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 2744 u8 ch = chan->channel; 2745 u8 subband = chan->subband_type; 2746 const s8 *thm_up_a = NULL; 2747 const s8 *thm_down_a = NULL; 2748 const s8 *thm_up_b = NULL; 2749 const s8 *thm_down_b = NULL; 2750 u8 thermal = 0xff; 2751 s8 thm_ofst[64] = {0}; 2752 u32 tmp = 0; 2753 u8 i, j; 2754 2755 switch (subband) { 2756 default: 2757 case RTW89_CH_2G: 2758 thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_2ga_p; 2759 thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_2ga_n; 2760 thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_2gb_p; 2761 thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_2gb_n; 2762 break; 2763 case RTW89_CH_5G_BAND_1: 2764 thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[0]; 2765 thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[0]; 2766 thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[0]; 2767 thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[0]; 2768 break; 2769 case RTW89_CH_5G_BAND_3: 2770 thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[1]; 2771 thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[1]; 2772 thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[1]; 2773 thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[1]; 2774 break; 2775 case RTW89_CH_5G_BAND_4: 2776 thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[2]; 2777 thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[2]; 2778 thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[2]; 2779 thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[2]; 2780 break; 2781 case RTW89_CH_6G_BAND_IDX0: 2782 case RTW89_CH_6G_BAND_IDX1: 2783 thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[0]; 2784 thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[0]; 2785 thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[0]; 2786 thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[0]; 2787 break; 2788 case RTW89_CH_6G_BAND_IDX2: 2789 case RTW89_CH_6G_BAND_IDX3: 2790 thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[1]; 2791 thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[1]; 2792 thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[1]; 2793 thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[1]; 2794 break; 2795 case RTW89_CH_6G_BAND_IDX4: 2796 case RTW89_CH_6G_BAND_IDX5: 2797 thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[2]; 2798 thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[2]; 2799 thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[2]; 2800 thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[2]; 2801 break; 2802 case RTW89_CH_6G_BAND_IDX6: 2803 case RTW89_CH_6G_BAND_IDX7: 2804 thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[3]; 2805 thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[3]; 2806 thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[3]; 2807 thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[3]; 2808 break; 2809 } 2810 2811 if (path == RF_PATH_A) { 2812 thermal = tssi_info->thermal[RF_PATH_A]; 2813 2814 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 2815 "[TSSI] ch=%d thermal_pathA=0x%x\n", ch, thermal); 2816 2817 rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_DIS, 0x0); 2818 rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_TRK, 0x1); 2819 2820 if (thermal == 0xff) { 2821 rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, 32); 2822 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, 32); 2823 2824 for (i = 0; i < 64; i += 4) { 2825 rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, 0x0); 2826 2827 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 2828 "[TSSI] write 0x%x val=0x%08x\n", 2829 0x5c00 + i, 0x0); 2830 } 2831 2832 } else { 2833 rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, thermal); 2834 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, 2835 thermal); 2836 2837 i = 0; 2838 for (j = 0; j < 32; j++) 2839 thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ? 2840 -thm_down_a[i++] : 2841 -thm_down_a[DELTA_SWINGIDX_SIZE - 1]; 2842 2843 i = 1; 2844 for (j = 63; j >= 32; j--) 2845 thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ? 2846 thm_up_a[i++] : 2847 thm_up_a[DELTA_SWINGIDX_SIZE - 1]; 2848 2849 for (i = 0; i < 64; i += 4) { 2850 tmp = RTW8852C_TSSI_GET_VAL(thm_ofst, i); 2851 rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, tmp); 2852 2853 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 2854 "[TSSI] write 0x%x val=0x%08x\n", 2855 0x5c00 + i, tmp); 2856 } 2857 } 2858 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x1); 2859 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x0); 2860 2861 } else { 2862 thermal = tssi_info->thermal[RF_PATH_B]; 2863 2864 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 2865 "[TSSI] ch=%d thermal_pathB=0x%x\n", ch, thermal); 2866 2867 rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_DIS, 0x0); 2868 rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_TRK, 0x1); 2869 2870 if (thermal == 0xff) { 2871 rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, 32); 2872 rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL, 32); 2873 2874 for (i = 0; i < 64; i += 4) { 2875 rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, 0x0); 2876 2877 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 2878 "[TSSI] write 0x%x val=0x%08x\n", 2879 0x7c00 + i, 0x0); 2880 } 2881 2882 } else { 2883 rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, thermal); 2884 rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL, 2885 thermal); 2886 2887 i = 0; 2888 for (j = 0; j < 32; j++) 2889 thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ? 2890 -thm_down_b[i++] : 2891 -thm_down_b[DELTA_SWINGIDX_SIZE - 1]; 2892 2893 i = 1; 2894 for (j = 63; j >= 32; j--) 2895 thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ? 2896 thm_up_b[i++] : 2897 thm_up_b[DELTA_SWINGIDX_SIZE - 1]; 2898 2899 for (i = 0; i < 64; i += 4) { 2900 tmp = RTW8852C_TSSI_GET_VAL(thm_ofst, i); 2901 rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, tmp); 2902 2903 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 2904 "[TSSI] write 0x%x val=0x%08x\n", 2905 0x7c00 + i, tmp); 2906 } 2907 } 2908 rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x1); 2909 rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x0); 2910 } 2911 #undef RTW8852C_TSSI_GET_VAL 2912 } 2913 2914 static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2915 enum rtw89_rf_path path) 2916 { 2917 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 2918 enum rtw89_band band = chan->band_type; 2919 2920 if (path == RF_PATH_A) { 2921 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, 2922 &rtw8852c_tssi_slope_cal_org_defs_2g_a_tbl, 2923 &rtw8852c_tssi_slope_cal_org_defs_5g_a_tbl); 2924 } else { 2925 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, 2926 &rtw8852c_tssi_slope_cal_org_defs_2g_b_tbl, 2927 &rtw8852c_tssi_slope_cal_org_defs_5g_b_tbl); 2928 } 2929 } 2930 2931 static void _tssi_set_aligk_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2932 enum rtw89_rf_path path) 2933 { 2934 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 2935 enum rtw89_band band = chan->band_type; 2936 const struct rtw89_rfk_tbl *tbl; 2937 2938 if (path == RF_PATH_A) { 2939 if (band == RTW89_BAND_2G) 2940 tbl = &rtw8852c_tssi_set_aligk_default_defs_2g_a_tbl; 2941 else if (band == RTW89_BAND_6G) 2942 tbl = &rtw8852c_tssi_set_aligk_default_defs_6g_a_tbl; 2943 else 2944 tbl = &rtw8852c_tssi_set_aligk_default_defs_5g_a_tbl; 2945 } else { 2946 if (band == RTW89_BAND_2G) 2947 tbl = &rtw8852c_tssi_set_aligk_default_defs_2g_b_tbl; 2948 else if (band == RTW89_BAND_6G) 2949 tbl = &rtw8852c_tssi_set_aligk_default_defs_6g_b_tbl; 2950 else 2951 tbl = &rtw8852c_tssi_set_aligk_default_defs_5g_b_tbl; 2952 } 2953 2954 rtw89_rfk_parser(rtwdev, tbl); 2955 } 2956 2957 static void _tssi_set_slope(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2958 enum rtw89_rf_path path) 2959 { 2960 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, 2961 &rtw8852c_tssi_slope_defs_a_tbl, 2962 &rtw8852c_tssi_slope_defs_b_tbl); 2963 } 2964 2965 static void _tssi_run_slope(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2966 enum rtw89_rf_path path) 2967 { 2968 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, 2969 &rtw8852c_tssi_run_slope_defs_a_tbl, 2970 &rtw8852c_tssi_run_slope_defs_b_tbl); 2971 } 2972 2973 static void _tssi_set_track(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 2974 enum rtw89_rf_path path) 2975 { 2976 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, 2977 &rtw8852c_tssi_track_defs_a_tbl, 2978 &rtw8852c_tssi_track_defs_b_tbl); 2979 } 2980 2981 static void _tssi_set_txagc_offset_mv_avg(struct rtw89_dev *rtwdev, 2982 enum rtw89_phy_idx phy, 2983 enum rtw89_rf_path path) 2984 { 2985 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, 2986 &rtw8852c_tssi_txagc_ofst_mv_avg_defs_a_tbl, 2987 &rtw8852c_tssi_txagc_ofst_mv_avg_defs_b_tbl); 2988 } 2989 2990 static void _tssi_enable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) 2991 { 2992 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; 2993 u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C; 2994 2995 if (rtwdev->dbcc_en) { 2996 if (phy == RTW89_PHY_0) { 2997 path = RF_PATH_A; 2998 path_max = RF_PATH_B; 2999 } else if (phy == RTW89_PHY_1) { 3000 path = RF_PATH_B; 3001 path_max = RF_PATH_NUM_8852C; 3002 } 3003 } 3004 3005 for (i = path; i < path_max; i++) { 3006 _tssi_set_track(rtwdev, phy, i); 3007 _tssi_set_txagc_offset_mv_avg(rtwdev, phy, i); 3008 3009 rtw89_rfk_parser_by_cond(rtwdev, i == RF_PATH_A, 3010 &rtw8852c_tssi_enable_defs_a_tbl, 3011 &rtw8852c_tssi_enable_defs_b_tbl); 3012 3013 tssi_info->base_thermal[i] = 3014 ewma_thermal_read(&rtwdev->phystat.avg_thermal[i]); 3015 rtwdev->is_tssi_mode[i] = true; 3016 } 3017 } 3018 3019 static void _tssi_disable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) 3020 { 3021 u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C; 3022 3023 if (rtwdev->dbcc_en) { 3024 if (phy == RTW89_PHY_0) { 3025 path = RF_PATH_A; 3026 path_max = RF_PATH_B; 3027 } else if (phy == RTW89_PHY_1) { 3028 path = RF_PATH_B; 3029 path_max = RF_PATH_NUM_8852C; 3030 } 3031 } 3032 3033 for (i = path; i < path_max; i++) { 3034 if (i == RF_PATH_A) { 3035 rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_disable_defs_a_tbl); 3036 rtwdev->is_tssi_mode[RF_PATH_A] = false; 3037 } else if (i == RF_PATH_B) { 3038 rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_disable_defs_b_tbl); 3039 rtwdev->is_tssi_mode[RF_PATH_B] = false; 3040 } 3041 } 3042 } 3043 3044 static u32 _tssi_get_cck_group(struct rtw89_dev *rtwdev, u8 ch) 3045 { 3046 switch (ch) { 3047 case 1 ... 2: 3048 return 0; 3049 case 3 ... 5: 3050 return 1; 3051 case 6 ... 8: 3052 return 2; 3053 case 9 ... 11: 3054 return 3; 3055 case 12 ... 13: 3056 return 4; 3057 case 14: 3058 return 5; 3059 } 3060 3061 return 0; 3062 } 3063 3064 #define TSSI_EXTRA_GROUP_BIT (BIT(31)) 3065 #define TSSI_EXTRA_GROUP(idx) (TSSI_EXTRA_GROUP_BIT | (idx)) 3066 #define IS_TSSI_EXTRA_GROUP(group) ((group) & TSSI_EXTRA_GROUP_BIT) 3067 #define TSSI_EXTRA_GET_GROUP_IDX1(group) ((group) & ~TSSI_EXTRA_GROUP_BIT) 3068 #define TSSI_EXTRA_GET_GROUP_IDX2(group) (TSSI_EXTRA_GET_GROUP_IDX1(group) + 1) 3069 3070 static u32 _tssi_get_ofdm_group(struct rtw89_dev *rtwdev, u8 ch) 3071 { 3072 switch (ch) { 3073 case 1 ... 2: 3074 return 0; 3075 case 3 ... 5: 3076 return 1; 3077 case 6 ... 8: 3078 return 2; 3079 case 9 ... 11: 3080 return 3; 3081 case 12 ... 14: 3082 return 4; 3083 case 36 ... 40: 3084 return 5; 3085 case 41 ... 43: 3086 return TSSI_EXTRA_GROUP(5); 3087 case 44 ... 48: 3088 return 6; 3089 case 49 ... 51: 3090 return TSSI_EXTRA_GROUP(6); 3091 case 52 ... 56: 3092 return 7; 3093 case 57 ... 59: 3094 return TSSI_EXTRA_GROUP(7); 3095 case 60 ... 64: 3096 return 8; 3097 case 100 ... 104: 3098 return 9; 3099 case 105 ... 107: 3100 return TSSI_EXTRA_GROUP(9); 3101 case 108 ... 112: 3102 return 10; 3103 case 113 ... 115: 3104 return TSSI_EXTRA_GROUP(10); 3105 case 116 ... 120: 3106 return 11; 3107 case 121 ... 123: 3108 return TSSI_EXTRA_GROUP(11); 3109 case 124 ... 128: 3110 return 12; 3111 case 129 ... 131: 3112 return TSSI_EXTRA_GROUP(12); 3113 case 132 ... 136: 3114 return 13; 3115 case 137 ... 139: 3116 return TSSI_EXTRA_GROUP(13); 3117 case 140 ... 144: 3118 return 14; 3119 case 149 ... 153: 3120 return 15; 3121 case 154 ... 156: 3122 return TSSI_EXTRA_GROUP(15); 3123 case 157 ... 161: 3124 return 16; 3125 case 162 ... 164: 3126 return TSSI_EXTRA_GROUP(16); 3127 case 165 ... 169: 3128 return 17; 3129 case 170 ... 172: 3130 return TSSI_EXTRA_GROUP(17); 3131 case 173 ... 177: 3132 return 18; 3133 } 3134 3135 return 0; 3136 } 3137 3138 static u32 _tssi_get_6g_ofdm_group(struct rtw89_dev *rtwdev, u8 ch) 3139 { 3140 switch (ch) { 3141 case 1 ... 5: 3142 return 0; 3143 case 6 ... 8: 3144 return TSSI_EXTRA_GROUP(0); 3145 case 9 ... 13: 3146 return 1; 3147 case 14 ... 16: 3148 return TSSI_EXTRA_GROUP(1); 3149 case 17 ... 21: 3150 return 2; 3151 case 22 ... 24: 3152 return TSSI_EXTRA_GROUP(2); 3153 case 25 ... 29: 3154 return 3; 3155 case 33 ... 37: 3156 return 4; 3157 case 38 ... 40: 3158 return TSSI_EXTRA_GROUP(4); 3159 case 41 ... 45: 3160 return 5; 3161 case 46 ... 48: 3162 return TSSI_EXTRA_GROUP(5); 3163 case 49 ... 53: 3164 return 6; 3165 case 54 ... 56: 3166 return TSSI_EXTRA_GROUP(6); 3167 case 57 ... 61: 3168 return 7; 3169 case 65 ... 69: 3170 return 8; 3171 case 70 ... 72: 3172 return TSSI_EXTRA_GROUP(8); 3173 case 73 ... 77: 3174 return 9; 3175 case 78 ... 80: 3176 return TSSI_EXTRA_GROUP(9); 3177 case 81 ... 85: 3178 return 10; 3179 case 86 ... 88: 3180 return TSSI_EXTRA_GROUP(10); 3181 case 89 ... 93: 3182 return 11; 3183 case 97 ... 101: 3184 return 12; 3185 case 102 ... 104: 3186 return TSSI_EXTRA_GROUP(12); 3187 case 105 ... 109: 3188 return 13; 3189 case 110 ... 112: 3190 return TSSI_EXTRA_GROUP(13); 3191 case 113 ... 117: 3192 return 14; 3193 case 118 ... 120: 3194 return TSSI_EXTRA_GROUP(14); 3195 case 121 ... 125: 3196 return 15; 3197 case 129 ... 133: 3198 return 16; 3199 case 134 ... 136: 3200 return TSSI_EXTRA_GROUP(16); 3201 case 137 ... 141: 3202 return 17; 3203 case 142 ... 144: 3204 return TSSI_EXTRA_GROUP(17); 3205 case 145 ... 149: 3206 return 18; 3207 case 150 ... 152: 3208 return TSSI_EXTRA_GROUP(18); 3209 case 153 ... 157: 3210 return 19; 3211 case 161 ... 165: 3212 return 20; 3213 case 166 ... 168: 3214 return TSSI_EXTRA_GROUP(20); 3215 case 169 ... 173: 3216 return 21; 3217 case 174 ... 176: 3218 return TSSI_EXTRA_GROUP(21); 3219 case 177 ... 181: 3220 return 22; 3221 case 182 ... 184: 3222 return TSSI_EXTRA_GROUP(22); 3223 case 185 ... 189: 3224 return 23; 3225 case 193 ... 197: 3226 return 24; 3227 case 198 ... 200: 3228 return TSSI_EXTRA_GROUP(24); 3229 case 201 ... 205: 3230 return 25; 3231 case 206 ... 208: 3232 return TSSI_EXTRA_GROUP(25); 3233 case 209 ... 213: 3234 return 26; 3235 case 214 ... 216: 3236 return TSSI_EXTRA_GROUP(26); 3237 case 217 ... 221: 3238 return 27; 3239 case 225 ... 229: 3240 return 28; 3241 case 230 ... 232: 3242 return TSSI_EXTRA_GROUP(28); 3243 case 233 ... 237: 3244 return 29; 3245 case 238 ... 240: 3246 return TSSI_EXTRA_GROUP(29); 3247 case 241 ... 245: 3248 return 30; 3249 case 246 ... 248: 3250 return TSSI_EXTRA_GROUP(30); 3251 case 249 ... 253: 3252 return 31; 3253 } 3254 3255 return 0; 3256 } 3257 3258 static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch) 3259 { 3260 switch (ch) { 3261 case 1 ... 8: 3262 return 0; 3263 case 9 ... 14: 3264 return 1; 3265 case 36 ... 48: 3266 return 2; 3267 case 49 ... 51: 3268 return TSSI_EXTRA_GROUP(2); 3269 case 52 ... 64: 3270 return 3; 3271 case 100 ... 112: 3272 return 4; 3273 case 113 ... 115: 3274 return TSSI_EXTRA_GROUP(4); 3275 case 116 ... 128: 3276 return 5; 3277 case 132 ... 144: 3278 return 6; 3279 case 149 ... 177: 3280 return 7; 3281 } 3282 3283 return 0; 3284 } 3285 3286 static u32 _tssi_get_6g_trim_group(struct rtw89_dev *rtwdev, u8 ch) 3287 { 3288 switch (ch) { 3289 case 1 ... 13: 3290 return 0; 3291 case 14 ... 16: 3292 return TSSI_EXTRA_GROUP(0); 3293 case 17 ... 29: 3294 return 1; 3295 case 33 ... 45: 3296 return 2; 3297 case 46 ... 48: 3298 return TSSI_EXTRA_GROUP(2); 3299 case 49 ... 61: 3300 return 3; 3301 case 65 ... 77: 3302 return 4; 3303 case 78 ... 80: 3304 return TSSI_EXTRA_GROUP(4); 3305 case 81 ... 93: 3306 return 5; 3307 case 97 ... 109: 3308 return 6; 3309 case 110 ... 112: 3310 return TSSI_EXTRA_GROUP(6); 3311 case 113 ... 125: 3312 return 7; 3313 case 129 ... 141: 3314 return 8; 3315 case 142 ... 144: 3316 return TSSI_EXTRA_GROUP(8); 3317 case 145 ... 157: 3318 return 9; 3319 case 161 ... 173: 3320 return 10; 3321 case 174 ... 176: 3322 return TSSI_EXTRA_GROUP(10); 3323 case 177 ... 189: 3324 return 11; 3325 case 193 ... 205: 3326 return 12; 3327 case 206 ... 208: 3328 return TSSI_EXTRA_GROUP(12); 3329 case 209 ... 221: 3330 return 13; 3331 case 225 ... 237: 3332 return 14; 3333 case 238 ... 240: 3334 return TSSI_EXTRA_GROUP(14); 3335 case 241 ... 253: 3336 return 15; 3337 } 3338 3339 return 0; 3340 } 3341 3342 static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 3343 enum rtw89_rf_path path) 3344 { 3345 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; 3346 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 3347 enum rtw89_band band = chan->band_type; 3348 u8 ch = chan->channel; 3349 u32 gidx, gidx_1st, gidx_2nd; 3350 s8 de_1st; 3351 s8 de_2nd; 3352 s8 val; 3353 3354 if (band == RTW89_BAND_2G || band == RTW89_BAND_5G) { 3355 gidx = _tssi_get_ofdm_group(rtwdev, ch); 3356 3357 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3358 "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n", 3359 path, gidx); 3360 3361 if (IS_TSSI_EXTRA_GROUP(gidx)) { 3362 gidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(gidx); 3363 gidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(gidx); 3364 de_1st = tssi_info->tssi_mcs[path][gidx_1st]; 3365 de_2nd = tssi_info->tssi_mcs[path][gidx_2nd]; 3366 val = (de_1st + de_2nd) / 2; 3367 3368 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3369 "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n", 3370 path, val, de_1st, de_2nd); 3371 } else { 3372 val = tssi_info->tssi_mcs[path][gidx]; 3373 3374 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3375 "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val); 3376 } 3377 } else { 3378 gidx = _tssi_get_6g_ofdm_group(rtwdev, ch); 3379 3380 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3381 "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n", 3382 path, gidx); 3383 3384 if (IS_TSSI_EXTRA_GROUP(gidx)) { 3385 gidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(gidx); 3386 gidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(gidx); 3387 de_1st = tssi_info->tssi_6g_mcs[path][gidx_1st]; 3388 de_2nd = tssi_info->tssi_6g_mcs[path][gidx_2nd]; 3389 val = (de_1st + de_2nd) / 2; 3390 3391 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3392 "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n", 3393 path, val, de_1st, de_2nd); 3394 } else { 3395 val = tssi_info->tssi_6g_mcs[path][gidx]; 3396 3397 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3398 "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val); 3399 } 3400 } 3401 3402 return val; 3403 } 3404 3405 static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, 3406 enum rtw89_phy_idx phy, 3407 enum rtw89_rf_path path) 3408 { 3409 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; 3410 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 3411 enum rtw89_band band = chan->band_type; 3412 u8 ch = chan->channel; 3413 u32 tgidx, tgidx_1st, tgidx_2nd; 3414 s8 tde_1st = 0; 3415 s8 tde_2nd = 0; 3416 s8 val; 3417 3418 if (band == RTW89_BAND_2G || band == RTW89_BAND_5G) { 3419 tgidx = _tssi_get_trim_group(rtwdev, ch); 3420 3421 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3422 "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n", 3423 path, tgidx); 3424 3425 if (IS_TSSI_EXTRA_GROUP(tgidx)) { 3426 tgidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(tgidx); 3427 tgidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(tgidx); 3428 tde_1st = tssi_info->tssi_trim[path][tgidx_1st]; 3429 tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd]; 3430 val = (tde_1st + tde_2nd) / 2; 3431 3432 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3433 "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n", 3434 path, val, tde_1st, tde_2nd); 3435 } else { 3436 val = tssi_info->tssi_trim[path][tgidx]; 3437 3438 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3439 "[TSSI][TRIM]: path=%d mcs trim_de=%d\n", 3440 path, val); 3441 } 3442 } else { 3443 tgidx = _tssi_get_6g_trim_group(rtwdev, ch); 3444 3445 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3446 "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n", 3447 path, tgidx); 3448 3449 if (IS_TSSI_EXTRA_GROUP(tgidx)) { 3450 tgidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(tgidx); 3451 tgidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(tgidx); 3452 tde_1st = tssi_info->tssi_trim_6g[path][tgidx_1st]; 3453 tde_2nd = tssi_info->tssi_trim_6g[path][tgidx_2nd]; 3454 val = (tde_1st + tde_2nd) / 2; 3455 3456 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3457 "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n", 3458 path, val, tde_1st, tde_2nd); 3459 } else { 3460 val = tssi_info->tssi_trim_6g[path][tgidx]; 3461 3462 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3463 "[TSSI][TRIM]: path=%d mcs trim_de=%d\n", 3464 path, val); 3465 } 3466 } 3467 3468 return val; 3469 } 3470 3471 static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, 3472 enum rtw89_phy_idx phy) 3473 { 3474 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; 3475 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 3476 u8 ch = chan->channel; 3477 u8 gidx; 3478 s8 ofdm_de; 3479 s8 trim_de; 3480 s32 val; 3481 u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C; 3482 3483 rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n", 3484 phy, ch); 3485 3486 if (rtwdev->dbcc_en) { 3487 if (phy == RTW89_PHY_0) { 3488 path = RF_PATH_A; 3489 path_max = RF_PATH_B; 3490 } else if (phy == RTW89_PHY_1) { 3491 path = RF_PATH_B; 3492 path_max = RF_PATH_NUM_8852C; 3493 } 3494 } 3495 3496 for (i = path; i < path_max; i++) { 3497 gidx = _tssi_get_cck_group(rtwdev, ch); 3498 trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i); 3499 val = tssi_info->tssi_cck[i][gidx] + trim_de; 3500 3501 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3502 "[TSSI][TRIM]: path=%d cck[%d]=0x%x trim=0x%x\n", 3503 i, gidx, tssi_info->tssi_cck[i][gidx], trim_de); 3504 3505 rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_long[i], _TSSI_DE_MASK, val); 3506 rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_short[i], _TSSI_DE_MASK, val); 3507 3508 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3509 "[TSSI] Set TSSI CCK DE 0x%x[21:12]=0x%x\n", 3510 _tssi_de_cck_long[i], 3511 rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i], 3512 _TSSI_DE_MASK)); 3513 3514 ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i); 3515 trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i); 3516 val = ofdm_de + trim_de; 3517 3518 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3519 "[TSSI][TRIM]: path=%d mcs=0x%x trim=0x%x\n", 3520 i, ofdm_de, trim_de); 3521 3522 rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_20m[i], _TSSI_DE_MASK, val); 3523 rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_40m[i], _TSSI_DE_MASK, val); 3524 rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m[i], _TSSI_DE_MASK, val); 3525 rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m_80m[i], _TSSI_DE_MASK, val); 3526 rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_5m[i], _TSSI_DE_MASK, val); 3527 rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_10m[i], _TSSI_DE_MASK, val); 3528 3529 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3530 "[TSSI] Set TSSI MCS DE 0x%x[21:12]=0x%x\n", 3531 _tssi_de_mcs_20m[i], 3532 rtw89_phy_read32_mask(rtwdev, _tssi_de_mcs_20m[i], 3533 _TSSI_DE_MASK)); 3534 } 3535 } 3536 3537 static void rtw8852c_tssi_cont_en(struct rtw89_dev *rtwdev, bool en, 3538 enum rtw89_rf_path path) 3539 { 3540 static const u32 tssi_trk[2] = {0x5818, 0x7818}; 3541 static const u32 tssi_en[2] = {0x5820, 0x7820}; 3542 3543 if (en) { 3544 rtw89_phy_write32_mask(rtwdev, tssi_trk[path], BIT(30), 0x0); 3545 rtw89_phy_write32_mask(rtwdev, tssi_en[path], BIT(31), 0x0); 3546 if (rtwdev->dbcc_en && path == RF_PATH_B) 3547 _tssi_set_efuse_to_de(rtwdev, RTW89_PHY_1); 3548 else 3549 _tssi_set_efuse_to_de(rtwdev, RTW89_PHY_0); 3550 } else { 3551 rtw89_phy_write32_mask(rtwdev, tssi_trk[path], BIT(30), 0x1); 3552 rtw89_phy_write32_mask(rtwdev, tssi_en[path], BIT(31), 0x1); 3553 } 3554 } 3555 3556 void rtw8852c_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, u8 phy_idx) 3557 { 3558 if (!rtwdev->dbcc_en) { 3559 rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_A); 3560 rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_B); 3561 } else { 3562 if (phy_idx == RTW89_PHY_0) 3563 rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_A); 3564 else 3565 rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_B); 3566 } 3567 } 3568 3569 static void _bw_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, 3570 enum rtw89_bandwidth bw, bool is_dav) 3571 { 3572 u32 rf_reg18; 3573 u32 reg_reg18_addr; 3574 3575 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===>%s\n", __func__); 3576 if (is_dav) 3577 reg_reg18_addr = RR_CFGCH; 3578 else 3579 reg_reg18_addr = RR_CFGCH_V1; 3580 3581 rf_reg18 = rtw89_read_rf(rtwdev, path, reg_reg18_addr, RFREG_MASK); 3582 rf_reg18 &= ~RR_CFGCH_BW; 3583 3584 switch (bw) { 3585 case RTW89_CHANNEL_WIDTH_5: 3586 case RTW89_CHANNEL_WIDTH_10: 3587 case RTW89_CHANNEL_WIDTH_20: 3588 rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_20M); 3589 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, 0x3); 3590 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, 0xf); 3591 break; 3592 case RTW89_CHANNEL_WIDTH_40: 3593 rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_40M); 3594 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, 0x3); 3595 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, 0xf); 3596 break; 3597 case RTW89_CHANNEL_WIDTH_80: 3598 rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_80M); 3599 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, 0x2); 3600 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, 0xd); 3601 break; 3602 case RTW89_CHANNEL_WIDTH_160: 3603 rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_160M); 3604 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, 0x1); 3605 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, 0xb); 3606 break; 3607 default: 3608 break; 3609 } 3610 3611 rtw89_write_rf(rtwdev, path, reg_reg18_addr, RFREG_MASK, rf_reg18); 3612 } 3613 3614 static void _ctrl_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 3615 enum rtw89_bandwidth bw) 3616 { 3617 bool is_dav; 3618 u8 kpath, path; 3619 u32 tmp = 0; 3620 3621 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===>%s\n", __func__); 3622 kpath = _kpath(rtwdev, phy); 3623 3624 for (path = 0; path < 2; path++) { 3625 if (!(kpath & BIT(path))) 3626 continue; 3627 3628 is_dav = true; 3629 _bw_setting(rtwdev, path, bw, is_dav); 3630 is_dav = false; 3631 _bw_setting(rtwdev, path, bw, is_dav); 3632 if (rtwdev->dbcc_en) 3633 continue; 3634 3635 if (path == RF_PATH_B && rtwdev->hal.cv == CHIP_CAV) { 3636 rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x0); 3637 tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK); 3638 rtw89_write_rf(rtwdev, RF_PATH_B, RR_APK, RR_APK_MOD, 0x3); 3639 rtw89_write_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK, tmp); 3640 fsleep(100); 3641 rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x1); 3642 } 3643 } 3644 } 3645 3646 static void _ch_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, 3647 u8 central_ch, enum rtw89_band band, bool is_dav) 3648 { 3649 u32 rf_reg18; 3650 u32 reg_reg18_addr; 3651 3652 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===>%s\n", __func__); 3653 if (is_dav) 3654 reg_reg18_addr = 0x18; 3655 else 3656 reg_reg18_addr = 0x10018; 3657 3658 rf_reg18 = rtw89_read_rf(rtwdev, path, reg_reg18_addr, RFREG_MASK); 3659 rf_reg18 &= ~(RR_CFGCH_BAND1 | RR_CFGCH_BAND0 | RR_CFGCH_CH); 3660 rf_reg18 |= FIELD_PREP(RR_CFGCH_CH, central_ch); 3661 3662 switch (band) { 3663 case RTW89_BAND_2G: 3664 rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_2G); 3665 rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_2G); 3666 break; 3667 case RTW89_BAND_5G: 3668 rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_5G); 3669 rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_5G); 3670 break; 3671 case RTW89_BAND_6G: 3672 rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_6G); 3673 rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_6G); 3674 break; 3675 default: 3676 break; 3677 } 3678 rtw89_write_rf(rtwdev, path, reg_reg18_addr, RFREG_MASK, rf_reg18); 3679 fsleep(100); 3680 } 3681 3682 static void _ctrl_ch(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 3683 u8 central_ch, enum rtw89_band band) 3684 { 3685 u8 kpath, path; 3686 3687 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===>%s\n", __func__); 3688 if (band != RTW89_BAND_6G) { 3689 if ((central_ch > 14 && central_ch < 36) || 3690 (central_ch > 64 && central_ch < 100) || 3691 (central_ch > 144 && central_ch < 149) || central_ch > 177) 3692 return; 3693 } else { 3694 if (central_ch > 253 || central_ch == 2) 3695 return; 3696 } 3697 3698 kpath = _kpath(rtwdev, phy); 3699 3700 for (path = 0; path < 2; path++) { 3701 if (kpath & BIT(path)) { 3702 _ch_setting(rtwdev, path, central_ch, band, true); 3703 _ch_setting(rtwdev, path, central_ch, band, false); 3704 } 3705 } 3706 } 3707 3708 static void _rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 3709 enum rtw89_bandwidth bw) 3710 { 3711 u8 kpath; 3712 u8 path; 3713 u32 val; 3714 3715 kpath = _kpath(rtwdev, phy); 3716 for (path = 0; path < 2; path++) { 3717 if (!(kpath & BIT(path))) 3718 continue; 3719 3720 rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x1); 3721 rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M2, 0xa); 3722 switch (bw) { 3723 case RTW89_CHANNEL_WIDTH_20: 3724 val = 0x1b; 3725 break; 3726 case RTW89_CHANNEL_WIDTH_40: 3727 val = 0x13; 3728 break; 3729 case RTW89_CHANNEL_WIDTH_80: 3730 val = 0xb; 3731 break; 3732 case RTW89_CHANNEL_WIDTH_160: 3733 default: 3734 val = 0x3; 3735 break; 3736 } 3737 rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, val); 3738 rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x0); 3739 } 3740 } 3741 3742 static void _lck_keep_thermal(struct rtw89_dev *rtwdev) 3743 { 3744 struct rtw89_lck_info *lck = &rtwdev->lck; 3745 int path; 3746 3747 for (path = 0; path < rtwdev->chip->rf_path_num; path++) { 3748 lck->thermal[path] = 3749 ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]); 3750 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, 3751 "[LCK] path=%d thermal=0x%x", path, lck->thermal[path]); 3752 } 3753 } 3754 3755 static void _lck(struct rtw89_dev *rtwdev) 3756 { 3757 u32 tmp18[2]; 3758 int path = rtwdev->dbcc_en ? 2 : 1; 3759 int i; 3760 3761 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, "[LCK] DO LCK\n"); 3762 3763 tmp18[0] = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK); 3764 tmp18[1] = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK); 3765 3766 for (i = 0; i < path; i++) { 3767 rtw89_write_rf(rtwdev, i, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1); 3768 rtw89_write_rf(rtwdev, i, RR_CFGCH, RFREG_MASK, tmp18[i]); 3769 rtw89_write_rf(rtwdev, i, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0); 3770 } 3771 3772 _lck_keep_thermal(rtwdev); 3773 } 3774 3775 #define RTW8852C_LCK_TH 8 3776 3777 void rtw8852c_lck_track(struct rtw89_dev *rtwdev) 3778 { 3779 struct rtw89_lck_info *lck = &rtwdev->lck; 3780 u8 cur_thermal; 3781 int delta; 3782 int path; 3783 3784 for (path = 0; path < rtwdev->chip->rf_path_num; path++) { 3785 cur_thermal = 3786 ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]); 3787 delta = abs((int)cur_thermal - lck->thermal[path]); 3788 3789 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, 3790 "[LCK] path=%d current thermal=0x%x delta=0x%x\n", 3791 path, cur_thermal, delta); 3792 3793 if (delta >= RTW8852C_LCK_TH) { 3794 _lck(rtwdev); 3795 return; 3796 } 3797 } 3798 } 3799 3800 void rtw8852c_lck_init(struct rtw89_dev *rtwdev) 3801 { 3802 _lck_keep_thermal(rtwdev); 3803 } 3804 3805 static 3806 void rtw8852c_ctrl_bw_ch(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, 3807 u8 central_ch, enum rtw89_band band, 3808 enum rtw89_bandwidth bw) 3809 { 3810 _ctrl_ch(rtwdev, phy, central_ch, band); 3811 _ctrl_bw(rtwdev, phy, bw); 3812 _rxbb_bw(rtwdev, phy, bw); 3813 } 3814 3815 void rtw8852c_set_channel_rf(struct rtw89_dev *rtwdev, 3816 const struct rtw89_chan *chan, 3817 enum rtw89_phy_idx phy_idx) 3818 { 3819 rtw8852c_ctrl_bw_ch(rtwdev, phy_idx, chan->channel, 3820 chan->band_type, 3821 chan->band_width); 3822 } 3823 3824 void rtw8852c_mcc_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) 3825 { 3826 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 3827 struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc; 3828 u8 idx = rfk_mcc->table_idx; 3829 int i; 3830 3831 for (i = 0; i < RTW89_IQK_CHS_NR; i++) { 3832 if (rfk_mcc->ch[idx] == 0) 3833 break; 3834 if (++idx >= RTW89_IQK_CHS_NR) 3835 idx = 0; 3836 } 3837 3838 rfk_mcc->table_idx = idx; 3839 rfk_mcc->ch[idx] = chan->channel; 3840 rfk_mcc->band[idx] = chan->band_type; 3841 } 3842 3843 void rtw8852c_rck(struct rtw89_dev *rtwdev) 3844 { 3845 u8 path; 3846 3847 for (path = 0; path < 2; path++) 3848 _rck(rtwdev, path); 3849 } 3850 3851 void rtw8852c_dack(struct rtw89_dev *rtwdev) 3852 { 3853 u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0); 3854 3855 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START); 3856 _dac_cal(rtwdev, false); 3857 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP); 3858 } 3859 3860 void rtw8852c_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) 3861 { 3862 u32 tx_en; 3863 u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0); 3864 3865 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START); 3866 rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL); 3867 _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx)); 3868 3869 _iqk_init(rtwdev); 3870 _iqk(rtwdev, phy_idx, false); 3871 3872 rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en); 3873 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP); 3874 } 3875 3876 #define RXDCK_VER_8852C 0xe 3877 3878 void rtw8852c_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_afe) 3879 { 3880 struct rtw89_rx_dck_info *rx_dck = &rtwdev->rx_dck; 3881 u8 path, kpath; 3882 u32 rf_reg5; 3883 3884 kpath = _kpath(rtwdev, phy); 3885 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3886 "[RX_DCK] ****** RXDCK Start (Ver: 0x%x, Cv: %d) ******\n", 3887 RXDCK_VER_8852C, rtwdev->hal.cv); 3888 3889 for (path = 0; path < 2; path++) { 3890 rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK); 3891 if (!(kpath & BIT(path))) 3892 continue; 3893 3894 if (rtwdev->is_tssi_mode[path]) 3895 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13), 3896 B_P0_TSSI_TRK_EN, 0x1); 3897 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0); 3898 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX); 3899 _set_rx_dck(rtwdev, phy, path, is_afe); 3900 rx_dck->thermal[path] = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]); 3901 rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5); 3902 3903 if (rtwdev->is_tssi_mode[path]) 3904 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13), 3905 B_P0_TSSI_TRK_EN, 0x0); 3906 } 3907 } 3908 3909 #define RTW8852C_RX_DCK_TH 8 3910 3911 void rtw8852c_rx_dck_track(struct rtw89_dev *rtwdev) 3912 { 3913 struct rtw89_rx_dck_info *rx_dck = &rtwdev->rx_dck; 3914 u8 cur_thermal; 3915 int delta; 3916 int path; 3917 3918 for (path = 0; path < RF_PATH_NUM_8852C; path++) { 3919 cur_thermal = 3920 ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]); 3921 delta = abs((int)cur_thermal - rx_dck->thermal[path]); 3922 3923 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, 3924 "[RX_DCK] path=%d current thermal=0x%x delta=0x%x\n", 3925 path, cur_thermal, delta); 3926 3927 if (delta >= RTW8852C_RX_DCK_TH) { 3928 rtw8852c_rx_dck(rtwdev, RTW89_PHY_0, false); 3929 return; 3930 } 3931 } 3932 } 3933 3934 void rtw8852c_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) 3935 { 3936 u32 tx_en; 3937 u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0); 3938 3939 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START); 3940 rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL); 3941 _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx)); 3942 3943 rtwdev->dpk.is_dpk_enable = true; 3944 rtwdev->dpk.is_dpk_reload_en = false; 3945 _dpk(rtwdev, phy_idx, false); 3946 3947 rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en); 3948 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP); 3949 } 3950 3951 void rtw8852c_dpk_track(struct rtw89_dev *rtwdev) 3952 { 3953 _dpk_track(rtwdev); 3954 } 3955 3956 void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) 3957 { 3958 u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C; 3959 3960 rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n", __func__, phy); 3961 3962 if (rtwdev->dbcc_en) { 3963 if (phy == RTW89_PHY_0) { 3964 path = RF_PATH_A; 3965 path_max = RF_PATH_B; 3966 } else if (phy == RTW89_PHY_1) { 3967 path = RF_PATH_B; 3968 path_max = RF_PATH_NUM_8852C; 3969 } 3970 } 3971 3972 _tssi_disable(rtwdev, phy); 3973 3974 for (i = path; i < path_max; i++) { 3975 _tssi_set_sys(rtwdev, phy, i); 3976 _tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i); 3977 _tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i); 3978 _tssi_set_dck(rtwdev, phy, i); 3979 _tssi_set_bbgain_split(rtwdev, phy, i); 3980 _tssi_set_tmeter_tbl(rtwdev, phy, i); 3981 _tssi_slope_cal_org(rtwdev, phy, i); 3982 _tssi_set_aligk_default(rtwdev, phy, i); 3983 _tssi_set_slope(rtwdev, phy, i); 3984 _tssi_run_slope(rtwdev, phy, i); 3985 } 3986 3987 _tssi_enable(rtwdev, phy); 3988 _tssi_set_efuse_to_de(rtwdev, phy); 3989 } 3990 3991 void rtw8852c_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) 3992 { 3993 u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C; 3994 3995 rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n", 3996 __func__, phy); 3997 3998 if (!rtwdev->is_tssi_mode[RF_PATH_A]) 3999 return; 4000 if (!rtwdev->is_tssi_mode[RF_PATH_B]) 4001 return; 4002 4003 if (rtwdev->dbcc_en) { 4004 if (phy == RTW89_PHY_0) { 4005 path = RF_PATH_A; 4006 path_max = RF_PATH_B; 4007 } else if (phy == RTW89_PHY_1) { 4008 path = RF_PATH_B; 4009 path_max = RF_PATH_NUM_8852C; 4010 } 4011 } 4012 4013 _tssi_disable(rtwdev, phy); 4014 4015 for (i = path; i < path_max; i++) { 4016 _tssi_set_sys(rtwdev, phy, i); 4017 _tssi_set_dck(rtwdev, phy, i); 4018 _tssi_set_tmeter_tbl(rtwdev, phy, i); 4019 _tssi_slope_cal_org(rtwdev, phy, i); 4020 _tssi_set_aligk_default(rtwdev, phy, i); 4021 } 4022 4023 _tssi_enable(rtwdev, phy); 4024 _tssi_set_efuse_to_de(rtwdev, phy); 4025 } 4026 4027 static void rtw8852c_tssi_default_txagc(struct rtw89_dev *rtwdev, 4028 enum rtw89_phy_idx phy, bool enable) 4029 { 4030 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; 4031 u8 i; 4032 4033 if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B]) 4034 return; 4035 4036 if (enable) { 4037 /* SCAN_START */ 4038 if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, B_TXAGC_BB_OFT) != 0xc000 && 4039 rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, B_TXAGC_BB_OFT) != 0x0) { 4040 for (i = 0; i < 6; i++) { 4041 tssi_info->default_txagc_offset[RF_PATH_A] = 4042 rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, 4043 B_TXAGC_BB); 4044 if (tssi_info->default_txagc_offset[RF_PATH_A]) 4045 break; 4046 } 4047 } 4048 4049 if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, B_TXAGC_BB_S1_OFT) != 0xc000 && 4050 rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, B_TXAGC_BB_S1_OFT) != 0x0) { 4051 for (i = 0; i < 6; i++) { 4052 tssi_info->default_txagc_offset[RF_PATH_B] = 4053 rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, 4054 B_TXAGC_BB_S1); 4055 if (tssi_info->default_txagc_offset[RF_PATH_B]) 4056 break; 4057 } 4058 } 4059 } else { 4060 /* SCAN_END */ 4061 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT, 4062 tssi_info->default_txagc_offset[RF_PATH_A]); 4063 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT, 4064 tssi_info->default_txagc_offset[RF_PATH_B]); 4065 4066 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0); 4067 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x1); 4068 4069 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x0); 4070 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x1); 4071 } 4072 } 4073 4074 void rtw8852c_wifi_scan_notify(struct rtw89_dev *rtwdev, 4075 bool scan_start, enum rtw89_phy_idx phy_idx) 4076 { 4077 if (scan_start) 4078 rtw8852c_tssi_default_txagc(rtwdev, phy_idx, true); 4079 else 4080 rtw8852c_tssi_default_txagc(rtwdev, phy_idx, false); 4081 } 4082