1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* Copyright(c) 2018-2019 Realtek Corporation 3 */ 4 5 #include <linux/bcd.h> 6 7 #include "main.h" 8 #include "reg.h" 9 #include "fw.h" 10 #include "phy.h" 11 #include "debug.h" 12 13 struct phy_cfg_pair { 14 u32 addr; 15 u32 data; 16 }; 17 18 union phy_table_tile { 19 struct rtw_phy_cond cond; 20 struct phy_cfg_pair cfg; 21 }; 22 23 static const u32 db_invert_table[12][8] = { 24 {10, 13, 16, 20, 25 25, 32, 40, 50}, 26 {64, 80, 101, 128, 27 160, 201, 256, 318}, 28 {401, 505, 635, 800, 29 1007, 1268, 1596, 2010}, 30 {316, 398, 501, 631, 31 794, 1000, 1259, 1585}, 32 {1995, 2512, 3162, 3981, 33 5012, 6310, 7943, 10000}, 34 {12589, 15849, 19953, 25119, 35 31623, 39811, 50119, 63098}, 36 {79433, 100000, 125893, 158489, 37 199526, 251189, 316228, 398107}, 38 {501187, 630957, 794328, 1000000, 39 1258925, 1584893, 1995262, 2511886}, 40 {3162278, 3981072, 5011872, 6309573, 41 7943282, 1000000, 12589254, 15848932}, 42 {19952623, 25118864, 31622777, 39810717, 43 50118723, 63095734, 79432823, 100000000}, 44 {125892541, 158489319, 199526232, 251188643, 45 316227766, 398107171, 501187234, 630957345}, 46 {794328235, 1000000000, 1258925412, 1584893192, 47 1995262315, 2511886432U, 3162277660U, 3981071706U} 48 }; 49 50 u8 rtw_cck_rates[] = { DESC_RATE1M, DESC_RATE2M, DESC_RATE5_5M, DESC_RATE11M }; 51 u8 rtw_ofdm_rates[] = { 52 DESC_RATE6M, DESC_RATE9M, DESC_RATE12M, 53 DESC_RATE18M, DESC_RATE24M, DESC_RATE36M, 54 DESC_RATE48M, DESC_RATE54M 55 }; 56 u8 rtw_ht_1s_rates[] = { 57 DESC_RATEMCS0, DESC_RATEMCS1, DESC_RATEMCS2, 58 DESC_RATEMCS3, DESC_RATEMCS4, DESC_RATEMCS5, 59 DESC_RATEMCS6, DESC_RATEMCS7 60 }; 61 u8 rtw_ht_2s_rates[] = { 62 DESC_RATEMCS8, DESC_RATEMCS9, DESC_RATEMCS10, 63 DESC_RATEMCS11, DESC_RATEMCS12, DESC_RATEMCS13, 64 DESC_RATEMCS14, DESC_RATEMCS15 65 }; 66 u8 rtw_vht_1s_rates[] = { 67 DESC_RATEVHT1SS_MCS0, DESC_RATEVHT1SS_MCS1, 68 DESC_RATEVHT1SS_MCS2, DESC_RATEVHT1SS_MCS3, 69 DESC_RATEVHT1SS_MCS4, 
DESC_RATEVHT1SS_MCS5,
	DESC_RATEVHT1SS_MCS6, DESC_RATEVHT1SS_MCS7,
	DESC_RATEVHT1SS_MCS8, DESC_RATEVHT1SS_MCS9
};
u8 rtw_vht_2s_rates[] = {
	DESC_RATEVHT2SS_MCS0, DESC_RATEVHT2SS_MCS1,
	DESC_RATEVHT2SS_MCS2, DESC_RATEVHT2SS_MCS3,
	DESC_RATEVHT2SS_MCS4, DESC_RATEVHT2SS_MCS5,
	DESC_RATEVHT2SS_MCS6, DESC_RATEVHT2SS_MCS7,
	DESC_RATEVHT2SS_MCS8, DESC_RATEVHT2SS_MCS9
};
/* Per-section rate lists and their lengths, indexed by rate section */
u8 *rtw_rate_section[RTW_RATE_SECTION_MAX] = {
	rtw_cck_rates, rtw_ofdm_rates,
	rtw_ht_1s_rates, rtw_ht_2s_rates,
	rtw_vht_1s_rates, rtw_vht_2s_rates
};
u8 rtw_rate_size[RTW_RATE_SECTION_MAX] = {
	ARRAY_SIZE(rtw_cck_rates),
	ARRAY_SIZE(rtw_ofdm_rates),
	ARRAY_SIZE(rtw_ht_1s_rates),
	ARRAY_SIZE(rtw_ht_2s_rates),
	ARRAY_SIZE(rtw_vht_1s_rates),
	ARRAY_SIZE(rtw_vht_2s_rates)
};
static const u8 rtw_cck_size = ARRAY_SIZE(rtw_cck_rates);
static const u8 rtw_ofdm_size = ARRAY_SIZE(rtw_ofdm_rates);
static const u8 rtw_ht_1s_size = ARRAY_SIZE(rtw_ht_1s_rates);
static const u8 rtw_ht_2s_size = ARRAY_SIZE(rtw_ht_2s_rates);
static const u8 rtw_vht_1s_size = ARRAY_SIZE(rtw_vht_1s_rates);
static const u8 rtw_vht_2s_size = ARRAY_SIZE(rtw_vht_2s_rates);

enum rtw_phy_band_type {
	PHY_BAND_2G = 0,
	PHY_BAND_5G = 1,
};

/* Reset all CCK packet-detection levels and the CCK false-alarm average */
static void rtw_phy_cck_pd_init(struct rtw_dev *rtwdev)
{
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
	u8 i, j;

	for (i = 0; i <= RTW_CHANNEL_WIDTH_40; i++) {
		for (j = 0; j < RTW_RF_PATH_MAX; j++)
			dm_info->cck_pd_lv[i][j] = CCK_PD_LV0;
	}

	dm_info->cck_fa_avg = CCK_FA_AVG_RESET;
}

/* Reset the dynamic-mechanism state; the IGI history is seeded from the
 * current hardware DIG register of path 0.
 */
void rtw_phy_init(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
	u32 addr, mask;

	dm_info->fa_history[3] = 0;
	dm_info->fa_history[2] = 0;
	dm_info->fa_history[1] = 0;
	dm_info->fa_history[0] = 0;
	dm_info->igi_bitmap = 0;
	dm_info->igi_history[3] = 0;
	dm_info->igi_history[2] =
0;
	dm_info->igi_history[1] = 0;

	addr = chip->dig[0].addr;
	mask = chip->dig[0].mask;
	dm_info->igi_history[0] = rtw_read32_mask(rtwdev, addr, mask);
	rtw_phy_cck_pd_init(rtwdev);
}

/* Program the IGI (initial gain index) into every RF path's DIG register */
void rtw_phy_dig_write(struct rtw_dev *rtwdev, u8 igi)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_hal *hal = &rtwdev->hal;
	u32 addr, mask;
	u8 path;

	for (path = 0; path < hal->rf_path_num; path++) {
		addr = chip->dig[path].addr;
		mask = chip->dig[path].mask;
		rtw_write32_mask(rtwdev, addr, mask, igi);
	}
}

/* Let the chip-specific code refresh its false-alarm counters */
static void rtw_phy_stat_false_alarm(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;

	chip->ops->false_alarm_statistics(rtwdev);
}

#define RA_FLOOR_TABLE_SIZE	7
#define RA_FLOOR_UP_GAP		3

/* Map an RSSI reading to a level with hysteresis: thresholds at or above
 * the previous level are raised by RA_FLOOR_UP_GAP so a borderline RSSI
 * does not bounce the level back and forth.
 */
static u8 rtw_phy_get_rssi_level(u8 old_level, u8 rssi)
{
	u8 table[RA_FLOOR_TABLE_SIZE] = {20, 34, 38, 42, 46, 50, 100};
	u8 new_level = 0;
	int i;

	for (i = 0; i < RA_FLOOR_TABLE_SIZE; i++)
		if (i >= old_level)
			table[i] += RA_FLOOR_UP_GAP;

	for (i = 0; i < RA_FLOOR_TABLE_SIZE; i++) {
		if (rssi < table[i]) {
			new_level = i;
			break;
		}
	}

	return new_level;
}

struct rtw_phy_stat_iter_data {
	struct rtw_dev *rtwdev;
	u8 min_rssi;	/* running minimum over all iterated stations */
};

/* Per-station pass: refresh the RSSI level, push it to firmware, and
 * track the minimum average RSSI seen.
 */
static void rtw_phy_stat_rssi_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw_phy_stat_iter_data *iter_data = data;
	struct rtw_dev *rtwdev = iter_data->rtwdev;
	struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
	u8 rssi;

	rssi = ewma_rssi_read(&si->avg_rssi);
	si->rssi_level = rtw_phy_get_rssi_level(si->rssi_level, rssi);

	rtw_fw_send_rssi_info(rtwdev, si);

	iter_data->min_rssi = min_t(u8, rssi, iter_data->min_rssi);
}

/* Iterate all stations and record the minimum RSSI among the peers */
static void rtw_phy_stat_rssi(struct rtw_dev *rtwdev)
{
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
	struct
rtw_phy_stat_iter_data data = {};

	data.rtwdev = rtwdev;
	data.min_rssi = U8_MAX;
	rtw_iterate_stas_atomic(rtwdev, rtw_phy_stat_rssi_iter, &data);

	dm_info->pre_min_rssi = dm_info->min_rssi;
	dm_info->min_rssi = data.min_rssi;
}

/* Snapshot the per-interval packet counters and restart counting */
static void rtw_phy_stat_rate_cnt(struct rtw_dev *rtwdev)
{
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;

	dm_info->last_pkt_count = dm_info->cur_pkt_count;
	memset(&dm_info->cur_pkt_count, 0, sizeof(dm_info->cur_pkt_count));
}

/* Collect all statistics the dynamic mechanisms consume this period */
static void rtw_phy_statistics(struct rtw_dev *rtwdev)
{
	rtw_phy_stat_rssi(rtwdev);
	rtw_phy_stat_false_alarm(rtwdev);
	rtw_phy_stat_rate_cnt(rtwdev);
}

/* DIG false-alarm thresholds and IGI bounds: _PERF_ values are used when
 * linked (performance mode), _CVRG_ values when unlinked (coverage mode).
 */
#define DIG_PERF_FA_TH_LOW		250
#define DIG_PERF_FA_TH_HIGH		500
#define DIG_PERF_FA_TH_EXTRA_HIGH	750
#define DIG_PERF_MAX			0x5a
#define DIG_PERF_MID			0x40
#define DIG_CVRG_FA_TH_LOW		2000
#define DIG_CVRG_FA_TH_HIGH		4000
#define DIG_CVRG_FA_TH_EXTRA_HIGH	5000
#define DIG_CVRG_MAX			0x2a
#define DIG_CVRG_MID			0x26
#define DIG_CVRG_MIN			0x1c
#define DIG_RSSI_GAIN_OFFSET		15

/* Detect IGI oscillation ("damping").  While damping is latched, DIG is
 * frozen until the minimum RSSI moves by more than 3 or the damping
 * counter passes 20; otherwise the recent IGI up/down bitmap and the
 * FA history are checked for the two known oscillation patterns.
 */
static bool
rtw_phy_dig_check_damping(struct rtw_dm_info *dm_info)
{
	u16 fa_lo = DIG_PERF_FA_TH_LOW;
	u16 fa_hi = DIG_PERF_FA_TH_HIGH;
	u16 *fa_history;
	u8 *igi_history;
	u8 damping_rssi;
	u8 min_rssi;
	u8 diff;
	u8 igi_bitmap;
	bool damping = false;

	min_rssi = dm_info->min_rssi;
	if (dm_info->damping) {
		damping_rssi = dm_info->damping_rssi;
		diff = min_rssi > damping_rssi ?
min_rssi - damping_rssi :
		       damping_rssi - min_rssi;
		if (diff > 3 || dm_info->damping_cnt++ > 20) {
			dm_info->damping = false;
			return false;
		}

		return true;
	}

	igi_history = dm_info->igi_history;
	fa_history = dm_info->fa_history;
	igi_bitmap = dm_info->igi_bitmap & 0xf;
	switch (igi_bitmap) {
	case 5:
		/* down -> up -> down -> up */
		if (igi_history[0] > igi_history[1] &&
		    igi_history[2] > igi_history[3] &&
		    igi_history[0] - igi_history[1] >= 2 &&
		    igi_history[2] - igi_history[3] >= 2 &&
		    fa_history[0] > fa_hi && fa_history[1] < fa_lo &&
		    fa_history[2] > fa_hi && fa_history[3] < fa_lo)
			damping = true;
		break;
	case 9:
		/* up -> down -> down -> up */
		if (igi_history[0] > igi_history[1] &&
		    igi_history[3] > igi_history[2] &&
		    igi_history[0] - igi_history[1] >= 4 &&
		    igi_history[3] - igi_history[2] >= 2 &&
		    fa_history[0] > fa_hi && fa_history[1] < fa_lo &&
		    fa_history[2] < fa_lo && fa_history[3] > fa_hi)
			damping = true;
		break;
	default:
		return false;
	}

	if (damping) {
		dm_info->damping = true;
		dm_info->damping_cnt = 0;
		dm_info->damping_rssi = min_rssi;
	}

	return damping;
}

/* Compute the allowed IGI window: derived from the minimum peer RSSI
 * when linked, or from the fixed coverage limits when not.
 */
static void rtw_phy_dig_get_boundary(struct rtw_dm_info *dm_info,
				     u8 *upper, u8 *lower, bool linked)
{
	u8 dig_max, dig_min, dig_mid;
	u8 min_rssi;

	if (linked) {
		dig_max = DIG_PERF_MAX;
		dig_mid = DIG_PERF_MID;
		/* 22B=0x1c, 22C=0x20 */
		dig_min = 0x1c;
		min_rssi = max_t(u8, dm_info->min_rssi, dig_min);
	} else {
		dig_max = DIG_CVRG_MAX;
		dig_mid = DIG_CVRG_MID;
		dig_min = DIG_CVRG_MIN;
		min_rssi = dig_min;
	}

	/* DIG MAX should be bounded by minimum RSSI with offset +15 */
	dig_max = min_t(u8, dig_max, min_rssi + DIG_RSSI_GAIN_OFFSET);

	*lower = clamp_t(u8, min_rssi, dig_min, dig_mid);
	*upper = clamp_t(u8, *lower + DIG_RSSI_GAIN_OFFSET, dig_min, dig_max);
}

/* Select the three false-alarm thresholds and matching IGI step sizes;
 * larger steps are used when linked and the minimum RSSI just dropped.
 */
static void rtw_phy_dig_get_threshold(struct rtw_dm_info *dm_info,
				      u16 *fa_th, u8 *step, bool linked)
{
	u8 min_rssi, pre_min_rssi;

	min_rssi = dm_info->min_rssi;
	pre_min_rssi = dm_info->pre_min_rssi;
	step[0] = 4;
	step[1] = 3;
	step[2] = 2;

	if (linked) {
		fa_th[0] = DIG_PERF_FA_TH_EXTRA_HIGH;
		fa_th[1] = DIG_PERF_FA_TH_HIGH;
		fa_th[2] = DIG_PERF_FA_TH_LOW;
		if (pre_min_rssi > min_rssi) {
			step[0] = 6;
			step[1] = 4;
			step[2] = 2;
		}
	} else {
		fa_th[0] = DIG_CVRG_FA_TH_EXTRA_HIGH;
		fa_th[1] = DIG_CVRG_FA_TH_HIGH;
		fa_th[2] = DIG_CVRG_FA_TH_LOW;
	}
}

/* Shift the new IGI/FA samples into the 4-deep histories; bit 0 of
 * igi_bitmap records whether IGI moved up versus the previous sample.
 */
static void rtw_phy_dig_recorder(struct rtw_dm_info *dm_info, u8 igi, u16 fa)
{
	u8 *igi_history;
	u16 *fa_history;
	u8 igi_bitmap;
	bool up;

	igi_bitmap = dm_info->igi_bitmap << 1 & 0xfe;
	igi_history = dm_info->igi_history;
	fa_history = dm_info->fa_history;

	up = igi > igi_history[0];
	igi_bitmap |= up;

	igi_history[3] = igi_history[2];
	igi_history[2] = igi_history[1];
	igi_history[1] = igi_history[0];
	igi_history[0] = igi;

	fa_history[3] = fa_history[2];
	fa_history[2] = fa_history[1];
	fa_history[1] = fa_history[0];
	fa_history[0] = fa;

	dm_info->igi_bitmap = igi_bitmap;
}

/* Dynamic initial gain: adapt IGI to the total false-alarm count each
 * watchdog period, bounded by the RSSI-derived window.
 */
static void rtw_phy_dig(struct rtw_dev *rtwdev)
{
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
	u8 upper_bound, lower_bound;
	u8 pre_igi, cur_igi;
	u16 fa_th[3], fa_cnt;
	u8 level;
	u8 step[3];
	bool linked;

	if (test_bit(RTW_FLAG_DIG_DISABLE, rtwdev->flags))
		return;

	if (rtw_phy_dig_check_damping(dm_info))
		return;

	linked = !!rtwdev->sta_cnt;

	fa_cnt = dm_info->total_fa_cnt;
	pre_igi = dm_info->igi_history[0];

	rtw_phy_dig_get_threshold(dm_info, fa_th, step, linked);

	/* test the false alarm count from the highest threshold level first,
	 * and increase it by corresponding step
size
	 *
	 * note that the step size is offset by -2, compensate it afterall
	 */
	cur_igi = pre_igi;
	for (level = 0; level < 3; level++) {
		if (fa_cnt > fa_th[level]) {
			cur_igi += step[level];
			break;
		}
	}
	cur_igi -= 2;

	/* calculate the upper/lower bound by the minimum rssi we have among
	 * the peers connected with us, meanwhile make sure the igi value does
	 * not beyond the hardware limitation
	 */
	rtw_phy_dig_get_boundary(dm_info, &upper_bound, &lower_bound, linked);
	cur_igi = clamp_t(u8, cur_igi, lower_bound, upper_bound);

	/* record current igi value and false alarm statistics for further
	 * damping checks, and record the trend of igi values
	 */
	rtw_phy_dig_recorder(dm_info, cur_igi, fa_cnt);

	if (cur_igi != pre_igi)
		rtw_phy_dig_write(rtwdev, cur_igi);
}

static void rtw_phy_ra_info_update_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw_dev *rtwdev = data;
	struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;

	rtw_update_sta_info(rtwdev, si);
}

/* Refresh rate-adaptive info for all stations, once every 4 watchdog ticks */
static void rtw_phy_ra_info_update(struct rtw_dev *rtwdev)
{
	if (rtwdev->watch_dog_cnt & 0x3)
		return;

	rtw_iterate_stas_atomic(rtwdev, rtw_phy_ra_info_update_iter, rtwdev);
}

/* Run DPK tracking when the chip implements it */
static void rtw_phy_dpk_track(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;

	if (chip->ops->dpk_track)
		chip->ops->dpk_track(rtwdev);
}

#define CCK_PD_FA_LV1_MIN	1000
#define CCK_PD_FA_LV0_MAX	500

/* CCK PD level when not associated: chosen from the CCK false-alarm
 * average alone; CCK_PD_LV_MAX means "leave the level unchanged".
 */
static u8 rtw_phy_cck_pd_lv_unlink(struct rtw_dev *rtwdev)
{
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
	u32 cck_fa_avg = dm_info->cck_fa_avg;

	if (cck_fa_avg > CCK_PD_FA_LV1_MIN)
		return CCK_PD_LV1;

	if (cck_fa_avg < CCK_PD_FA_LV0_MAX)
		return CCK_PD_LV0;

	return CCK_PD_LV_MAX;
}

#define CCK_PD_IGI_LV4_VAL 0x38
#define CCK_PD_IGI_LV3_VAL 0x2a
#define
CCK_PD_IGI_LV2_VAL 0x24
#define CCK_PD_RSSI_LV4_VAL 32
#define CCK_PD_RSSI_LV3_VAL 32
#define CCK_PD_RSSI_LV2_VAL 24

/* CCK PD level when associated: pick the strictest level whose IGI/RSSI
 * conditions hold, else fall back to the false-alarm thresholds.
 */
static u8 rtw_phy_cck_pd_lv_link(struct rtw_dev *rtwdev)
{
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
	u8 igi = dm_info->igi_history[0];
	u8 rssi = dm_info->min_rssi;
	u32 cck_fa_avg = dm_info->cck_fa_avg;

	if (igi > CCK_PD_IGI_LV4_VAL && rssi > CCK_PD_RSSI_LV4_VAL)
		return CCK_PD_LV4;
	if (igi > CCK_PD_IGI_LV3_VAL && rssi > CCK_PD_RSSI_LV3_VAL)
		return CCK_PD_LV3;
	if (igi > CCK_PD_IGI_LV2_VAL || rssi > CCK_PD_RSSI_LV2_VAL)
		return CCK_PD_LV2;
	if (cck_fa_avg > CCK_PD_FA_LV1_MIN)
		return CCK_PD_LV1;
	if (cck_fa_avg < CCK_PD_FA_LV0_MAX)
		return CCK_PD_LV0;

	return CCK_PD_LV_MAX;
}

static u8 rtw_phy_cck_pd_lv(struct rtw_dev *rtwdev)
{
	if (!rtw_is_assoc(rtwdev))
		return rtw_phy_cck_pd_lv_unlink(rtwdev);
	else
		return rtw_phy_cck_pd_lv_link(rtwdev);
}

/* Adjust CCK packet detection (2.4 GHz only): smooth the CCK false-alarm
 * count with a 3/4-weighted running average, then apply the new level
 * through the chip ops when it is below CCK_PD_LV_MAX.
 */
static void rtw_phy_cck_pd(struct rtw_dev *rtwdev)
{
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
	struct rtw_chip_info *chip = rtwdev->chip;
	u32 cck_fa = dm_info->cck_fa_cnt;
	u8 level;

	if (rtwdev->hal.current_band_type != RTW_BAND_2G)
		return;

	if (dm_info->cck_fa_avg == CCK_FA_AVG_RESET)
		dm_info->cck_fa_avg = cck_fa;
	else
		dm_info->cck_fa_avg = (dm_info->cck_fa_avg * 3 + cck_fa) >> 2;

	level = rtw_phy_cck_pd_lv(rtwdev);

	if (level >= CCK_PD_LV_MAX)
		return;

	if (chip->ops->cck_pd_set)
		chip->ops->cck_pd_set(rtwdev, level);
}

static void rtw_phy_pwr_track(struct rtw_dev *rtwdev)
{
	rtwdev->chip->ops->pwr_track(rtwdev);
}

/* Watchdog entry point for all dynamic mechanisms */
void rtw_phy_dynamic_mechanism(struct rtw_dev *rtwdev)
{
	/* for further calculation */
	rtw_phy_statistics(rtwdev);
	rtw_phy_dig(rtwdev);
	rtw_phy_cck_pd(rtwdev);
	rtw_phy_ra_info_update(rtwdev);
	rtw_phy_dpk_track(rtwdev);
rtw_phy_pwr_track(rtwdev);
}

#define FRAC_BITS 3

/* Map a signed power reading to a 1..100 index: 0 for out-of-range
 * (<= -100 or >= 20), 100 for non-negative, else 100 + power.
 */
static u8 rtw_phy_power_2_db(s8 power)
{
	if (power <= -100 || power >= 20)
		return 0;
	else if (power >= 0)
		return 100;
	else
		return 100 + power;
}

/* Convert a dB value (clamped to 1..96) to linear scale with FRAC_BITS
 * fractional bits, via db_invert_table; rows 0..2 are stored pre-scaled.
 */
static u64 rtw_phy_db_2_linear(u8 power_db)
{
	u8 i, j;
	u64 linear;

	if (power_db > 96)
		power_db = 96;
	else if (power_db < 1)
		return 1;

	/* 1dB ~ 96dB */
	i = (power_db - 1) >> 3;
	j = (power_db - 1) - (i << 3);

	linear = db_invert_table[i][j];
	linear = i > 2 ? linear << FRAC_BITS : linear;

	return linear;
}

/* Inverse of rtw_phy_db_2_linear(): scan db_invert_table for the entry
 * closest to the linear input and return its dB value (1..96).
 */
static u8 rtw_phy_linear_2_db(u64 linear)
{
	u8 i;
	u8 j;
	u32 dB;

	if (linear >= db_invert_table[11][7])
		return 96; /* maximum 96 dB */

	/* find the first row/column bounding the input from above */
	for (i = 0; i < 12; i++) {
		if (i <= 2 && (linear << FRAC_BITS) <= db_invert_table[i][7])
			break;
		else if (i > 2 && linear <= db_invert_table[i][7])
			break;
	}

	for (j = 0; j < 8; j++) {
		if (i <= 2 && (linear << FRAC_BITS) <= db_invert_table[i][j])
			break;
		else if (i > 2 && linear <= db_invert_table[i][j])
			break;
	}

	if (j == 0 && i == 0)
		goto end;

	/* round to the nearer of the bounding entry and its predecessor */
	if (j == 0) {
		if (i != 3) {
			if (db_invert_table[i][0] - linear >
			    linear - db_invert_table[i - 1][7]) {
				i = i - 1;
				j = 7;
			}
		} else {
			if (db_invert_table[3][0] - linear >
			    linear - db_invert_table[2][7]) {
				i = 2;
				j = 7;
			}
		}
	} else {
		if (db_invert_table[i][j] - linear >
		    linear - db_invert_table[i][j - 1]) {
			j = j - 1;
		}
	}
end:
	dB = (i << 3) + j + 1;

	return dB;
}

/* Combine per-path RF power readings into one RSSI: convert each to
 * linear scale, average across paths, then convert back to dB.
 */
u8 rtw_phy_rf_power_2_rssi(s8 *rf_power, u8 path_num)
{
	s8 power;
	u8 power_db;
	u64 linear;
	u64 sum = 0;
	u8 path;

	for (path = 0; path < path_num; path++) {
		power = rf_power[path];
		power_db = rtw_phy_power_2_db(power);
		linear = rtw_phy_db_2_linear(power_db);
		sum +=
linear;
	}

	/* drop the FRAC_BITS fractional bits with rounding */
	sum = (sum + (1 << (FRAC_BITS - 1))) >> FRAC_BITS;
	switch (path_num) {
	case 2:
		sum >>= 1;
		break;
	case 3:
		/* approximate sum / 3 as (11 * sum) >> 5 */
		sum = ((sum) + ((sum) << 1) + ((sum) << 3)) >> 5;
		break;
	case 4:
		sum >>= 2;
		break;
	default:
		break;
	}

	return rtw_phy_linear_2_db(sum);
}

/* Read an RF register through the chip's direct-address window.
 * Returns INV_RF_DATA for an unsupported RF path.
 */
u32 rtw_phy_read_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
		    u32 addr, u32 mask)
{
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_chip_info *chip = rtwdev->chip;
	const u32 *base_addr = chip->rf_base_addr;
	u32 val, direct_addr;

	if (rf_path >= hal->rf_path_num) {
		rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return INV_RF_DATA;
	}

	addr &= 0xff;
	direct_addr = base_addr[rf_path] + (addr << 2);
	mask &= RFREG_MASK;

	val = rtw_read32_mask(rtwdev, direct_addr, mask);

	return val;
}

/* Write an RF register through the SIPI (serial) interface; does a
 * read-modify-write when the mask does not cover the whole register.
 */
bool rtw_phy_write_rf_reg_sipi(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
			       u32 addr, u32 mask, u32 data)
{
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_chip_info *chip = rtwdev->chip;
	u32 *sipi_addr = chip->rf_sipi_addr;
	u32 data_and_addr;
	u32 old_data = 0;
	u32 shift;

	if (rf_path >= hal->rf_path_num) {
		rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return false;
	}

	addr &= 0xff;
	mask &= RFREG_MASK;

	if (mask != RFREG_MASK) {
		old_data = rtw_phy_read_rf(rtwdev, rf_path, addr, RFREG_MASK);

		if (old_data == INV_RF_DATA) {
			rtw_err(rtwdev, "Write fail, rf is disabled\n");
			return false;
		}

		shift = __ffs(mask);
		data = ((old_data) & (~mask)) | (data << shift);
	}

	/* pack address (bits 27:20) and 20-bit data into one SIPI word */
	data_and_addr = ((addr << 20) | (data & 0x000fffff)) & 0x0fffffff;

	rtw_write32(rtwdev, sipi_addr[rf_path], data_and_addr);

	udelay(13);

	return true;
}

/* Write an RF register through the direct-address window */
bool rtw_phy_write_rf_reg(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
			  u32 addr, u32 mask,
u32 data)
{
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_chip_info *chip = rtwdev->chip;
	const u32 *base_addr = chip->rf_base_addr;
	u32 direct_addr;

	if (rf_path >= hal->rf_path_num) {
		rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return false;
	}

	addr &= 0xff;
	direct_addr = base_addr[rf_path] + (addr << 2);
	mask &= RFREG_MASK;

	/* RF_CFGCH writes are bracketed with DISABLE_PI/ENABLE_PI on the
	 * RFC direct-access control registers
	 */
	if (addr == RF_CFGCH) {
		rtw_write32_mask(rtwdev, REG_RSV_CTRL, BITS_RFC_DIRECT, DISABLE_PI);
		rtw_write32_mask(rtwdev, REG_WLRF1, BITS_RFC_DIRECT, DISABLE_PI);
	}

	rtw_write32_mask(rtwdev, direct_addr, mask, data);

	udelay(1);

	if (addr == RF_CFGCH) {
		rtw_write32_mask(rtwdev, REG_RSV_CTRL, BITS_RFC_DIRECT, ENABLE_PI);
		rtw_write32_mask(rtwdev, REG_WLRF1, BITS_RFC_DIRECT, ENABLE_PI);
	}

	return true;
}

/* Use the SIPI path only for register 0x00, direct writes otherwise */
bool rtw_phy_write_rf_reg_mix(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
			      u32 addr, u32 mask, u32 data)
{
	if (addr != 0x00)
		return rtw_phy_write_rf_reg(rtwdev, rf_path, addr, mask, data);

	return rtw_phy_write_rf_reg_sipi(rtwdev, rf_path, addr, mask, data);
}

/* Build the driver-side condition word used to match conditional
 * parameter tables; unknown cut/pkg values are recorded as 15.
 */
void rtw_phy_setup_phy_cond(struct rtw_dev *rtwdev, u32 pkg)
{
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_efuse *efuse = &rtwdev->efuse;
	struct rtw_phy_cond cond = {0};

	cond.cut = hal->cut_version ? hal->cut_version : 15;
	cond.pkg = pkg ?
pkg : 15;
	cond.plat = 0x04;
	cond.rfe = efuse->rfe_option;

	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_USB:
		cond.intf = INTF_USB;
		break;
	case RTW_HCI_TYPE_SDIO:
		cond.intf = INTF_SDIO;
		break;
	case RTW_HCI_TYPE_PCIE:
	default:
		cond.intf = INTF_PCIE;
		break;
	}

	hal->phy_cond = cond;

	rtw_dbg(rtwdev, RTW_DBG_PHY, "phy cond=0x%08x\n", *((u32 *)&hal->phy_cond));
}

/* A table condition matches when each of its non-zero cut/pkg/intf
 * fields equals the driver's condition; rfe must always match exactly.
 */
static bool check_positive(struct rtw_dev *rtwdev, struct rtw_phy_cond cond)
{
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_phy_cond drv_cond = hal->phy_cond;

	if (cond.cut && cond.cut != drv_cond.cut)
		return false;

	if (cond.pkg && cond.pkg != drv_cond.pkg)
		return false;

	if (cond.intf && cond.intf != drv_cond.intf)
		return false;

	if (cond.rfe != drv_cond.rfe)
		return false;

	return true;
}

/* Walk a conditional PHY parameter table: IF/ELIF/ELSE/ENDIF condition
 * tiles select which cfg pairs get applied through tbl->do_cfg.
 */
void rtw_parse_tbl_phy_cond(struct rtw_dev *rtwdev, const struct rtw_table *tbl)
{
	const union phy_table_tile *p = tbl->data;
	const union phy_table_tile *end = p + tbl->size / 2;
	struct rtw_phy_cond pos_cond = {0};
	bool is_matched = true, is_skipped = false;

	BUILD_BUG_ON(sizeof(union phy_table_tile) != sizeof(struct phy_cfg_pair));

	for (; p < end; p++) {
		if (p->cond.pos) {
			switch (p->cond.branch) {
			case BRANCH_ENDIF:
				is_matched = true;
				is_skipped = false;
				break;
			case BRANCH_ELSE:
				is_matched = is_skipped ?
false : true;
				break;
			case BRANCH_IF:
			case BRANCH_ELIF:
			default:
				pos_cond = p->cond;
				break;
			}
		} else if (p->cond.neg) {
			/* end of an IF/ELIF header: evaluate it once */
			if (!is_skipped) {
				if (check_positive(rtwdev, pos_cond)) {
					is_matched = true;
					is_skipped = true;
				} else {
					is_matched = false;
					is_skipped = false;
				}
			} else {
				is_matched = false;
			}
		} else if (is_matched) {
			(*tbl->do_cfg)(rtwdev, tbl, p->cfg.addr, p->cfg.data);
		}
	}
}

/* Decode byte i of a BCD-encoded power-by-rate word to decimal */
#define bcd_to_dec_pwr_by_rate(val, i) bcd2bin(val >> (i * 8))

/* Extract byte i of a power-by-rate word, BCD-decoding it when the chip
 * stores decimal values.
 */
static u8 tbl_to_dec_pwr_by_rate(struct rtw_dev *rtwdev, u32 hex, u8 i)
{
	if (rtwdev->chip->is_pwr_by_rate_dec)
		return bcd_to_dec_pwr_by_rate(hex, i);

	return (hex >> (i * 8)) & 0xFF;
}

/* Decode one power-by-rate register: map its address (and the mask for
 * the split 0x86C case) to the rate descriptors and per-rate power bytes
 * it carries.
 */
static void
rtw_phy_get_rate_values_of_txpwr_by_rate(struct rtw_dev *rtwdev,
					 u32 addr, u32 mask, u32 val, u8 *rate,
					 u8 *pwr_by_rate, u8 *rate_num)
{
	int i;

	switch (addr) {
	case 0xE00:
	case 0x830:
		rate[0] = DESC_RATE6M;
		rate[1] = DESC_RATE9M;
		rate[2] = DESC_RATE12M;
		rate[3] = DESC_RATE18M;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xE04:
	case 0x834:
		rate[0] = DESC_RATE24M;
		rate[1] = DESC_RATE36M;
		rate[2] = DESC_RATE48M;
		rate[3] = DESC_RATE54M;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xE08:
		rate[0] = DESC_RATE1M;
		pwr_by_rate[0] = bcd_to_dec_pwr_by_rate(val, 1);
		*rate_num = 1;
		break;
	case 0x86C:
		if (mask == 0xffffff00) {
			/* upper three bytes carry 2M/5.5M/11M */
			rate[0] = DESC_RATE2M;
			rate[1] = DESC_RATE5_5M;
			rate[2] = DESC_RATE11M;
			for (i = 1; i < 4; ++i)
				pwr_by_rate[i - 1] =
					tbl_to_dec_pwr_by_rate(rtwdev, val, i);
			*rate_num = 3;
		} else if (mask == 0x000000ff) {
			rate[0] = DESC_RATE11M;
			pwr_by_rate[0] = bcd_to_dec_pwr_by_rate(val, 0);
*rate_num = 1;
		}
		break;
	case 0xE10:
	case 0x83C:
		rate[0] = DESC_RATEMCS0;
		rate[1] = DESC_RATEMCS1;
		rate[2] = DESC_RATEMCS2;
		rate[3] = DESC_RATEMCS3;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xE14:
	case 0x848:
		rate[0] = DESC_RATEMCS4;
		rate[1] = DESC_RATEMCS5;
		rate[2] = DESC_RATEMCS6;
		rate[3] = DESC_RATEMCS7;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xE18:
	case 0x84C:
		rate[0] = DESC_RATEMCS8;
		rate[1] = DESC_RATEMCS9;
		rate[2] = DESC_RATEMCS10;
		rate[3] = DESC_RATEMCS11;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xE1C:
	case 0x868:
		rate[0] = DESC_RATEMCS12;
		rate[1] = DESC_RATEMCS13;
		rate[2] = DESC_RATEMCS14;
		rate[3] = DESC_RATEMCS15;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0x838:
		/* CCK power values start at byte 1 of this register */
		rate[0] = DESC_RATE1M;
		rate[1] = DESC_RATE2M;
		rate[2] = DESC_RATE5_5M;
		for (i = 1; i < 4; ++i)
			pwr_by_rate[i - 1] = tbl_to_dec_pwr_by_rate(rtwdev,
								    val, i);
		*rate_num = 3;
		break;
	case 0xC20:
	case 0xE20:
	case 0x1820:
	case 0x1A20:
		rate[0] = DESC_RATE1M;
		rate[1] = DESC_RATE2M;
		rate[2] = DESC_RATE5_5M;
		rate[3] = DESC_RATE11M;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC24:
	case 0xE24:
	case 0x1824:
	case 0x1A24:
		rate[0] = DESC_RATE6M;
		rate[1] = DESC_RATE9M;
		rate[2] = DESC_RATE12M;
		rate[3] = DESC_RATE18M;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC28:
	case 0xE28:
	case 0x1828:
	case 0x1A28:
		rate[0] = DESC_RATE24M;
		rate[1] = DESC_RATE36M;
		rate[2] = DESC_RATE48M;
		rate[3] = DESC_RATE54M;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC2C:
	case 0xE2C:
	case 0x182C:
	case 0x1A2C:
		rate[0] = DESC_RATEMCS0;
		rate[1] = DESC_RATEMCS1;
		rate[2] = DESC_RATEMCS2;
		rate[3] = DESC_RATEMCS3;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC30:
	case 0xE30:
	case 0x1830:
	case 0x1A30:
		rate[0] = DESC_RATEMCS4;
		rate[1] = DESC_RATEMCS5;
		rate[2] = DESC_RATEMCS6;
		rate[3] = DESC_RATEMCS7;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC34:
	case 0xE34:
	case 0x1834:
	case 0x1A34:
		rate[0] = DESC_RATEMCS8;
		rate[1] = DESC_RATEMCS9;
		rate[2] = DESC_RATEMCS10;
		rate[3] = DESC_RATEMCS11;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC38:
	case 0xE38:
	case 0x1838:
	case 0x1A38:
		rate[0] = DESC_RATEMCS12;
		rate[1] = DESC_RATEMCS13;
		rate[2] = DESC_RATEMCS14;
		rate[3] = DESC_RATEMCS15;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC3C:
	case 0xE3C:
	case 0x183C:
	case 0x1A3C:
		rate[0] = DESC_RATEVHT1SS_MCS0;
		rate[1] = DESC_RATEVHT1SS_MCS1;
		rate[2] = DESC_RATEVHT1SS_MCS2;
		rate[3] = DESC_RATEVHT1SS_MCS3;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC40:
	case 0xE40:
	case 0x1840:
	case 0x1A40:
		rate[0] = DESC_RATEVHT1SS_MCS4;
		rate[1] = DESC_RATEVHT1SS_MCS5;
		rate[2] = DESC_RATEVHT1SS_MCS6;
		rate[3] = DESC_RATEVHT1SS_MCS7;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC44:
	case 0xE44:
	case 0x1844:
	case 0x1A44:
		rate[0] = DESC_RATEVHT1SS_MCS8;
		rate[1] = DESC_RATEVHT1SS_MCS9;
		rate[2] = DESC_RATEVHT2SS_MCS0;
		rate[3] = DESC_RATEVHT2SS_MCS1;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC48:
	case 0xE48:
	case 0x1848:
	case 0x1A48:
		rate[0] = DESC_RATEVHT2SS_MCS2;
		rate[1] = DESC_RATEVHT2SS_MCS3;
		rate[2] = DESC_RATEVHT2SS_MCS4;
		rate[3] = DESC_RATEVHT2SS_MCS5;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC4C:
	case 0xE4C:
	case 0x184C:
	case 0x1A4C:
		rate[0] = DESC_RATEVHT2SS_MCS6;
		rate[1] = DESC_RATEVHT2SS_MCS7;
		rate[2] = DESC_RATEVHT2SS_MCS8;
		rate[3] = DESC_RATEVHT2SS_MCS9;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xCD8:
	case 0xED8:
	case 0x18D8:
	case 0x1AD8:
		rate[0] = DESC_RATEMCS16;
		rate[1] = DESC_RATEMCS17;
		rate[2] = DESC_RATEMCS18;
		rate[3] = DESC_RATEMCS19;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xCDC:
	case 0xEDC:
	case 0x18DC:
	case 0x1ADC:
		rate[0] = DESC_RATEMCS20;
		rate[1] = DESC_RATEMCS21;
		rate[2] = DESC_RATEMCS22;
		rate[3] = DESC_RATEMCS23;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xCE0:
	case 0xEE0:
	case 0x18E0:
	case 0x1AE0:
		rate[0] = DESC_RATEVHT3SS_MCS0;
		rate[1] = DESC_RATEVHT3SS_MCS1;
		rate[2] =
DESC_RATEVHT3SS_MCS2;
		rate[3] = DESC_RATEVHT3SS_MCS3;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xCE4:
	case 0xEE4:
	case 0x18E4:
	case 0x1AE4:
		rate[0] = DESC_RATEVHT3SS_MCS4;
		rate[1] = DESC_RATEVHT3SS_MCS5;
		rate[2] = DESC_RATEVHT3SS_MCS6;
		rate[3] = DESC_RATEVHT3SS_MCS7;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xCE8:
	case 0xEE8:
	case 0x18E8:
	case 0x1AE8:
		rate[0] = DESC_RATEVHT3SS_MCS8;
		rate[1] = DESC_RATEVHT3SS_MCS9;
		for (i = 0; i < 2; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 2;
		break;
	default:
		rtw_warn(rtwdev, "invalid tx power index addr 0x%08x\n", addr);
		break;
	}
}

/* Decode one BB-PG register and record the per-rate power offsets in the
 * 2G or 5G table for the given RF path.
 */
static void rtw_phy_store_tx_power_by_rate(struct rtw_dev *rtwdev,
					   u32 band, u32 rfpath, u32 txnum,
					   u32 regaddr, u32 bitmask, u32 data)
{
	struct rtw_hal *hal = &rtwdev->hal;
	u8 rate_num = 0;
	u8 rate;
	u8 rates[RTW_RF_PATH_MAX] = {0};
	s8 offset;
	s8 pwr_by_rate[RTW_RF_PATH_MAX] = {0};
	int i;

	rtw_phy_get_rate_values_of_txpwr_by_rate(rtwdev, regaddr, bitmask, data,
						 rates, pwr_by_rate, &rate_num);

	if (WARN_ON(rfpath >= RTW_RF_PATH_MAX ||
		    (band != PHY_BAND_2G && band != PHY_BAND_5G) ||
		    rate_num > RTW_RF_PATH_MAX))
		return;

	for (i = 0; i < rate_num; i++) {
		offset = pwr_by_rate[i];
		rate = rates[i];
		if (band == PHY_BAND_2G)
			hal->tx_pwr_by_rate_offset_2g[rfpath][rate] = offset;
		else if (band == PHY_BAND_5G)
			hal->tx_pwr_by_rate_offset_5g[rfpath][rate] = offset;
		else
			continue;
	}
}

/* Apply a BB-PG (power-by-rate) table; addresses 0xfe/0xffe are 50 ms
 * delay markers, not registers.
 */
void rtw_parse_tbl_bb_pg(struct rtw_dev *rtwdev, const struct rtw_table *tbl)
{
	const struct rtw_phy_pg_cfg_pair *p = tbl->data;
	const
struct rtw_phy_pg_cfg_pair *end = p + tbl->size; 1221 1222 for (; p < end; p++) { 1223 if (p->addr == 0xfe || p->addr == 0xffe) { 1224 msleep(50); 1225 continue; 1226 } 1227 rtw_phy_store_tx_power_by_rate(rtwdev, p->band, p->rf_path, 1228 p->tx_num, p->addr, p->bitmask, 1229 p->data); 1230 } 1231 } 1232 1233 static const u8 rtw_channel_idx_5g[RTW_MAX_CHANNEL_NUM_5G] = { 1234 36, 38, 40, 42, 44, 46, 48, /* Band 1 */ 1235 52, 54, 56, 58, 60, 62, 64, /* Band 2 */ 1236 100, 102, 104, 106, 108, 110, 112, /* Band 3 */ 1237 116, 118, 120, 122, 124, 126, 128, /* Band 3 */ 1238 132, 134, 136, 138, 140, 142, 144, /* Band 3 */ 1239 149, 151, 153, 155, 157, 159, 161, /* Band 4 */ 1240 165, 167, 169, 171, 173, 175, 177}; /* Band 4 */ 1241 1242 static int rtw_channel_to_idx(u8 band, u8 channel) 1243 { 1244 int ch_idx; 1245 u8 n_channel; 1246 1247 if (band == PHY_BAND_2G) { 1248 ch_idx = channel - 1; 1249 n_channel = RTW_MAX_CHANNEL_NUM_2G; 1250 } else if (band == PHY_BAND_5G) { 1251 n_channel = RTW_MAX_CHANNEL_NUM_5G; 1252 for (ch_idx = 0; ch_idx < n_channel; ch_idx++) 1253 if (rtw_channel_idx_5g[ch_idx] == channel) 1254 break; 1255 } else { 1256 return -1; 1257 } 1258 1259 if (ch_idx >= n_channel) 1260 return -1; 1261 1262 return ch_idx; 1263 } 1264 1265 static void rtw_phy_set_tx_power_limit(struct rtw_dev *rtwdev, u8 regd, u8 band, 1266 u8 bw, u8 rs, u8 ch, s8 pwr_limit) 1267 { 1268 struct rtw_hal *hal = &rtwdev->hal; 1269 u8 max_power_index = rtwdev->chip->max_power_index; 1270 s8 ww; 1271 int ch_idx; 1272 1273 pwr_limit = clamp_t(s8, pwr_limit, 1274 -max_power_index, max_power_index); 1275 ch_idx = rtw_channel_to_idx(band, ch); 1276 1277 if (regd >= RTW_REGD_MAX || bw >= RTW_CHANNEL_WIDTH_MAX || 1278 rs >= RTW_RATE_SECTION_MAX || ch_idx < 0) { 1279 WARN(1, 1280 "wrong txpwr_lmt regd=%u, band=%u bw=%u, rs=%u, ch_idx=%u, pwr_limit=%d\n", 1281 regd, band, bw, rs, ch_idx, pwr_limit); 1282 return; 1283 } 1284 1285 if (band == PHY_BAND_2G) { 1286 
hal->tx_pwr_limit_2g[regd][bw][rs][ch_idx] = pwr_limit; 1287 ww = hal->tx_pwr_limit_2g[RTW_REGD_WW][bw][rs][ch_idx]; 1288 ww = min_t(s8, ww, pwr_limit); 1289 hal->tx_pwr_limit_2g[RTW_REGD_WW][bw][rs][ch_idx] = ww; 1290 } else if (band == PHY_BAND_5G) { 1291 hal->tx_pwr_limit_5g[regd][bw][rs][ch_idx] = pwr_limit; 1292 ww = hal->tx_pwr_limit_5g[RTW_REGD_WW][bw][rs][ch_idx]; 1293 ww = min_t(s8, ww, pwr_limit); 1294 hal->tx_pwr_limit_5g[RTW_REGD_WW][bw][rs][ch_idx] = ww; 1295 } 1296 } 1297 1298 /* cross-reference 5G power limits if values are not assigned */ 1299 static void 1300 rtw_xref_5g_txpwr_lmt(struct rtw_dev *rtwdev, u8 regd, 1301 u8 bw, u8 ch_idx, u8 rs_ht, u8 rs_vht) 1302 { 1303 struct rtw_hal *hal = &rtwdev->hal; 1304 u8 max_power_index = rtwdev->chip->max_power_index; 1305 s8 lmt_ht = hal->tx_pwr_limit_5g[regd][bw][rs_ht][ch_idx]; 1306 s8 lmt_vht = hal->tx_pwr_limit_5g[regd][bw][rs_vht][ch_idx]; 1307 1308 if (lmt_ht == lmt_vht) 1309 return; 1310 1311 if (lmt_ht == max_power_index) 1312 hal->tx_pwr_limit_5g[regd][bw][rs_ht][ch_idx] = lmt_vht; 1313 1314 else if (lmt_vht == max_power_index) 1315 hal->tx_pwr_limit_5g[regd][bw][rs_vht][ch_idx] = lmt_ht; 1316 } 1317 1318 /* cross-reference power limits for ht and vht */ 1319 static void 1320 rtw_xref_txpwr_lmt_by_rs(struct rtw_dev *rtwdev, u8 regd, u8 bw, u8 ch_idx) 1321 { 1322 u8 rs_idx, rs_ht, rs_vht; 1323 u8 rs_cmp[2][2] = {{RTW_RATE_SECTION_HT_1S, RTW_RATE_SECTION_VHT_1S}, 1324 {RTW_RATE_SECTION_HT_2S, RTW_RATE_SECTION_VHT_2S} }; 1325 1326 for (rs_idx = 0; rs_idx < 2; rs_idx++) { 1327 rs_ht = rs_cmp[rs_idx][0]; 1328 rs_vht = rs_cmp[rs_idx][1]; 1329 1330 rtw_xref_5g_txpwr_lmt(rtwdev, regd, bw, ch_idx, rs_ht, rs_vht); 1331 } 1332 } 1333 1334 /* cross-reference power limits for 5G channels */ 1335 static void 1336 rtw_xref_5g_txpwr_lmt_by_ch(struct rtw_dev *rtwdev, u8 regd, u8 bw) 1337 { 1338 u8 ch_idx; 1339 1340 for (ch_idx = 0; ch_idx < RTW_MAX_CHANNEL_NUM_5G; ch_idx++) 1341 rtw_xref_txpwr_lmt_by_rs(rtwdev, 
regd, bw, ch_idx); 1342 } 1343 1344 /* cross-reference power limits for 20/40M bandwidth */ 1345 static void 1346 rtw_xref_txpwr_lmt_by_bw(struct rtw_dev *rtwdev, u8 regd) 1347 { 1348 u8 bw; 1349 1350 for (bw = RTW_CHANNEL_WIDTH_20; bw <= RTW_CHANNEL_WIDTH_40; bw++) 1351 rtw_xref_5g_txpwr_lmt_by_ch(rtwdev, regd, bw); 1352 } 1353 1354 /* cross-reference power limits */ 1355 static void rtw_xref_txpwr_lmt(struct rtw_dev *rtwdev) 1356 { 1357 u8 regd; 1358 1359 for (regd = 0; regd < RTW_REGD_MAX; regd++) 1360 rtw_xref_txpwr_lmt_by_bw(rtwdev, regd); 1361 } 1362 1363 void rtw_parse_tbl_txpwr_lmt(struct rtw_dev *rtwdev, 1364 const struct rtw_table *tbl) 1365 { 1366 const struct rtw_txpwr_lmt_cfg_pair *p = tbl->data; 1367 const struct rtw_txpwr_lmt_cfg_pair *end = p + tbl->size; 1368 1369 for (; p < end; p++) { 1370 rtw_phy_set_tx_power_limit(rtwdev, p->regd, p->band, 1371 p->bw, p->rs, p->ch, p->txpwr_lmt); 1372 } 1373 1374 rtw_xref_txpwr_lmt(rtwdev); 1375 } 1376 1377 void rtw_phy_cfg_mac(struct rtw_dev *rtwdev, const struct rtw_table *tbl, 1378 u32 addr, u32 data) 1379 { 1380 rtw_write8(rtwdev, addr, data); 1381 } 1382 1383 void rtw_phy_cfg_agc(struct rtw_dev *rtwdev, const struct rtw_table *tbl, 1384 u32 addr, u32 data) 1385 { 1386 rtw_write32(rtwdev, addr, data); 1387 } 1388 1389 void rtw_phy_cfg_bb(struct rtw_dev *rtwdev, const struct rtw_table *tbl, 1390 u32 addr, u32 data) 1391 { 1392 if (addr == 0xfe) 1393 msleep(50); 1394 else if (addr == 0xfd) 1395 mdelay(5); 1396 else if (addr == 0xfc) 1397 mdelay(1); 1398 else if (addr == 0xfb) 1399 usleep_range(50, 60); 1400 else if (addr == 0xfa) 1401 udelay(5); 1402 else if (addr == 0xf9) 1403 udelay(1); 1404 else 1405 rtw_write32(rtwdev, addr, data); 1406 } 1407 1408 void rtw_phy_cfg_rf(struct rtw_dev *rtwdev, const struct rtw_table *tbl, 1409 u32 addr, u32 data) 1410 { 1411 if (addr == 0xffe) { 1412 msleep(50); 1413 } else if (addr == 0xfe) { 1414 usleep_range(100, 110); 1415 } else { 1416 rtw_write_rf(rtwdev, 
tbl->rf_path, addr, RFREG_MASK, data); 1417 udelay(1); 1418 } 1419 } 1420 1421 static void rtw_load_rfk_table(struct rtw_dev *rtwdev) 1422 { 1423 struct rtw_chip_info *chip = rtwdev->chip; 1424 struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info; 1425 1426 if (!chip->rfk_init_tbl) 1427 return; 1428 1429 rtw_write32_mask(rtwdev, 0x1e24, BIT(17), 0x1); 1430 rtw_write32_mask(rtwdev, 0x1cd0, BIT(28), 0x1); 1431 rtw_write32_mask(rtwdev, 0x1cd0, BIT(29), 0x1); 1432 rtw_write32_mask(rtwdev, 0x1cd0, BIT(30), 0x1); 1433 rtw_write32_mask(rtwdev, 0x1cd0, BIT(31), 0x0); 1434 1435 rtw_load_table(rtwdev, chip->rfk_init_tbl); 1436 1437 dpk_info->is_dpk_pwr_on = true; 1438 } 1439 1440 void rtw_phy_load_tables(struct rtw_dev *rtwdev) 1441 { 1442 struct rtw_chip_info *chip = rtwdev->chip; 1443 u8 rf_path; 1444 1445 rtw_load_table(rtwdev, chip->mac_tbl); 1446 rtw_load_table(rtwdev, chip->bb_tbl); 1447 rtw_load_table(rtwdev, chip->agc_tbl); 1448 rtw_load_rfk_table(rtwdev); 1449 1450 for (rf_path = 0; rf_path < rtwdev->hal.rf_path_num; rf_path++) { 1451 const struct rtw_table *tbl; 1452 1453 tbl = chip->rf_tbl[rf_path]; 1454 rtw_load_table(rtwdev, tbl); 1455 } 1456 } 1457 1458 static u8 rtw_get_channel_group(u8 channel) 1459 { 1460 switch (channel) { 1461 default: 1462 WARN_ON(1); 1463 /* fall through */ 1464 case 1: 1465 case 2: 1466 case 36: 1467 case 38: 1468 case 40: 1469 case 42: 1470 return 0; 1471 case 3: 1472 case 4: 1473 case 5: 1474 case 44: 1475 case 46: 1476 case 48: 1477 case 50: 1478 return 1; 1479 case 6: 1480 case 7: 1481 case 8: 1482 case 52: 1483 case 54: 1484 case 56: 1485 case 58: 1486 return 2; 1487 case 9: 1488 case 10: 1489 case 11: 1490 case 60: 1491 case 62: 1492 case 64: 1493 return 3; 1494 case 12: 1495 case 13: 1496 case 100: 1497 case 102: 1498 case 104: 1499 case 106: 1500 return 4; 1501 case 14: 1502 case 108: 1503 case 110: 1504 case 112: 1505 case 114: 1506 return 5; 1507 case 116: 1508 case 118: 1509 case 120: 1510 case 122: 1511 return 6; 1512 
case 124: 1513 case 126: 1514 case 128: 1515 case 130: 1516 return 7; 1517 case 132: 1518 case 134: 1519 case 136: 1520 case 138: 1521 return 8; 1522 case 140: 1523 case 142: 1524 case 144: 1525 return 9; 1526 case 149: 1527 case 151: 1528 case 153: 1529 case 155: 1530 return 10; 1531 case 157: 1532 case 159: 1533 case 161: 1534 return 11; 1535 case 165: 1536 case 167: 1537 case 169: 1538 case 171: 1539 return 12; 1540 case 173: 1541 case 175: 1542 case 177: 1543 return 13; 1544 } 1545 } 1546 1547 static s8 rtw_phy_get_dis_dpd_by_rate_diff(struct rtw_dev *rtwdev, u16 rate) 1548 { 1549 struct rtw_chip_info *chip = rtwdev->chip; 1550 s8 dpd_diff = 0; 1551 1552 if (!chip->en_dis_dpd) 1553 return 0; 1554 1555 #define RTW_DPD_RATE_CHECK(_rate) \ 1556 case DESC_RATE ## _rate: \ 1557 if (DIS_DPD_RATE ## _rate & chip->dpd_ratemask) \ 1558 dpd_diff = -6 * chip->txgi_factor; \ 1559 break 1560 1561 switch (rate) { 1562 RTW_DPD_RATE_CHECK(6M); 1563 RTW_DPD_RATE_CHECK(9M); 1564 RTW_DPD_RATE_CHECK(MCS0); 1565 RTW_DPD_RATE_CHECK(MCS1); 1566 RTW_DPD_RATE_CHECK(MCS8); 1567 RTW_DPD_RATE_CHECK(MCS9); 1568 RTW_DPD_RATE_CHECK(VHT1SS_MCS0); 1569 RTW_DPD_RATE_CHECK(VHT1SS_MCS1); 1570 RTW_DPD_RATE_CHECK(VHT2SS_MCS0); 1571 RTW_DPD_RATE_CHECK(VHT2SS_MCS1); 1572 } 1573 #undef RTW_DPD_RATE_CHECK 1574 1575 return dpd_diff; 1576 } 1577 1578 static u8 rtw_phy_get_2g_tx_power_index(struct rtw_dev *rtwdev, 1579 struct rtw_2g_txpwr_idx *pwr_idx_2g, 1580 enum rtw_bandwidth bandwidth, 1581 u8 rate, u8 group) 1582 { 1583 struct rtw_chip_info *chip = rtwdev->chip; 1584 u8 tx_power; 1585 bool mcs_rate; 1586 bool above_2ss; 1587 u8 factor = chip->txgi_factor; 1588 1589 if (rate <= DESC_RATE11M) 1590 tx_power = pwr_idx_2g->cck_base[group]; 1591 else 1592 tx_power = pwr_idx_2g->bw40_base[group]; 1593 1594 if (rate >= DESC_RATE6M && rate <= DESC_RATE54M) 1595 tx_power += pwr_idx_2g->ht_1s_diff.ofdm * factor; 1596 1597 mcs_rate = (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS15) || 1598 (rate >= 
DESC_RATEVHT1SS_MCS0 && 1599 rate <= DESC_RATEVHT2SS_MCS9); 1600 above_2ss = (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15) || 1601 (rate >= DESC_RATEVHT2SS_MCS0); 1602 1603 if (!mcs_rate) 1604 return tx_power; 1605 1606 switch (bandwidth) { 1607 default: 1608 WARN_ON(1); 1609 /* fall through */ 1610 case RTW_CHANNEL_WIDTH_20: 1611 tx_power += pwr_idx_2g->ht_1s_diff.bw20 * factor; 1612 if (above_2ss) 1613 tx_power += pwr_idx_2g->ht_2s_diff.bw20 * factor; 1614 break; 1615 case RTW_CHANNEL_WIDTH_40: 1616 /* bw40 is the base power */ 1617 if (above_2ss) 1618 tx_power += pwr_idx_2g->ht_2s_diff.bw40 * factor; 1619 break; 1620 } 1621 1622 return tx_power; 1623 } 1624 1625 static u8 rtw_phy_get_5g_tx_power_index(struct rtw_dev *rtwdev, 1626 struct rtw_5g_txpwr_idx *pwr_idx_5g, 1627 enum rtw_bandwidth bandwidth, 1628 u8 rate, u8 group) 1629 { 1630 struct rtw_chip_info *chip = rtwdev->chip; 1631 u8 tx_power; 1632 u8 upper, lower; 1633 bool mcs_rate; 1634 bool above_2ss; 1635 u8 factor = chip->txgi_factor; 1636 1637 tx_power = pwr_idx_5g->bw40_base[group]; 1638 1639 mcs_rate = (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS15) || 1640 (rate >= DESC_RATEVHT1SS_MCS0 && 1641 rate <= DESC_RATEVHT2SS_MCS9); 1642 above_2ss = (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15) || 1643 (rate >= DESC_RATEVHT2SS_MCS0); 1644 1645 if (!mcs_rate) { 1646 tx_power += pwr_idx_5g->ht_1s_diff.ofdm * factor; 1647 return tx_power; 1648 } 1649 1650 switch (bandwidth) { 1651 default: 1652 WARN_ON(1); 1653 /* fall through */ 1654 case RTW_CHANNEL_WIDTH_20: 1655 tx_power += pwr_idx_5g->ht_1s_diff.bw20 * factor; 1656 if (above_2ss) 1657 tx_power += pwr_idx_5g->ht_2s_diff.bw20 * factor; 1658 break; 1659 case RTW_CHANNEL_WIDTH_40: 1660 /* bw40 is the base power */ 1661 if (above_2ss) 1662 tx_power += pwr_idx_5g->ht_2s_diff.bw40 * factor; 1663 break; 1664 case RTW_CHANNEL_WIDTH_80: 1665 /* the base idx of bw80 is the average of bw40+/bw40- */ 1666 lower = pwr_idx_5g->bw40_base[group]; 1667 upper = 
pwr_idx_5g->bw40_base[group + 1]; 1668 1669 tx_power = (lower + upper) / 2; 1670 tx_power += pwr_idx_5g->vht_1s_diff.bw80 * factor; 1671 if (above_2ss) 1672 tx_power += pwr_idx_5g->vht_2s_diff.bw80 * factor; 1673 break; 1674 } 1675 1676 return tx_power; 1677 } 1678 1679 static s8 rtw_phy_get_tx_power_limit(struct rtw_dev *rtwdev, u8 band, 1680 enum rtw_bandwidth bw, u8 rf_path, 1681 u8 rate, u8 channel, u8 regd) 1682 { 1683 struct rtw_hal *hal = &rtwdev->hal; 1684 u8 *cch_by_bw = hal->cch_by_bw; 1685 s8 power_limit = (s8)rtwdev->chip->max_power_index; 1686 u8 rs; 1687 int ch_idx; 1688 u8 cur_bw, cur_ch; 1689 s8 cur_lmt; 1690 1691 if (regd > RTW_REGD_WW) 1692 return power_limit; 1693 1694 if (rate >= DESC_RATE1M && rate <= DESC_RATE11M) 1695 rs = RTW_RATE_SECTION_CCK; 1696 else if (rate >= DESC_RATE6M && rate <= DESC_RATE54M) 1697 rs = RTW_RATE_SECTION_OFDM; 1698 else if (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS7) 1699 rs = RTW_RATE_SECTION_HT_1S; 1700 else if (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15) 1701 rs = RTW_RATE_SECTION_HT_2S; 1702 else if (rate >= DESC_RATEVHT1SS_MCS0 && rate <= DESC_RATEVHT1SS_MCS9) 1703 rs = RTW_RATE_SECTION_VHT_1S; 1704 else if (rate >= DESC_RATEVHT2SS_MCS0 && rate <= DESC_RATEVHT2SS_MCS9) 1705 rs = RTW_RATE_SECTION_VHT_2S; 1706 else 1707 goto err; 1708 1709 /* only 20M BW with cck and ofdm */ 1710 if (rs == RTW_RATE_SECTION_CCK || rs == RTW_RATE_SECTION_OFDM) 1711 bw = RTW_CHANNEL_WIDTH_20; 1712 1713 /* only 20/40M BW with ht */ 1714 if (rs == RTW_RATE_SECTION_HT_1S || rs == RTW_RATE_SECTION_HT_2S) 1715 bw = min_t(u8, bw, RTW_CHANNEL_WIDTH_40); 1716 1717 /* select min power limit among [20M BW ~ current BW] */ 1718 for (cur_bw = RTW_CHANNEL_WIDTH_20; cur_bw <= bw; cur_bw++) { 1719 cur_ch = cch_by_bw[cur_bw]; 1720 1721 ch_idx = rtw_channel_to_idx(band, cur_ch); 1722 if (ch_idx < 0) 1723 goto err; 1724 1725 cur_lmt = cur_ch <= RTW_MAX_CHANNEL_NUM_2G ? 
1726 hal->tx_pwr_limit_2g[regd][cur_bw][rs][ch_idx] : 1727 hal->tx_pwr_limit_5g[regd][cur_bw][rs][ch_idx]; 1728 1729 power_limit = min_t(s8, cur_lmt, power_limit); 1730 } 1731 1732 return power_limit; 1733 1734 err: 1735 WARN(1, "invalid arguments, band=%d, bw=%d, path=%d, rate=%d, ch=%d\n", 1736 band, bw, rf_path, rate, channel); 1737 return (s8)rtwdev->chip->max_power_index; 1738 } 1739 1740 void rtw_get_tx_power_params(struct rtw_dev *rtwdev, u8 path, u8 rate, u8 bw, 1741 u8 ch, u8 regd, struct rtw_power_params *pwr_param) 1742 { 1743 struct rtw_hal *hal = &rtwdev->hal; 1744 struct rtw_txpwr_idx *pwr_idx; 1745 u8 group, band; 1746 u8 *base = &pwr_param->pwr_base; 1747 s8 *offset = &pwr_param->pwr_offset; 1748 s8 *limit = &pwr_param->pwr_limit; 1749 1750 pwr_idx = &rtwdev->efuse.txpwr_idx_table[path]; 1751 group = rtw_get_channel_group(ch); 1752 1753 /* base power index for 2.4G/5G */ 1754 if (IS_CH_2G_BAND(ch)) { 1755 band = PHY_BAND_2G; 1756 *base = rtw_phy_get_2g_tx_power_index(rtwdev, 1757 &pwr_idx->pwr_idx_2g, 1758 bw, rate, group); 1759 *offset = hal->tx_pwr_by_rate_offset_2g[path][rate]; 1760 } else { 1761 band = PHY_BAND_5G; 1762 *base = rtw_phy_get_5g_tx_power_index(rtwdev, 1763 &pwr_idx->pwr_idx_5g, 1764 bw, rate, group); 1765 *offset = hal->tx_pwr_by_rate_offset_5g[path][rate]; 1766 } 1767 1768 *limit = rtw_phy_get_tx_power_limit(rtwdev, band, bw, path, 1769 rate, ch, regd); 1770 } 1771 1772 u8 1773 rtw_phy_get_tx_power_index(struct rtw_dev *rtwdev, u8 rf_path, u8 rate, 1774 enum rtw_bandwidth bandwidth, u8 channel, u8 regd) 1775 { 1776 struct rtw_power_params pwr_param = {0}; 1777 u8 tx_power; 1778 s8 offset; 1779 1780 rtw_get_tx_power_params(rtwdev, rf_path, rate, bandwidth, 1781 channel, regd, &pwr_param); 1782 1783 tx_power = pwr_param.pwr_base; 1784 offset = min_t(s8, pwr_param.pwr_offset, pwr_param.pwr_limit); 1785 1786 if (rtwdev->chip->en_dis_dpd) 1787 offset += rtw_phy_get_dis_dpd_by_rate_diff(rtwdev, rate); 1788 1789 tx_power += offset; 1790 
1791 if (tx_power > rtwdev->chip->max_power_index) 1792 tx_power = rtwdev->chip->max_power_index; 1793 1794 return tx_power; 1795 } 1796 1797 static void rtw_phy_set_tx_power_index_by_rs(struct rtw_dev *rtwdev, 1798 u8 ch, u8 path, u8 rs) 1799 { 1800 struct rtw_hal *hal = &rtwdev->hal; 1801 u8 regd = rtwdev->regd.txpwr_regd; 1802 u8 *rates; 1803 u8 size; 1804 u8 rate; 1805 u8 pwr_idx; 1806 u8 bw; 1807 int i; 1808 1809 if (rs >= RTW_RATE_SECTION_MAX) 1810 return; 1811 1812 rates = rtw_rate_section[rs]; 1813 size = rtw_rate_size[rs]; 1814 bw = hal->current_band_width; 1815 for (i = 0; i < size; i++) { 1816 rate = rates[i]; 1817 pwr_idx = rtw_phy_get_tx_power_index(rtwdev, path, rate, 1818 bw, ch, regd); 1819 hal->tx_pwr_tbl[path][rate] = pwr_idx; 1820 } 1821 } 1822 1823 /* set tx power level by path for each rates, note that the order of the rates 1824 * are *very* important, bacause 8822B/8821C combines every four bytes of tx 1825 * power index into a four-byte power index register, and calls set_tx_agc to 1826 * write these values into hardware 1827 */ 1828 static void rtw_phy_set_tx_power_level_by_path(struct rtw_dev *rtwdev, 1829 u8 ch, u8 path) 1830 { 1831 struct rtw_hal *hal = &rtwdev->hal; 1832 u8 rs; 1833 1834 /* do not need cck rates if we are not in 2.4G */ 1835 if (hal->current_band_type == RTW_BAND_2G) 1836 rs = RTW_RATE_SECTION_CCK; 1837 else 1838 rs = RTW_RATE_SECTION_OFDM; 1839 1840 for (; rs < RTW_RATE_SECTION_MAX; rs++) 1841 rtw_phy_set_tx_power_index_by_rs(rtwdev, ch, path, rs); 1842 } 1843 1844 void rtw_phy_set_tx_power_level(struct rtw_dev *rtwdev, u8 channel) 1845 { 1846 struct rtw_chip_info *chip = rtwdev->chip; 1847 struct rtw_hal *hal = &rtwdev->hal; 1848 u8 path; 1849 1850 mutex_lock(&hal->tx_power_mutex); 1851 1852 for (path = 0; path < hal->rf_path_num; path++) 1853 rtw_phy_set_tx_power_level_by_path(rtwdev, channel, path); 1854 1855 chip->ops->set_tx_power_index(rtwdev); 1856 mutex_unlock(&hal->tx_power_mutex); 1857 } 1858 1859 static void 
1860 rtw_phy_tx_power_by_rate_config_by_path(struct rtw_hal *hal, u8 path, 1861 u8 rs, u8 size, u8 *rates) 1862 { 1863 u8 rate; 1864 u8 base_idx, rate_idx; 1865 s8 base_2g, base_5g; 1866 1867 if (rs >= RTW_RATE_SECTION_VHT_1S) 1868 base_idx = rates[size - 3]; 1869 else 1870 base_idx = rates[size - 1]; 1871 base_2g = hal->tx_pwr_by_rate_offset_2g[path][base_idx]; 1872 base_5g = hal->tx_pwr_by_rate_offset_5g[path][base_idx]; 1873 hal->tx_pwr_by_rate_base_2g[path][rs] = base_2g; 1874 hal->tx_pwr_by_rate_base_5g[path][rs] = base_5g; 1875 for (rate = 0; rate < size; rate++) { 1876 rate_idx = rates[rate]; 1877 hal->tx_pwr_by_rate_offset_2g[path][rate_idx] -= base_2g; 1878 hal->tx_pwr_by_rate_offset_5g[path][rate_idx] -= base_5g; 1879 } 1880 } 1881 1882 void rtw_phy_tx_power_by_rate_config(struct rtw_hal *hal) 1883 { 1884 u8 path; 1885 1886 for (path = 0; path < RTW_RF_PATH_MAX; path++) { 1887 rtw_phy_tx_power_by_rate_config_by_path(hal, path, 1888 RTW_RATE_SECTION_CCK, 1889 rtw_cck_size, rtw_cck_rates); 1890 rtw_phy_tx_power_by_rate_config_by_path(hal, path, 1891 RTW_RATE_SECTION_OFDM, 1892 rtw_ofdm_size, rtw_ofdm_rates); 1893 rtw_phy_tx_power_by_rate_config_by_path(hal, path, 1894 RTW_RATE_SECTION_HT_1S, 1895 rtw_ht_1s_size, rtw_ht_1s_rates); 1896 rtw_phy_tx_power_by_rate_config_by_path(hal, path, 1897 RTW_RATE_SECTION_HT_2S, 1898 rtw_ht_2s_size, rtw_ht_2s_rates); 1899 rtw_phy_tx_power_by_rate_config_by_path(hal, path, 1900 RTW_RATE_SECTION_VHT_1S, 1901 rtw_vht_1s_size, rtw_vht_1s_rates); 1902 rtw_phy_tx_power_by_rate_config_by_path(hal, path, 1903 RTW_RATE_SECTION_VHT_2S, 1904 rtw_vht_2s_size, rtw_vht_2s_rates); 1905 } 1906 } 1907 1908 static void 1909 __rtw_phy_tx_power_limit_config(struct rtw_hal *hal, u8 regd, u8 bw, u8 rs) 1910 { 1911 s8 base; 1912 u8 ch; 1913 1914 for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_2G; ch++) { 1915 base = hal->tx_pwr_by_rate_base_2g[0][rs]; 1916 hal->tx_pwr_limit_2g[regd][bw][rs][ch] -= base; 1917 } 1918 1919 for (ch = 0; ch < 
RTW_MAX_CHANNEL_NUM_5G; ch++) { 1920 base = hal->tx_pwr_by_rate_base_5g[0][rs]; 1921 hal->tx_pwr_limit_5g[regd][bw][rs][ch] -= base; 1922 } 1923 } 1924 1925 void rtw_phy_tx_power_limit_config(struct rtw_hal *hal) 1926 { 1927 u8 regd, bw, rs; 1928 1929 /* default at channel 1 */ 1930 hal->cch_by_bw[RTW_CHANNEL_WIDTH_20] = 1; 1931 1932 for (regd = 0; regd < RTW_REGD_MAX; regd++) 1933 for (bw = 0; bw < RTW_CHANNEL_WIDTH_MAX; bw++) 1934 for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++) 1935 __rtw_phy_tx_power_limit_config(hal, regd, bw, rs); 1936 } 1937 1938 static void rtw_phy_init_tx_power_limit(struct rtw_dev *rtwdev, 1939 u8 regd, u8 bw, u8 rs) 1940 { 1941 struct rtw_hal *hal = &rtwdev->hal; 1942 s8 max_power_index = (s8)rtwdev->chip->max_power_index; 1943 u8 ch; 1944 1945 /* 2.4G channels */ 1946 for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_2G; ch++) 1947 hal->tx_pwr_limit_2g[regd][bw][rs][ch] = max_power_index; 1948 1949 /* 5G channels */ 1950 for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_5G; ch++) 1951 hal->tx_pwr_limit_5g[regd][bw][rs][ch] = max_power_index; 1952 } 1953 1954 void rtw_phy_init_tx_power(struct rtw_dev *rtwdev) 1955 { 1956 struct rtw_hal *hal = &rtwdev->hal; 1957 u8 regd, path, rate, rs, bw; 1958 1959 /* init tx power by rate offset */ 1960 for (path = 0; path < RTW_RF_PATH_MAX; path++) { 1961 for (rate = 0; rate < DESC_RATE_MAX; rate++) { 1962 hal->tx_pwr_by_rate_offset_2g[path][rate] = 0; 1963 hal->tx_pwr_by_rate_offset_5g[path][rate] = 0; 1964 } 1965 } 1966 1967 /* init tx power limit */ 1968 for (regd = 0; regd < RTW_REGD_MAX; regd++) 1969 for (bw = 0; bw < RTW_CHANNEL_WIDTH_MAX; bw++) 1970 for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++) 1971 rtw_phy_init_tx_power_limit(rtwdev, regd, bw, 1972 rs); 1973 } 1974 1975 void rtw_phy_config_swing_table(struct rtw_dev *rtwdev, 1976 struct rtw_swing_table *swing_table) 1977 { 1978 const struct rtw_pwr_track_tbl *tbl = rtwdev->chip->pwr_track_tbl; 1979 u8 channel = rtwdev->hal.current_channel; 1980 1981 if 
(IS_CH_2G_BAND(channel)) { 1982 if (rtwdev->dm_info.tx_rate <= DESC_RATE11M) { 1983 swing_table->p[RF_PATH_A] = tbl->pwrtrk_2g_ccka_p; 1984 swing_table->n[RF_PATH_A] = tbl->pwrtrk_2g_ccka_n; 1985 swing_table->p[RF_PATH_B] = tbl->pwrtrk_2g_cckb_p; 1986 swing_table->n[RF_PATH_B] = tbl->pwrtrk_2g_cckb_n; 1987 } else { 1988 swing_table->p[RF_PATH_A] = tbl->pwrtrk_2ga_p; 1989 swing_table->n[RF_PATH_A] = tbl->pwrtrk_2ga_n; 1990 swing_table->p[RF_PATH_B] = tbl->pwrtrk_2gb_p; 1991 swing_table->n[RF_PATH_B] = tbl->pwrtrk_2gb_n; 1992 } 1993 } else if (IS_CH_5G_BAND_1(channel) || IS_CH_5G_BAND_2(channel)) { 1994 swing_table->p[RF_PATH_A] = tbl->pwrtrk_5ga_p[RTW_PWR_TRK_5G_1]; 1995 swing_table->n[RF_PATH_A] = tbl->pwrtrk_5ga_n[RTW_PWR_TRK_5G_1]; 1996 swing_table->p[RF_PATH_B] = tbl->pwrtrk_5gb_p[RTW_PWR_TRK_5G_1]; 1997 swing_table->n[RF_PATH_B] = tbl->pwrtrk_5gb_n[RTW_PWR_TRK_5G_1]; 1998 } else if (IS_CH_5G_BAND_3(channel)) { 1999 swing_table->p[RF_PATH_A] = tbl->pwrtrk_5ga_p[RTW_PWR_TRK_5G_2]; 2000 swing_table->n[RF_PATH_A] = tbl->pwrtrk_5ga_n[RTW_PWR_TRK_5G_2]; 2001 swing_table->p[RF_PATH_B] = tbl->pwrtrk_5gb_p[RTW_PWR_TRK_5G_2]; 2002 swing_table->n[RF_PATH_B] = tbl->pwrtrk_5gb_n[RTW_PWR_TRK_5G_2]; 2003 } else if (IS_CH_5G_BAND_4(channel)) { 2004 swing_table->p[RF_PATH_A] = tbl->pwrtrk_5ga_p[RTW_PWR_TRK_5G_3]; 2005 swing_table->n[RF_PATH_A] = tbl->pwrtrk_5ga_n[RTW_PWR_TRK_5G_3]; 2006 swing_table->p[RF_PATH_B] = tbl->pwrtrk_5gb_p[RTW_PWR_TRK_5G_3]; 2007 swing_table->n[RF_PATH_B] = tbl->pwrtrk_5gb_n[RTW_PWR_TRK_5G_3]; 2008 } else { 2009 swing_table->p[RF_PATH_A] = tbl->pwrtrk_2ga_p; 2010 swing_table->n[RF_PATH_A] = tbl->pwrtrk_2ga_n; 2011 swing_table->p[RF_PATH_B] = tbl->pwrtrk_2gb_p; 2012 swing_table->n[RF_PATH_B] = tbl->pwrtrk_2gb_n; 2013 } 2014 } 2015 2016 void rtw_phy_pwrtrack_avg(struct rtw_dev *rtwdev, u8 thermal, u8 path) 2017 { 2018 struct rtw_dm_info *dm_info = &rtwdev->dm_info; 2019 2020 ewma_thermal_add(&dm_info->avg_thermal[path], thermal); 2021 
dm_info->thermal_avg[path] = 2022 ewma_thermal_read(&dm_info->avg_thermal[path]); 2023 } 2024 2025 bool rtw_phy_pwrtrack_thermal_changed(struct rtw_dev *rtwdev, u8 thermal, 2026 u8 path) 2027 { 2028 struct rtw_dm_info *dm_info = &rtwdev->dm_info; 2029 u8 avg = ewma_thermal_read(&dm_info->avg_thermal[path]); 2030 2031 if (avg == thermal) 2032 return false; 2033 2034 return true; 2035 } 2036 2037 u8 rtw_phy_pwrtrack_get_delta(struct rtw_dev *rtwdev, u8 path) 2038 { 2039 struct rtw_dm_info *dm_info = &rtwdev->dm_info; 2040 u8 therm_avg, therm_efuse, therm_delta; 2041 2042 therm_avg = dm_info->thermal_avg[path]; 2043 therm_efuse = rtwdev->efuse.thermal_meter[path]; 2044 therm_delta = abs(therm_avg - therm_efuse); 2045 2046 return min_t(u8, therm_delta, RTW_PWR_TRK_TBL_SZ - 1); 2047 } 2048 2049 s8 rtw_phy_pwrtrack_get_pwridx(struct rtw_dev *rtwdev, 2050 struct rtw_swing_table *swing_table, 2051 u8 tbl_path, u8 therm_path, u8 delta) 2052 { 2053 struct rtw_dm_info *dm_info = &rtwdev->dm_info; 2054 const u8 *delta_swing_table_idx_pos; 2055 const u8 *delta_swing_table_idx_neg; 2056 2057 if (delta >= RTW_PWR_TRK_TBL_SZ) { 2058 rtw_warn(rtwdev, "power track table overflow\n"); 2059 return 0; 2060 } 2061 2062 if (!swing_table) { 2063 rtw_warn(rtwdev, "swing table not configured\n"); 2064 return 0; 2065 } 2066 2067 delta_swing_table_idx_pos = swing_table->p[tbl_path]; 2068 delta_swing_table_idx_neg = swing_table->n[tbl_path]; 2069 2070 if (!delta_swing_table_idx_pos || !delta_swing_table_idx_neg) { 2071 rtw_warn(rtwdev, "invalid swing table index\n"); 2072 return 0; 2073 } 2074 2075 if (dm_info->thermal_avg[therm_path] > 2076 rtwdev->efuse.thermal_meter[therm_path]) 2077 return delta_swing_table_idx_pos[delta]; 2078 else 2079 return -delta_swing_table_idx_neg[delta]; 2080 } 2081 2082 bool rtw_phy_pwrtrack_need_iqk(struct rtw_dev *rtwdev) 2083 { 2084 struct rtw_dm_info *dm_info = &rtwdev->dm_info; 2085 u8 delta_iqk; 2086 2087 delta_iqk = abs(dm_info->thermal_avg[0] - 
dm_info->thermal_meter_k); 2088 if (delta_iqk >= rtwdev->chip->iqk_threshold) { 2089 dm_info->thermal_meter_k = dm_info->thermal_avg[0]; 2090 return true; 2091 } 2092 return false; 2093 } 2094