1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* Copyright(c) 2018-2019 Realtek Corporation 3 */ 4 5 #include <linux/bcd.h> 6 7 #include "main.h" 8 #include "reg.h" 9 #include "fw.h" 10 #include "phy.h" 11 #include "debug.h" 12 13 struct phy_cfg_pair { 14 u32 addr; 15 u32 data; 16 }; 17 18 union phy_table_tile { 19 struct rtw_phy_cond cond; 20 struct phy_cfg_pair cfg; 21 }; 22 23 static const u32 db_invert_table[12][8] = { 24 {10, 13, 16, 20, 25 25, 32, 40, 50}, 26 {64, 80, 101, 128, 27 160, 201, 256, 318}, 28 {401, 505, 635, 800, 29 1007, 1268, 1596, 2010}, 30 {316, 398, 501, 631, 31 794, 1000, 1259, 1585}, 32 {1995, 2512, 3162, 3981, 33 5012, 6310, 7943, 10000}, 34 {12589, 15849, 19953, 25119, 35 31623, 39811, 50119, 63098}, 36 {79433, 100000, 125893, 158489, 37 199526, 251189, 316228, 398107}, 38 {501187, 630957, 794328, 1000000, 39 1258925, 1584893, 1995262, 2511886}, 40 {3162278, 3981072, 5011872, 6309573, 41 7943282, 1000000, 12589254, 15848932}, 42 {19952623, 25118864, 31622777, 39810717, 43 50118723, 63095734, 79432823, 100000000}, 44 {125892541, 158489319, 199526232, 251188643, 45 316227766, 398107171, 501187234, 630957345}, 46 {794328235, 1000000000, 1258925412, 1584893192, 47 1995262315, 2511886432U, 3162277660U, 3981071706U} 48 }; 49 50 u8 rtw_cck_rates[] = { DESC_RATE1M, DESC_RATE2M, DESC_RATE5_5M, DESC_RATE11M }; 51 u8 rtw_ofdm_rates[] = { 52 DESC_RATE6M, DESC_RATE9M, DESC_RATE12M, 53 DESC_RATE18M, DESC_RATE24M, DESC_RATE36M, 54 DESC_RATE48M, DESC_RATE54M 55 }; 56 u8 rtw_ht_1s_rates[] = { 57 DESC_RATEMCS0, DESC_RATEMCS1, DESC_RATEMCS2, 58 DESC_RATEMCS3, DESC_RATEMCS4, DESC_RATEMCS5, 59 DESC_RATEMCS6, DESC_RATEMCS7 60 }; 61 u8 rtw_ht_2s_rates[] = { 62 DESC_RATEMCS8, DESC_RATEMCS9, DESC_RATEMCS10, 63 DESC_RATEMCS11, DESC_RATEMCS12, DESC_RATEMCS13, 64 DESC_RATEMCS14, DESC_RATEMCS15 65 }; 66 u8 rtw_vht_1s_rates[] = { 67 DESC_RATEVHT1SS_MCS0, DESC_RATEVHT1SS_MCS1, 68 DESC_RATEVHT1SS_MCS2, DESC_RATEVHT1SS_MCS3, 69 DESC_RATEVHT1SS_MCS4, 
DESC_RATEVHT1SS_MCS5, 70 DESC_RATEVHT1SS_MCS6, DESC_RATEVHT1SS_MCS7, 71 DESC_RATEVHT1SS_MCS8, DESC_RATEVHT1SS_MCS9 72 }; 73 u8 rtw_vht_2s_rates[] = { 74 DESC_RATEVHT2SS_MCS0, DESC_RATEVHT2SS_MCS1, 75 DESC_RATEVHT2SS_MCS2, DESC_RATEVHT2SS_MCS3, 76 DESC_RATEVHT2SS_MCS4, DESC_RATEVHT2SS_MCS5, 77 DESC_RATEVHT2SS_MCS6, DESC_RATEVHT2SS_MCS7, 78 DESC_RATEVHT2SS_MCS8, DESC_RATEVHT2SS_MCS9 79 }; 80 u8 *rtw_rate_section[RTW_RATE_SECTION_MAX] = { 81 rtw_cck_rates, rtw_ofdm_rates, 82 rtw_ht_1s_rates, rtw_ht_2s_rates, 83 rtw_vht_1s_rates, rtw_vht_2s_rates 84 }; 85 u8 rtw_rate_size[RTW_RATE_SECTION_MAX] = { 86 ARRAY_SIZE(rtw_cck_rates), 87 ARRAY_SIZE(rtw_ofdm_rates), 88 ARRAY_SIZE(rtw_ht_1s_rates), 89 ARRAY_SIZE(rtw_ht_2s_rates), 90 ARRAY_SIZE(rtw_vht_1s_rates), 91 ARRAY_SIZE(rtw_vht_2s_rates) 92 }; 93 static const u8 rtw_cck_size = ARRAY_SIZE(rtw_cck_rates); 94 static const u8 rtw_ofdm_size = ARRAY_SIZE(rtw_ofdm_rates); 95 static const u8 rtw_ht_1s_size = ARRAY_SIZE(rtw_ht_1s_rates); 96 static const u8 rtw_ht_2s_size = ARRAY_SIZE(rtw_ht_2s_rates); 97 static const u8 rtw_vht_1s_size = ARRAY_SIZE(rtw_vht_1s_rates); 98 static const u8 rtw_vht_2s_size = ARRAY_SIZE(rtw_vht_2s_rates); 99 100 enum rtw_phy_band_type { 101 PHY_BAND_2G = 0, 102 PHY_BAND_5G = 1, 103 }; 104 105 static void rtw_phy_cck_pd_init(struct rtw_dev *rtwdev) 106 { 107 struct rtw_dm_info *dm_info = &rtwdev->dm_info; 108 u8 i, j; 109 110 for (i = 0; i <= RTW_CHANNEL_WIDTH_40; i++) { 111 for (j = 0; j < RTW_RF_PATH_MAX; j++) 112 dm_info->cck_pd_lv[i][j] = CCK_PD_LV0; 113 } 114 115 dm_info->cck_fa_avg = CCK_FA_AVG_RESET; 116 } 117 118 void rtw_phy_init(struct rtw_dev *rtwdev) 119 { 120 struct rtw_chip_info *chip = rtwdev->chip; 121 struct rtw_dm_info *dm_info = &rtwdev->dm_info; 122 u32 addr, mask; 123 124 dm_info->fa_history[3] = 0; 125 dm_info->fa_history[2] = 0; 126 dm_info->fa_history[1] = 0; 127 dm_info->fa_history[0] = 0; 128 dm_info->igi_bitmap = 0; 129 dm_info->igi_history[3] = 0; 130 dm_info->igi_history[2] = 
0; 131 dm_info->igi_history[1] = 0; 132 133 addr = chip->dig[0].addr; 134 mask = chip->dig[0].mask; 135 dm_info->igi_history[0] = rtw_read32_mask(rtwdev, addr, mask); 136 rtw_phy_cck_pd_init(rtwdev); 137 } 138 139 void rtw_phy_dig_write(struct rtw_dev *rtwdev, u8 igi) 140 { 141 struct rtw_chip_info *chip = rtwdev->chip; 142 struct rtw_hal *hal = &rtwdev->hal; 143 u32 addr, mask; 144 u8 path; 145 146 for (path = 0; path < hal->rf_path_num; path++) { 147 addr = chip->dig[path].addr; 148 mask = chip->dig[path].mask; 149 rtw_write32_mask(rtwdev, addr, mask, igi); 150 } 151 } 152 153 static void rtw_phy_stat_false_alarm(struct rtw_dev *rtwdev) 154 { 155 struct rtw_chip_info *chip = rtwdev->chip; 156 157 chip->ops->false_alarm_statistics(rtwdev); 158 } 159 160 #define RA_FLOOR_TABLE_SIZE 7 161 #define RA_FLOOR_UP_GAP 3 162 163 static u8 rtw_phy_get_rssi_level(u8 old_level, u8 rssi) 164 { 165 u8 table[RA_FLOOR_TABLE_SIZE] = {20, 34, 38, 42, 46, 50, 100}; 166 u8 new_level = 0; 167 int i; 168 169 for (i = 0; i < RA_FLOOR_TABLE_SIZE; i++) 170 if (i >= old_level) 171 table[i] += RA_FLOOR_UP_GAP; 172 173 for (i = 0; i < RA_FLOOR_TABLE_SIZE; i++) { 174 if (rssi < table[i]) { 175 new_level = i; 176 break; 177 } 178 } 179 180 return new_level; 181 } 182 183 struct rtw_phy_stat_iter_data { 184 struct rtw_dev *rtwdev; 185 u8 min_rssi; 186 }; 187 188 static void rtw_phy_stat_rssi_iter(void *data, struct ieee80211_sta *sta) 189 { 190 struct rtw_phy_stat_iter_data *iter_data = data; 191 struct rtw_dev *rtwdev = iter_data->rtwdev; 192 struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv; 193 u8 rssi; 194 195 rssi = ewma_rssi_read(&si->avg_rssi); 196 si->rssi_level = rtw_phy_get_rssi_level(si->rssi_level, rssi); 197 198 rtw_fw_send_rssi_info(rtwdev, si); 199 200 iter_data->min_rssi = min_t(u8, rssi, iter_data->min_rssi); 201 } 202 203 static void rtw_phy_stat_rssi(struct rtw_dev *rtwdev) 204 { 205 struct rtw_dm_info *dm_info = &rtwdev->dm_info; 206 struct 
rtw_phy_stat_iter_data data = {}; 207 208 data.rtwdev = rtwdev; 209 data.min_rssi = U8_MAX; 210 rtw_iterate_stas_atomic(rtwdev, rtw_phy_stat_rssi_iter, &data); 211 212 dm_info->pre_min_rssi = dm_info->min_rssi; 213 dm_info->min_rssi = data.min_rssi; 214 } 215 216 static void rtw_phy_stat_rate_cnt(struct rtw_dev *rtwdev) 217 { 218 struct rtw_dm_info *dm_info = &rtwdev->dm_info; 219 220 dm_info->last_pkt_count = dm_info->cur_pkt_count; 221 memset(&dm_info->cur_pkt_count, 0, sizeof(dm_info->cur_pkt_count)); 222 } 223 224 static void rtw_phy_statistics(struct rtw_dev *rtwdev) 225 { 226 rtw_phy_stat_rssi(rtwdev); 227 rtw_phy_stat_false_alarm(rtwdev); 228 rtw_phy_stat_rate_cnt(rtwdev); 229 } 230 231 #define DIG_PERF_FA_TH_LOW 250 232 #define DIG_PERF_FA_TH_HIGH 500 233 #define DIG_PERF_FA_TH_EXTRA_HIGH 750 234 #define DIG_PERF_MAX 0x5a 235 #define DIG_PERF_MID 0x40 236 #define DIG_CVRG_FA_TH_LOW 2000 237 #define DIG_CVRG_FA_TH_HIGH 4000 238 #define DIG_CVRG_FA_TH_EXTRA_HIGH 5000 239 #define DIG_CVRG_MAX 0x2a 240 #define DIG_CVRG_MID 0x26 241 #define DIG_CVRG_MIN 0x1c 242 #define DIG_RSSI_GAIN_OFFSET 15 243 244 static bool 245 rtw_phy_dig_check_damping(struct rtw_dm_info *dm_info) 246 { 247 u16 fa_lo = DIG_PERF_FA_TH_LOW; 248 u16 fa_hi = DIG_PERF_FA_TH_HIGH; 249 u16 *fa_history; 250 u8 *igi_history; 251 u8 damping_rssi; 252 u8 min_rssi; 253 u8 diff; 254 u8 igi_bitmap; 255 bool damping = false; 256 257 min_rssi = dm_info->min_rssi; 258 if (dm_info->damping) { 259 damping_rssi = dm_info->damping_rssi; 260 diff = min_rssi > damping_rssi ? 
min_rssi - damping_rssi : 261 damping_rssi - min_rssi; 262 if (diff > 3 || dm_info->damping_cnt++ > 20) { 263 dm_info->damping = false; 264 return false; 265 } 266 267 return true; 268 } 269 270 igi_history = dm_info->igi_history; 271 fa_history = dm_info->fa_history; 272 igi_bitmap = dm_info->igi_bitmap & 0xf; 273 switch (igi_bitmap) { 274 case 5: 275 /* down -> up -> down -> up */ 276 if (igi_history[0] > igi_history[1] && 277 igi_history[2] > igi_history[3] && 278 igi_history[0] - igi_history[1] >= 2 && 279 igi_history[2] - igi_history[3] >= 2 && 280 fa_history[0] > fa_hi && fa_history[1] < fa_lo && 281 fa_history[2] > fa_hi && fa_history[3] < fa_lo) 282 damping = true; 283 break; 284 case 9: 285 /* up -> down -> down -> up */ 286 if (igi_history[0] > igi_history[1] && 287 igi_history[3] > igi_history[2] && 288 igi_history[0] - igi_history[1] >= 4 && 289 igi_history[3] - igi_history[2] >= 2 && 290 fa_history[0] > fa_hi && fa_history[1] < fa_lo && 291 fa_history[2] < fa_lo && fa_history[3] > fa_hi) 292 damping = true; 293 break; 294 default: 295 return false; 296 } 297 298 if (damping) { 299 dm_info->damping = true; 300 dm_info->damping_cnt = 0; 301 dm_info->damping_rssi = min_rssi; 302 } 303 304 return damping; 305 } 306 307 static void rtw_phy_dig_get_boundary(struct rtw_dm_info *dm_info, 308 u8 *upper, u8 *lower, bool linked) 309 { 310 u8 dig_max, dig_min, dig_mid; 311 u8 min_rssi; 312 313 if (linked) { 314 dig_max = DIG_PERF_MAX; 315 dig_mid = DIG_PERF_MID; 316 /* 22B=0x1c, 22C=0x20 */ 317 dig_min = 0x1c; 318 min_rssi = max_t(u8, dm_info->min_rssi, dig_min); 319 } else { 320 dig_max = DIG_CVRG_MAX; 321 dig_mid = DIG_CVRG_MID; 322 dig_min = DIG_CVRG_MIN; 323 min_rssi = dig_min; 324 } 325 326 /* DIG MAX should be bounded by minimum RSSI with offset +15 */ 327 dig_max = min_t(u8, dig_max, min_rssi + DIG_RSSI_GAIN_OFFSET); 328 329 *lower = clamp_t(u8, min_rssi, dig_min, dig_mid); 330 *upper = clamp_t(u8, *lower + DIG_RSSI_GAIN_OFFSET, dig_min, dig_max); 331 } 332 
333 static void rtw_phy_dig_get_threshold(struct rtw_dm_info *dm_info, 334 u16 *fa_th, u8 *step, bool linked) 335 { 336 u8 min_rssi, pre_min_rssi; 337 338 min_rssi = dm_info->min_rssi; 339 pre_min_rssi = dm_info->pre_min_rssi; 340 step[0] = 4; 341 step[1] = 3; 342 step[2] = 2; 343 344 if (linked) { 345 fa_th[0] = DIG_PERF_FA_TH_EXTRA_HIGH; 346 fa_th[1] = DIG_PERF_FA_TH_HIGH; 347 fa_th[2] = DIG_PERF_FA_TH_LOW; 348 if (pre_min_rssi > min_rssi) { 349 step[0] = 6; 350 step[1] = 4; 351 step[2] = 2; 352 } 353 } else { 354 fa_th[0] = DIG_CVRG_FA_TH_EXTRA_HIGH; 355 fa_th[1] = DIG_CVRG_FA_TH_HIGH; 356 fa_th[2] = DIG_CVRG_FA_TH_LOW; 357 } 358 } 359 360 static void rtw_phy_dig_recorder(struct rtw_dm_info *dm_info, u8 igi, u16 fa) 361 { 362 u8 *igi_history; 363 u16 *fa_history; 364 u8 igi_bitmap; 365 bool up; 366 367 igi_bitmap = dm_info->igi_bitmap << 1 & 0xfe; 368 igi_history = dm_info->igi_history; 369 fa_history = dm_info->fa_history; 370 371 up = igi > igi_history[0]; 372 igi_bitmap |= up; 373 374 igi_history[3] = igi_history[2]; 375 igi_history[2] = igi_history[1]; 376 igi_history[1] = igi_history[0]; 377 igi_history[0] = igi; 378 379 fa_history[3] = fa_history[2]; 380 fa_history[2] = fa_history[1]; 381 fa_history[1] = fa_history[0]; 382 fa_history[0] = fa; 383 384 dm_info->igi_bitmap = igi_bitmap; 385 } 386 387 static void rtw_phy_dig(struct rtw_dev *rtwdev) 388 { 389 struct rtw_dm_info *dm_info = &rtwdev->dm_info; 390 u8 upper_bound, lower_bound; 391 u8 pre_igi, cur_igi; 392 u16 fa_th[3], fa_cnt; 393 u8 level; 394 u8 step[3]; 395 bool linked; 396 397 if (test_bit(RTW_FLAG_DIG_DISABLE, rtwdev->flags)) 398 return; 399 400 if (rtw_phy_dig_check_damping(dm_info)) 401 return; 402 403 linked = !!rtwdev->sta_cnt; 404 405 fa_cnt = dm_info->total_fa_cnt; 406 pre_igi = dm_info->igi_history[0]; 407 408 rtw_phy_dig_get_threshold(dm_info, fa_th, step, linked); 409 410 /* test the false alarm count from the highest threshold level first, 411 * and increase it by corresponding step 
size 412 * 413 * note that the step size is offset by -2, compensate it afterall 414 */ 415 cur_igi = pre_igi; 416 for (level = 0; level < 3; level++) { 417 if (fa_cnt > fa_th[level]) { 418 cur_igi += step[level]; 419 break; 420 } 421 } 422 cur_igi -= 2; 423 424 /* calculate the upper/lower bound by the minimum rssi we have among 425 * the peers connected with us, meanwhile make sure the igi value does 426 * not beyond the hardware limitation 427 */ 428 rtw_phy_dig_get_boundary(dm_info, &upper_bound, &lower_bound, linked); 429 cur_igi = clamp_t(u8, cur_igi, lower_bound, upper_bound); 430 431 /* record current igi value and false alarm statistics for further 432 * damping checks, and record the trend of igi values 433 */ 434 rtw_phy_dig_recorder(dm_info, cur_igi, fa_cnt); 435 436 if (cur_igi != pre_igi) 437 rtw_phy_dig_write(rtwdev, cur_igi); 438 } 439 440 static void rtw_phy_ra_info_update_iter(void *data, struct ieee80211_sta *sta) 441 { 442 struct rtw_dev *rtwdev = data; 443 struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv; 444 445 rtw_update_sta_info(rtwdev, si); 446 } 447 448 static void rtw_phy_ra_info_update(struct rtw_dev *rtwdev) 449 { 450 if (rtwdev->watch_dog_cnt & 0x3) 451 return; 452 453 rtw_iterate_stas_atomic(rtwdev, rtw_phy_ra_info_update_iter, rtwdev); 454 } 455 456 static void rtw_phy_dpk_track(struct rtw_dev *rtwdev) 457 { 458 struct rtw_chip_info *chip = rtwdev->chip; 459 460 if (chip->ops->dpk_track) 461 chip->ops->dpk_track(rtwdev); 462 } 463 464 #define CCK_PD_FA_LV1_MIN 1000 465 #define CCK_PD_FA_LV0_MAX 500 466 467 static u8 rtw_phy_cck_pd_lv_unlink(struct rtw_dev *rtwdev) 468 { 469 struct rtw_dm_info *dm_info = &rtwdev->dm_info; 470 u32 cck_fa_avg = dm_info->cck_fa_avg; 471 472 if (cck_fa_avg > CCK_PD_FA_LV1_MIN) 473 return CCK_PD_LV1; 474 475 if (cck_fa_avg < CCK_PD_FA_LV0_MAX) 476 return CCK_PD_LV0; 477 478 return CCK_PD_LV_MAX; 479 } 480 481 #define CCK_PD_IGI_LV4_VAL 0x38 482 #define CCK_PD_IGI_LV3_VAL 0x2a 483 #define 
CCK_PD_IGI_LV2_VAL 0x24 484 #define CCK_PD_RSSI_LV4_VAL 32 485 #define CCK_PD_RSSI_LV3_VAL 32 486 #define CCK_PD_RSSI_LV2_VAL 24 487 488 static u8 rtw_phy_cck_pd_lv_link(struct rtw_dev *rtwdev) 489 { 490 struct rtw_dm_info *dm_info = &rtwdev->dm_info; 491 u8 igi = dm_info->igi_history[0]; 492 u8 rssi = dm_info->min_rssi; 493 u32 cck_fa_avg = dm_info->cck_fa_avg; 494 495 if (igi > CCK_PD_IGI_LV4_VAL && rssi > CCK_PD_RSSI_LV4_VAL) 496 return CCK_PD_LV4; 497 if (igi > CCK_PD_IGI_LV3_VAL && rssi > CCK_PD_RSSI_LV3_VAL) 498 return CCK_PD_LV3; 499 if (igi > CCK_PD_IGI_LV2_VAL || rssi > CCK_PD_RSSI_LV2_VAL) 500 return CCK_PD_LV2; 501 if (cck_fa_avg > CCK_PD_FA_LV1_MIN) 502 return CCK_PD_LV1; 503 if (cck_fa_avg < CCK_PD_FA_LV0_MAX) 504 return CCK_PD_LV0; 505 506 return CCK_PD_LV_MAX; 507 } 508 509 static u8 rtw_phy_cck_pd_lv(struct rtw_dev *rtwdev) 510 { 511 if (!rtw_is_assoc(rtwdev)) 512 return rtw_phy_cck_pd_lv_unlink(rtwdev); 513 else 514 return rtw_phy_cck_pd_lv_link(rtwdev); 515 } 516 517 static void rtw_phy_cck_pd(struct rtw_dev *rtwdev) 518 { 519 struct rtw_dm_info *dm_info = &rtwdev->dm_info; 520 struct rtw_chip_info *chip = rtwdev->chip; 521 u32 cck_fa = dm_info->cck_fa_cnt; 522 u8 level; 523 524 if (rtwdev->hal.current_band_type != RTW_BAND_2G) 525 return; 526 527 if (dm_info->cck_fa_avg == CCK_FA_AVG_RESET) 528 dm_info->cck_fa_avg = cck_fa; 529 else 530 dm_info->cck_fa_avg = (dm_info->cck_fa_avg * 3 + cck_fa) >> 2; 531 532 level = rtw_phy_cck_pd_lv(rtwdev); 533 534 if (level >= CCK_PD_LV_MAX) 535 return; 536 537 if (chip->ops->cck_pd_set) 538 chip->ops->cck_pd_set(rtwdev, level); 539 } 540 541 static void rtw_phy_pwr_track(struct rtw_dev *rtwdev) 542 { 543 rtwdev->chip->ops->pwr_track(rtwdev); 544 } 545 546 void rtw_phy_dynamic_mechanism(struct rtw_dev *rtwdev) 547 { 548 /* for further calculation */ 549 rtw_phy_statistics(rtwdev); 550 rtw_phy_dig(rtwdev); 551 rtw_phy_cck_pd(rtwdev); 552 rtw_phy_ra_info_update(rtwdev); 553 rtw_phy_dpk_track(rtwdev); 554 
rtw_phy_pwr_track(rtwdev); 555 } 556 557 #define FRAC_BITS 3 558 559 static u8 rtw_phy_power_2_db(s8 power) 560 { 561 if (power <= -100 || power >= 20) 562 return 0; 563 else if (power >= 0) 564 return 100; 565 else 566 return 100 + power; 567 } 568 569 static u64 rtw_phy_db_2_linear(u8 power_db) 570 { 571 u8 i, j; 572 u64 linear; 573 574 if (power_db > 96) 575 power_db = 96; 576 else if (power_db < 1) 577 return 1; 578 579 /* 1dB ~ 96dB */ 580 i = (power_db - 1) >> 3; 581 j = (power_db - 1) - (i << 3); 582 583 linear = db_invert_table[i][j]; 584 linear = i > 2 ? linear << FRAC_BITS : linear; 585 586 return linear; 587 } 588 589 static u8 rtw_phy_linear_2_db(u64 linear) 590 { 591 u8 i; 592 u8 j; 593 u32 dB; 594 595 if (linear >= db_invert_table[11][7]) 596 return 96; /* maximum 96 dB */ 597 598 for (i = 0; i < 12; i++) { 599 if (i <= 2 && (linear << FRAC_BITS) <= db_invert_table[i][7]) 600 break; 601 else if (i > 2 && linear <= db_invert_table[i][7]) 602 break; 603 } 604 605 for (j = 0; j < 8; j++) { 606 if (i <= 2 && (linear << FRAC_BITS) <= db_invert_table[i][j]) 607 break; 608 else if (i > 2 && linear <= db_invert_table[i][j]) 609 break; 610 } 611 612 if (j == 0 && i == 0) 613 goto end; 614 615 if (j == 0) { 616 if (i != 3) { 617 if (db_invert_table[i][0] - linear > 618 linear - db_invert_table[i - 1][7]) { 619 i = i - 1; 620 j = 7; 621 } 622 } else { 623 if (db_invert_table[3][0] - linear > 624 linear - db_invert_table[2][7]) { 625 i = 2; 626 j = 7; 627 } 628 } 629 } else { 630 if (db_invert_table[i][j] - linear > 631 linear - db_invert_table[i][j - 1]) { 632 j = j - 1; 633 } 634 } 635 end: 636 dB = (i << 3) + j + 1; 637 638 return dB; 639 } 640 641 u8 rtw_phy_rf_power_2_rssi(s8 *rf_power, u8 path_num) 642 { 643 s8 power; 644 u8 power_db; 645 u64 linear; 646 u64 sum = 0; 647 u8 path; 648 649 for (path = 0; path < path_num; path++) { 650 power = rf_power[path]; 651 power_db = rtw_phy_power_2_db(power); 652 linear = rtw_phy_db_2_linear(power_db); 653 sum += 
linear; 654 } 655 656 sum = (sum + (1 << (FRAC_BITS - 1))) >> FRAC_BITS; 657 switch (path_num) { 658 case 2: 659 sum >>= 1; 660 break; 661 case 3: 662 sum = ((sum) + ((sum) << 1) + ((sum) << 3)) >> 5; 663 break; 664 case 4: 665 sum >>= 2; 666 break; 667 default: 668 break; 669 } 670 671 return rtw_phy_linear_2_db(sum); 672 } 673 674 u32 rtw_phy_read_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path, 675 u32 addr, u32 mask) 676 { 677 struct rtw_hal *hal = &rtwdev->hal; 678 struct rtw_chip_info *chip = rtwdev->chip; 679 const u32 *base_addr = chip->rf_base_addr; 680 u32 val, direct_addr; 681 682 if (rf_path >= hal->rf_path_num) { 683 rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path); 684 return INV_RF_DATA; 685 } 686 687 addr &= 0xff; 688 direct_addr = base_addr[rf_path] + (addr << 2); 689 mask &= RFREG_MASK; 690 691 val = rtw_read32_mask(rtwdev, direct_addr, mask); 692 693 return val; 694 } 695 696 bool rtw_phy_write_rf_reg_sipi(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path, 697 u32 addr, u32 mask, u32 data) 698 { 699 struct rtw_hal *hal = &rtwdev->hal; 700 struct rtw_chip_info *chip = rtwdev->chip; 701 u32 *sipi_addr = chip->rf_sipi_addr; 702 u32 data_and_addr; 703 u32 old_data = 0; 704 u32 shift; 705 706 if (rf_path >= hal->rf_path_num) { 707 rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path); 708 return false; 709 } 710 711 addr &= 0xff; 712 mask &= RFREG_MASK; 713 714 if (mask != RFREG_MASK) { 715 old_data = rtw_phy_read_rf(rtwdev, rf_path, addr, RFREG_MASK); 716 717 if (old_data == INV_RF_DATA) { 718 rtw_err(rtwdev, "Write fail, rf is disabled\n"); 719 return false; 720 } 721 722 shift = __ffs(mask); 723 data = ((old_data) & (~mask)) | (data << shift); 724 } 725 726 data_and_addr = ((addr << 20) | (data & 0x000fffff)) & 0x0fffffff; 727 728 rtw_write32(rtwdev, sipi_addr[rf_path], data_and_addr); 729 730 udelay(13); 731 732 return true; 733 } 734 735 bool rtw_phy_write_rf_reg(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path, 736 u32 addr, u32 mask, 
u32 data) 737 { 738 struct rtw_hal *hal = &rtwdev->hal; 739 struct rtw_chip_info *chip = rtwdev->chip; 740 const u32 *base_addr = chip->rf_base_addr; 741 u32 direct_addr; 742 743 if (rf_path >= hal->rf_path_num) { 744 rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path); 745 return false; 746 } 747 748 addr &= 0xff; 749 direct_addr = base_addr[rf_path] + (addr << 2); 750 mask &= RFREG_MASK; 751 752 rtw_write32_mask(rtwdev, direct_addr, mask, data); 753 754 udelay(1); 755 756 return true; 757 } 758 759 bool rtw_phy_write_rf_reg_mix(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path, 760 u32 addr, u32 mask, u32 data) 761 { 762 if (addr != 0x00) 763 return rtw_phy_write_rf_reg(rtwdev, rf_path, addr, mask, data); 764 765 return rtw_phy_write_rf_reg_sipi(rtwdev, rf_path, addr, mask, data); 766 } 767 768 void rtw_phy_setup_phy_cond(struct rtw_dev *rtwdev, u32 pkg) 769 { 770 struct rtw_hal *hal = &rtwdev->hal; 771 struct rtw_efuse *efuse = &rtwdev->efuse; 772 struct rtw_phy_cond cond = {0}; 773 774 cond.cut = hal->cut_version ? hal->cut_version : 15; 775 cond.pkg = pkg ? 
pkg : 15; 776 cond.plat = 0x04; 777 cond.rfe = efuse->rfe_option; 778 779 switch (rtw_hci_type(rtwdev)) { 780 case RTW_HCI_TYPE_USB: 781 cond.intf = INTF_USB; 782 break; 783 case RTW_HCI_TYPE_SDIO: 784 cond.intf = INTF_SDIO; 785 break; 786 case RTW_HCI_TYPE_PCIE: 787 default: 788 cond.intf = INTF_PCIE; 789 break; 790 } 791 792 hal->phy_cond = cond; 793 794 rtw_dbg(rtwdev, RTW_DBG_PHY, "phy cond=0x%08x\n", *((u32 *)&hal->phy_cond)); 795 } 796 797 static bool check_positive(struct rtw_dev *rtwdev, struct rtw_phy_cond cond) 798 { 799 struct rtw_hal *hal = &rtwdev->hal; 800 struct rtw_phy_cond drv_cond = hal->phy_cond; 801 802 if (cond.cut && cond.cut != drv_cond.cut) 803 return false; 804 805 if (cond.pkg && cond.pkg != drv_cond.pkg) 806 return false; 807 808 if (cond.intf && cond.intf != drv_cond.intf) 809 return false; 810 811 if (cond.rfe != drv_cond.rfe) 812 return false; 813 814 return true; 815 } 816 817 void rtw_parse_tbl_phy_cond(struct rtw_dev *rtwdev, const struct rtw_table *tbl) 818 { 819 const union phy_table_tile *p = tbl->data; 820 const union phy_table_tile *end = p + tbl->size / 2; 821 struct rtw_phy_cond pos_cond = {0}; 822 bool is_matched = true, is_skipped = false; 823 824 BUILD_BUG_ON(sizeof(union phy_table_tile) != sizeof(struct phy_cfg_pair)); 825 826 for (; p < end; p++) { 827 if (p->cond.pos) { 828 switch (p->cond.branch) { 829 case BRANCH_ENDIF: 830 is_matched = true; 831 is_skipped = false; 832 break; 833 case BRANCH_ELSE: 834 is_matched = is_skipped ? 
false : true; 835 break; 836 case BRANCH_IF: 837 case BRANCH_ELIF: 838 default: 839 pos_cond = p->cond; 840 break; 841 } 842 } else if (p->cond.neg) { 843 if (!is_skipped) { 844 if (check_positive(rtwdev, pos_cond)) { 845 is_matched = true; 846 is_skipped = true; 847 } else { 848 is_matched = false; 849 is_skipped = false; 850 } 851 } else { 852 is_matched = false; 853 } 854 } else if (is_matched) { 855 (*tbl->do_cfg)(rtwdev, tbl, p->cfg.addr, p->cfg.data); 856 } 857 } 858 } 859 860 #define bcd_to_dec_pwr_by_rate(val, i) bcd2bin(val >> (i * 8)) 861 862 static u8 tbl_to_dec_pwr_by_rate(struct rtw_dev *rtwdev, u32 hex, u8 i) 863 { 864 if (rtwdev->chip->is_pwr_by_rate_dec) 865 return bcd_to_dec_pwr_by_rate(hex, i); 866 867 return (hex >> (i * 8)) & 0xFF; 868 } 869 870 static void 871 rtw_phy_get_rate_values_of_txpwr_by_rate(struct rtw_dev *rtwdev, 872 u32 addr, u32 mask, u32 val, u8 *rate, 873 u8 *pwr_by_rate, u8 *rate_num) 874 { 875 int i; 876 877 switch (addr) { 878 case 0xE00: 879 case 0x830: 880 rate[0] = DESC_RATE6M; 881 rate[1] = DESC_RATE9M; 882 rate[2] = DESC_RATE12M; 883 rate[3] = DESC_RATE18M; 884 for (i = 0; i < 4; ++i) 885 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i); 886 *rate_num = 4; 887 break; 888 case 0xE04: 889 case 0x834: 890 rate[0] = DESC_RATE24M; 891 rate[1] = DESC_RATE36M; 892 rate[2] = DESC_RATE48M; 893 rate[3] = DESC_RATE54M; 894 for (i = 0; i < 4; ++i) 895 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i); 896 *rate_num = 4; 897 break; 898 case 0xE08: 899 rate[0] = DESC_RATE1M; 900 pwr_by_rate[0] = bcd_to_dec_pwr_by_rate(val, 1); 901 *rate_num = 1; 902 break; 903 case 0x86C: 904 if (mask == 0xffffff00) { 905 rate[0] = DESC_RATE2M; 906 rate[1] = DESC_RATE5_5M; 907 rate[2] = DESC_RATE11M; 908 for (i = 1; i < 4; ++i) 909 pwr_by_rate[i - 1] = 910 tbl_to_dec_pwr_by_rate(rtwdev, val, i); 911 *rate_num = 3; 912 } else if (mask == 0x000000ff) { 913 rate[0] = DESC_RATE11M; 914 pwr_by_rate[0] = bcd_to_dec_pwr_by_rate(val, 0); 915 
*rate_num = 1; 916 } 917 break; 918 case 0xE10: 919 case 0x83C: 920 rate[0] = DESC_RATEMCS0; 921 rate[1] = DESC_RATEMCS1; 922 rate[2] = DESC_RATEMCS2; 923 rate[3] = DESC_RATEMCS3; 924 for (i = 0; i < 4; ++i) 925 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i); 926 *rate_num = 4; 927 break; 928 case 0xE14: 929 case 0x848: 930 rate[0] = DESC_RATEMCS4; 931 rate[1] = DESC_RATEMCS5; 932 rate[2] = DESC_RATEMCS6; 933 rate[3] = DESC_RATEMCS7; 934 for (i = 0; i < 4; ++i) 935 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i); 936 *rate_num = 4; 937 break; 938 case 0xE18: 939 case 0x84C: 940 rate[0] = DESC_RATEMCS8; 941 rate[1] = DESC_RATEMCS9; 942 rate[2] = DESC_RATEMCS10; 943 rate[3] = DESC_RATEMCS11; 944 for (i = 0; i < 4; ++i) 945 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i); 946 *rate_num = 4; 947 break; 948 case 0xE1C: 949 case 0x868: 950 rate[0] = DESC_RATEMCS12; 951 rate[1] = DESC_RATEMCS13; 952 rate[2] = DESC_RATEMCS14; 953 rate[3] = DESC_RATEMCS15; 954 for (i = 0; i < 4; ++i) 955 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i); 956 *rate_num = 4; 957 break; 958 case 0x838: 959 rate[0] = DESC_RATE1M; 960 rate[1] = DESC_RATE2M; 961 rate[2] = DESC_RATE5_5M; 962 for (i = 1; i < 4; ++i) 963 pwr_by_rate[i - 1] = tbl_to_dec_pwr_by_rate(rtwdev, 964 val, i); 965 *rate_num = 3; 966 break; 967 case 0xC20: 968 case 0xE20: 969 case 0x1820: 970 case 0x1A20: 971 rate[0] = DESC_RATE1M; 972 rate[1] = DESC_RATE2M; 973 rate[2] = DESC_RATE5_5M; 974 rate[3] = DESC_RATE11M; 975 for (i = 0; i < 4; ++i) 976 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i); 977 *rate_num = 4; 978 break; 979 case 0xC24: 980 case 0xE24: 981 case 0x1824: 982 case 0x1A24: 983 rate[0] = DESC_RATE6M; 984 rate[1] = DESC_RATE9M; 985 rate[2] = DESC_RATE12M; 986 rate[3] = DESC_RATE18M; 987 for (i = 0; i < 4; ++i) 988 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i); 989 *rate_num = 4; 990 break; 991 case 0xC28: 992 case 0xE28: 993 case 0x1828: 994 case 0x1A28: 995 
rate[0] = DESC_RATE24M; 996 rate[1] = DESC_RATE36M; 997 rate[2] = DESC_RATE48M; 998 rate[3] = DESC_RATE54M; 999 for (i = 0; i < 4; ++i) 1000 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i); 1001 *rate_num = 4; 1002 break; 1003 case 0xC2C: 1004 case 0xE2C: 1005 case 0x182C: 1006 case 0x1A2C: 1007 rate[0] = DESC_RATEMCS0; 1008 rate[1] = DESC_RATEMCS1; 1009 rate[2] = DESC_RATEMCS2; 1010 rate[3] = DESC_RATEMCS3; 1011 for (i = 0; i < 4; ++i) 1012 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i); 1013 *rate_num = 4; 1014 break; 1015 case 0xC30: 1016 case 0xE30: 1017 case 0x1830: 1018 case 0x1A30: 1019 rate[0] = DESC_RATEMCS4; 1020 rate[1] = DESC_RATEMCS5; 1021 rate[2] = DESC_RATEMCS6; 1022 rate[3] = DESC_RATEMCS7; 1023 for (i = 0; i < 4; ++i) 1024 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i); 1025 *rate_num = 4; 1026 break; 1027 case 0xC34: 1028 case 0xE34: 1029 case 0x1834: 1030 case 0x1A34: 1031 rate[0] = DESC_RATEMCS8; 1032 rate[1] = DESC_RATEMCS9; 1033 rate[2] = DESC_RATEMCS10; 1034 rate[3] = DESC_RATEMCS11; 1035 for (i = 0; i < 4; ++i) 1036 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i); 1037 *rate_num = 4; 1038 break; 1039 case 0xC38: 1040 case 0xE38: 1041 case 0x1838: 1042 case 0x1A38: 1043 rate[0] = DESC_RATEMCS12; 1044 rate[1] = DESC_RATEMCS13; 1045 rate[2] = DESC_RATEMCS14; 1046 rate[3] = DESC_RATEMCS15; 1047 for (i = 0; i < 4; ++i) 1048 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i); 1049 *rate_num = 4; 1050 break; 1051 case 0xC3C: 1052 case 0xE3C: 1053 case 0x183C: 1054 case 0x1A3C: 1055 rate[0] = DESC_RATEVHT1SS_MCS0; 1056 rate[1] = DESC_RATEVHT1SS_MCS1; 1057 rate[2] = DESC_RATEVHT1SS_MCS2; 1058 rate[3] = DESC_RATEVHT1SS_MCS3; 1059 for (i = 0; i < 4; ++i) 1060 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i); 1061 *rate_num = 4; 1062 break; 1063 case 0xC40: 1064 case 0xE40: 1065 case 0x1840: 1066 case 0x1A40: 1067 rate[0] = DESC_RATEVHT1SS_MCS4; 1068 rate[1] = DESC_RATEVHT1SS_MCS5; 1069 rate[2] = 
DESC_RATEVHT1SS_MCS6; 1070 rate[3] = DESC_RATEVHT1SS_MCS7; 1071 for (i = 0; i < 4; ++i) 1072 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i); 1073 *rate_num = 4; 1074 break; 1075 case 0xC44: 1076 case 0xE44: 1077 case 0x1844: 1078 case 0x1A44: 1079 rate[0] = DESC_RATEVHT1SS_MCS8; 1080 rate[1] = DESC_RATEVHT1SS_MCS9; 1081 rate[2] = DESC_RATEVHT2SS_MCS0; 1082 rate[3] = DESC_RATEVHT2SS_MCS1; 1083 for (i = 0; i < 4; ++i) 1084 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i); 1085 *rate_num = 4; 1086 break; 1087 case 0xC48: 1088 case 0xE48: 1089 case 0x1848: 1090 case 0x1A48: 1091 rate[0] = DESC_RATEVHT2SS_MCS2; 1092 rate[1] = DESC_RATEVHT2SS_MCS3; 1093 rate[2] = DESC_RATEVHT2SS_MCS4; 1094 rate[3] = DESC_RATEVHT2SS_MCS5; 1095 for (i = 0; i < 4; ++i) 1096 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i); 1097 *rate_num = 4; 1098 break; 1099 case 0xC4C: 1100 case 0xE4C: 1101 case 0x184C: 1102 case 0x1A4C: 1103 rate[0] = DESC_RATEVHT2SS_MCS6; 1104 rate[1] = DESC_RATEVHT2SS_MCS7; 1105 rate[2] = DESC_RATEVHT2SS_MCS8; 1106 rate[3] = DESC_RATEVHT2SS_MCS9; 1107 for (i = 0; i < 4; ++i) 1108 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i); 1109 *rate_num = 4; 1110 break; 1111 case 0xCD8: 1112 case 0xED8: 1113 case 0x18D8: 1114 case 0x1AD8: 1115 rate[0] = DESC_RATEMCS16; 1116 rate[1] = DESC_RATEMCS17; 1117 rate[2] = DESC_RATEMCS18; 1118 rate[3] = DESC_RATEMCS19; 1119 for (i = 0; i < 4; ++i) 1120 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i); 1121 *rate_num = 4; 1122 break; 1123 case 0xCDC: 1124 case 0xEDC: 1125 case 0x18DC: 1126 case 0x1ADC: 1127 rate[0] = DESC_RATEMCS20; 1128 rate[1] = DESC_RATEMCS21; 1129 rate[2] = DESC_RATEMCS22; 1130 rate[3] = DESC_RATEMCS23; 1131 for (i = 0; i < 4; ++i) 1132 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i); 1133 *rate_num = 4; 1134 break; 1135 case 0xCE0: 1136 case 0xEE0: 1137 case 0x18E0: 1138 case 0x1AE0: 1139 rate[0] = DESC_RATEVHT3SS_MCS0; 1140 rate[1] = DESC_RATEVHT3SS_MCS1; 1141 rate[2] = 
DESC_RATEVHT3SS_MCS2; 1142 rate[3] = DESC_RATEVHT3SS_MCS3; 1143 for (i = 0; i < 4; ++i) 1144 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i); 1145 *rate_num = 4; 1146 break; 1147 case 0xCE4: 1148 case 0xEE4: 1149 case 0x18E4: 1150 case 0x1AE4: 1151 rate[0] = DESC_RATEVHT3SS_MCS4; 1152 rate[1] = DESC_RATEVHT3SS_MCS5; 1153 rate[2] = DESC_RATEVHT3SS_MCS6; 1154 rate[3] = DESC_RATEVHT3SS_MCS7; 1155 for (i = 0; i < 4; ++i) 1156 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i); 1157 *rate_num = 4; 1158 break; 1159 case 0xCE8: 1160 case 0xEE8: 1161 case 0x18E8: 1162 case 0x1AE8: 1163 rate[0] = DESC_RATEVHT3SS_MCS8; 1164 rate[1] = DESC_RATEVHT3SS_MCS9; 1165 for (i = 0; i < 2; ++i) 1166 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i); 1167 *rate_num = 2; 1168 break; 1169 default: 1170 rtw_warn(rtwdev, "invalid tx power index addr 0x%08x\n", addr); 1171 break; 1172 } 1173 } 1174 1175 static void rtw_phy_store_tx_power_by_rate(struct rtw_dev *rtwdev, 1176 u32 band, u32 rfpath, u32 txnum, 1177 u32 regaddr, u32 bitmask, u32 data) 1178 { 1179 struct rtw_hal *hal = &rtwdev->hal; 1180 u8 rate_num = 0; 1181 u8 rate; 1182 u8 rates[RTW_RF_PATH_MAX] = {0}; 1183 s8 offset; 1184 s8 pwr_by_rate[RTW_RF_PATH_MAX] = {0}; 1185 int i; 1186 1187 rtw_phy_get_rate_values_of_txpwr_by_rate(rtwdev, regaddr, bitmask, data, 1188 rates, pwr_by_rate, &rate_num); 1189 1190 if (WARN_ON(rfpath >= RTW_RF_PATH_MAX || 1191 (band != PHY_BAND_2G && band != PHY_BAND_5G) || 1192 rate_num > RTW_RF_PATH_MAX)) 1193 return; 1194 1195 for (i = 0; i < rate_num; i++) { 1196 offset = pwr_by_rate[i]; 1197 rate = rates[i]; 1198 if (band == PHY_BAND_2G) 1199 hal->tx_pwr_by_rate_offset_2g[rfpath][rate] = offset; 1200 else if (band == PHY_BAND_5G) 1201 hal->tx_pwr_by_rate_offset_5g[rfpath][rate] = offset; 1202 else 1203 continue; 1204 } 1205 } 1206 1207 void rtw_parse_tbl_bb_pg(struct rtw_dev *rtwdev, const struct rtw_table *tbl) 1208 { 1209 const struct rtw_phy_pg_cfg_pair *p = tbl->data; 1210 const 
struct rtw_phy_pg_cfg_pair *end = p + tbl->size; 1211 1212 for (; p < end; p++) { 1213 if (p->addr == 0xfe || p->addr == 0xffe) { 1214 msleep(50); 1215 continue; 1216 } 1217 rtw_phy_store_tx_power_by_rate(rtwdev, p->band, p->rf_path, 1218 p->tx_num, p->addr, p->bitmask, 1219 p->data); 1220 } 1221 } 1222 1223 static const u8 rtw_channel_idx_5g[RTW_MAX_CHANNEL_NUM_5G] = { 1224 36, 38, 40, 42, 44, 46, 48, /* Band 1 */ 1225 52, 54, 56, 58, 60, 62, 64, /* Band 2 */ 1226 100, 102, 104, 106, 108, 110, 112, /* Band 3 */ 1227 116, 118, 120, 122, 124, 126, 128, /* Band 3 */ 1228 132, 134, 136, 138, 140, 142, 144, /* Band 3 */ 1229 149, 151, 153, 155, 157, 159, 161, /* Band 4 */ 1230 165, 167, 169, 171, 173, 175, 177}; /* Band 4 */ 1231 1232 static int rtw_channel_to_idx(u8 band, u8 channel) 1233 { 1234 int ch_idx; 1235 u8 n_channel; 1236 1237 if (band == PHY_BAND_2G) { 1238 ch_idx = channel - 1; 1239 n_channel = RTW_MAX_CHANNEL_NUM_2G; 1240 } else if (band == PHY_BAND_5G) { 1241 n_channel = RTW_MAX_CHANNEL_NUM_5G; 1242 for (ch_idx = 0; ch_idx < n_channel; ch_idx++) 1243 if (rtw_channel_idx_5g[ch_idx] == channel) 1244 break; 1245 } else { 1246 return -1; 1247 } 1248 1249 if (ch_idx >= n_channel) 1250 return -1; 1251 1252 return ch_idx; 1253 } 1254 1255 static void rtw_phy_set_tx_power_limit(struct rtw_dev *rtwdev, u8 regd, u8 band, 1256 u8 bw, u8 rs, u8 ch, s8 pwr_limit) 1257 { 1258 struct rtw_hal *hal = &rtwdev->hal; 1259 u8 max_power_index = rtwdev->chip->max_power_index; 1260 s8 ww; 1261 int ch_idx; 1262 1263 pwr_limit = clamp_t(s8, pwr_limit, 1264 -max_power_index, max_power_index); 1265 ch_idx = rtw_channel_to_idx(band, ch); 1266 1267 if (regd >= RTW_REGD_MAX || bw >= RTW_CHANNEL_WIDTH_MAX || 1268 rs >= RTW_RATE_SECTION_MAX || ch_idx < 0) { 1269 WARN(1, 1270 "wrong txpwr_lmt regd=%u, band=%u bw=%u, rs=%u, ch_idx=%u, pwr_limit=%d\n", 1271 regd, band, bw, rs, ch_idx, pwr_limit); 1272 return; 1273 } 1274 1275 if (band == PHY_BAND_2G) { 1276 
hal->tx_pwr_limit_2g[regd][bw][rs][ch_idx] = pwr_limit; 1277 ww = hal->tx_pwr_limit_2g[RTW_REGD_WW][bw][rs][ch_idx]; 1278 ww = min_t(s8, ww, pwr_limit); 1279 hal->tx_pwr_limit_2g[RTW_REGD_WW][bw][rs][ch_idx] = ww; 1280 } else if (band == PHY_BAND_5G) { 1281 hal->tx_pwr_limit_5g[regd][bw][rs][ch_idx] = pwr_limit; 1282 ww = hal->tx_pwr_limit_5g[RTW_REGD_WW][bw][rs][ch_idx]; 1283 ww = min_t(s8, ww, pwr_limit); 1284 hal->tx_pwr_limit_5g[RTW_REGD_WW][bw][rs][ch_idx] = ww; 1285 } 1286 } 1287 1288 /* cross-reference 5G power limits if values are not assigned */ 1289 static void 1290 rtw_xref_5g_txpwr_lmt(struct rtw_dev *rtwdev, u8 regd, 1291 u8 bw, u8 ch_idx, u8 rs_ht, u8 rs_vht) 1292 { 1293 struct rtw_hal *hal = &rtwdev->hal; 1294 u8 max_power_index = rtwdev->chip->max_power_index; 1295 s8 lmt_ht = hal->tx_pwr_limit_5g[regd][bw][rs_ht][ch_idx]; 1296 s8 lmt_vht = hal->tx_pwr_limit_5g[regd][bw][rs_vht][ch_idx]; 1297 1298 if (lmt_ht == lmt_vht) 1299 return; 1300 1301 if (lmt_ht == max_power_index) 1302 hal->tx_pwr_limit_5g[regd][bw][rs_ht][ch_idx] = lmt_vht; 1303 1304 else if (lmt_vht == max_power_index) 1305 hal->tx_pwr_limit_5g[regd][bw][rs_vht][ch_idx] = lmt_ht; 1306 } 1307 1308 /* cross-reference power limits for ht and vht */ 1309 static void 1310 rtw_xref_txpwr_lmt_by_rs(struct rtw_dev *rtwdev, u8 regd, u8 bw, u8 ch_idx) 1311 { 1312 u8 rs_idx, rs_ht, rs_vht; 1313 u8 rs_cmp[2][2] = {{RTW_RATE_SECTION_HT_1S, RTW_RATE_SECTION_VHT_1S}, 1314 {RTW_RATE_SECTION_HT_2S, RTW_RATE_SECTION_VHT_2S} }; 1315 1316 for (rs_idx = 0; rs_idx < 2; rs_idx++) { 1317 rs_ht = rs_cmp[rs_idx][0]; 1318 rs_vht = rs_cmp[rs_idx][1]; 1319 1320 rtw_xref_5g_txpwr_lmt(rtwdev, regd, bw, ch_idx, rs_ht, rs_vht); 1321 } 1322 } 1323 1324 /* cross-reference power limits for 5G channels */ 1325 static void 1326 rtw_xref_5g_txpwr_lmt_by_ch(struct rtw_dev *rtwdev, u8 regd, u8 bw) 1327 { 1328 u8 ch_idx; 1329 1330 for (ch_idx = 0; ch_idx < RTW_MAX_CHANNEL_NUM_5G; ch_idx++) 1331 rtw_xref_txpwr_lmt_by_rs(rtwdev, 
regd, bw, ch_idx);
}

/* cross-reference power limits for 20/40M bandwidth */
static void
rtw_xref_txpwr_lmt_by_bw(struct rtw_dev *rtwdev, u8 regd)
{
	u8 bw;

	for (bw = RTW_CHANNEL_WIDTH_20; bw <= RTW_CHANNEL_WIDTH_40; bw++)
		rtw_xref_5g_txpwr_lmt_by_ch(rtwdev, regd, bw);
}

/* cross-reference power limits */
static void rtw_xref_txpwr_lmt(struct rtw_dev *rtwdev)
{
	u8 regd;

	for (regd = 0; regd < RTW_REGD_MAX; regd++)
		rtw_xref_txpwr_lmt_by_bw(rtwdev, regd);
}

/* Parse a tx power limit table: store every entry, then fill unassigned
 * HT/VHT limits from their siblings via rtw_xref_txpwr_lmt().
 */
void rtw_parse_tbl_txpwr_lmt(struct rtw_dev *rtwdev,
			     const struct rtw_table *tbl)
{
	const struct rtw_txpwr_lmt_cfg_pair *p = tbl->data;
	const struct rtw_txpwr_lmt_cfg_pair *end = p + tbl->size;

	for (; p < end; p++) {
		rtw_phy_set_tx_power_limit(rtwdev, p->regd, p->band,
					   p->bw, p->rs, p->ch, p->txpwr_lmt);
	}

	rtw_xref_txpwr_lmt(rtwdev);
}

/* Table writers used by rtw_load_table(): one per register block type. */
void rtw_phy_cfg_mac(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
		     u32 addr, u32 data)
{
	rtw_write8(rtwdev, addr, data);
}

void rtw_phy_cfg_agc(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
		     u32 addr, u32 data)
{
	rtw_write32(rtwdev, addr, data);
}

/* BB table writer; the low pseudo-addresses 0xf9-0xfe encode delays
 * instead of register writes.
 */
void rtw_phy_cfg_bb(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
		    u32 addr, u32 data)
{
	if (addr == 0xfe)
		msleep(50);
	else if (addr == 0xfd)
		mdelay(5);
	else if (addr == 0xfc)
		mdelay(1);
	else if (addr == 0xfb)
		usleep_range(50, 60);
	else if (addr == 0xfa)
		udelay(5);
	else if (addr == 0xf9)
		udelay(1);
	else
		rtw_write32(rtwdev, addr, data);
}

/* RF table writer; 0xffe/0xfe are delay markers, everything else is a
 * full-mask RF register write on the table's RF path.
 */
void rtw_phy_cfg_rf(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
		    u32 addr, u32 data)
{
	if (addr == 0xffe) {
		msleep(50);
	} else if (addr == 0xfe) {
		usleep_range(100, 110);
	} else {
		rtw_write_rf(rtwdev, tbl->rf_path, addr, RFREG_MASK, data);
		udelay(1);
	}
}

/* Power on the RF calibration blocks and load the chip's RFK init table,
 * if the chip provides one. Marks DPK power as on afterwards.
 */
static void rtw_load_rfk_table(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info;

	if (!chip->rfk_init_tbl)
		return;

	/* magic register setup required before loading the RFK table;
	 * exact bit semantics are chip-internal (not documented here)
	 */
	rtw_write32_mask(rtwdev, 0x1e24, BIT(17), 0x1);
	rtw_write32_mask(rtwdev, 0x1cd0, BIT(28), 0x1);
	rtw_write32_mask(rtwdev, 0x1cd0, BIT(29), 0x1);
	rtw_write32_mask(rtwdev, 0x1cd0, BIT(30), 0x1);
	rtw_write32_mask(rtwdev, 0x1cd0, BIT(31), 0x0);

	rtw_load_table(rtwdev, chip->rfk_init_tbl);

	dpk_info->is_dpk_pwr_on = true;
}

/* Load all chip parameter tables: MAC, BB, AGC, RFK, and one RF table
 * per active RF path.
 */
void rtw_phy_load_tables(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	u8 rf_path;

	rtw_load_table(rtwdev, chip->mac_tbl);
	rtw_load_table(rtwdev, chip->bb_tbl);
	rtw_load_table(rtwdev, chip->agc_tbl);
	rtw_load_rfk_table(rtwdev);

	for (rf_path = 0; rf_path < rtwdev->hal.rf_path_num; rf_path++) {
		const struct rtw_table *tbl;

		tbl = chip->rf_tbl[rf_path];
		rtw_load_table(rtwdev, tbl);
	}
}

/* Map a channel number (2.4G and 5G mixed) to its calibration group index
 * used by the efuse tx power base tables.
 */
static u8 rtw_get_channel_group(u8 channel)
{
	switch (channel) {
	default:
		WARN_ON(1);
		/* fall through */
	case 1:
	case 2:
	case 36:
	case 38:
	case 40:
	case 42:
		return 0;
	case 3:
	case 4:
	case 5:
	case 44:
	case 46:
	case 48:
	case 50:
		return 1;
	case 6:
	case 7:
	case 8:
	case 52:
	case 54:
	case 56:
	case 58:
		return 2;
	case 9:
	case 10:
	case 11:
	case 60:
	case 62:
	case 64:
		return 3;
	case 12:
	case 13:
	case 100:
	case 102:
	case 104:
	case 106:
		return 4;
	case 14:
	case 108:
	case 110:
	case 112:
	case 114:
		return 5;
	case 116:
	case 118:
	case 120:
	case 122:
		return 6;
	case 124:
	case 126:
	case 128:
	case 130:
		return 7;
	case 132:
	case 134:
	case 136:
	case 138:
		return 8;
	case 140:
	case 142:
	case 144:
		return 9;
	case 149:
	case 151:
	case 153:
	case 155:
		return 10;
	case 157:
	case 159:
	case 161:
		return 11;
	case 165:
	case 167:
	case 169:
	case 171:
		return 12;
	case 173:
	case 175:
	case 177:
		return 13;
	}
}

/* Return the tx power delta applied when DPD (digital pre-distortion) is
 * disabled for @rate: -6 (scaled by the chip's txgi factor) for rates the
 * chip's dpd_ratemask marks, 0 otherwise or when the chip lacks en_dis_dpd.
 */
static s8 rtw_phy_get_dis_dpd_by_rate_diff(struct rtw_dev *rtwdev, u16 rate)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	s8 dpd_diff = 0;

	if (!chip->en_dis_dpd)
		return 0;

#define RTW_DPD_RATE_CHECK(_rate)					\
	case DESC_RATE ## _rate:					\
	if (DIS_DPD_RATE ## _rate & chip->dpd_ratemask)			\
		dpd_diff = -6 * chip->txgi_factor;			\
	break

	switch (rate) {
	RTW_DPD_RATE_CHECK(6M);
	RTW_DPD_RATE_CHECK(9M);
	RTW_DPD_RATE_CHECK(MCS0);
	RTW_DPD_RATE_CHECK(MCS1);
	RTW_DPD_RATE_CHECK(MCS8);
	RTW_DPD_RATE_CHECK(MCS9);
	RTW_DPD_RATE_CHECK(VHT1SS_MCS0);
	RTW_DPD_RATE_CHECK(VHT1SS_MCS1);
	RTW_DPD_RATE_CHECK(VHT2SS_MCS0);
	RTW_DPD_RATE_CHECK(VHT2SS_MCS1);
	}
#undef RTW_DPD_RATE_CHECK

	return dpd_diff;
}

/* Compute the 2.4G base tx power index from the efuse table: CCK rates use
 * cck_base, everything else starts from bw40_base, then per-rate-class and
 * per-bandwidth diff terms (scaled by the chip's txgi factor) are added.
 */
static u8 rtw_phy_get_2g_tx_power_index(struct rtw_dev *rtwdev,
					struct rtw_2g_txpwr_idx *pwr_idx_2g,
					enum rtw_bandwidth bandwidth,
					u8 rate, u8 group)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	u8 tx_power;
	bool mcs_rate;
	bool above_2ss;
	u8 factor = chip->txgi_factor;

	if (rate <= DESC_RATE11M)
		tx_power = pwr_idx_2g->cck_base[group];
	else
		tx_power = pwr_idx_2g->bw40_base[group];

	if (rate >= DESC_RATE6M && rate <= DESC_RATE54M)
		tx_power += pwr_idx_2g->ht_1s_diff.ofdm * factor;

	mcs_rate = (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS15) ||
		   (rate >= DESC_RATEVHT1SS_MCS0 &&
		    rate <= DESC_RATEVHT2SS_MCS9);
	above_2ss = (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15) ||
		    (rate >= DESC_RATEVHT2SS_MCS0);

	if (!mcs_rate)
		return tx_power;

	switch (bandwidth) {
	default:
		WARN_ON(1);
		/* fall through */
	case RTW_CHANNEL_WIDTH_20:
		tx_power += pwr_idx_2g->ht_1s_diff.bw20 * factor;
		if (above_2ss)
			tx_power += pwr_idx_2g->ht_2s_diff.bw20 * factor;
		break;
	case RTW_CHANNEL_WIDTH_40:
		/* bw40 is the base power */
		if (above_2ss)
			tx_power += pwr_idx_2g->ht_2s_diff.bw40 * factor;
		break;
	}

	return tx_power;
}

/* 5G counterpart of rtw_phy_get_2g_tx_power_index(); bw80 uses the average
 * of the two neighbouring bw40 bases as its starting point.
 */
static u8 rtw_phy_get_5g_tx_power_index(struct rtw_dev *rtwdev,
					struct rtw_5g_txpwr_idx *pwr_idx_5g,
					enum rtw_bandwidth bandwidth,
					u8 rate, u8 group)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	u8 tx_power;
	u8 upper, lower;
	bool mcs_rate;
	bool above_2ss;
	u8 factor = chip->txgi_factor;

	tx_power = pwr_idx_5g->bw40_base[group];

	mcs_rate = (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS15) ||
		   (rate >= DESC_RATEVHT1SS_MCS0 &&
		    rate <= DESC_RATEVHT2SS_MCS9);
	above_2ss = (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15) ||
		    (rate >= DESC_RATEVHT2SS_MCS0);

	if (!mcs_rate) {
		/* non-MCS (OFDM) rates only take the ofdm diff term */
		tx_power += pwr_idx_5g->ht_1s_diff.ofdm * factor;
		return tx_power;
	}

	switch (bandwidth) {
	default:
		WARN_ON(1);
		/* fall through */
	case RTW_CHANNEL_WIDTH_20:
		tx_power += pwr_idx_5g->ht_1s_diff.bw20 * factor;
		if (above_2ss)
			tx_power += pwr_idx_5g->ht_2s_diff.bw20 * factor;
		break;
	case RTW_CHANNEL_WIDTH_40:
		/* bw40 is the base power */
		if (above_2ss)
			tx_power += pwr_idx_5g->ht_2s_diff.bw40 * factor;
		break;
	case RTW_CHANNEL_WIDTH_80:
		/* the base idx of bw80 is the average of bw40+/bw40- */
		lower = pwr_idx_5g->bw40_base[group];
		upper =
pwr_idx_5g->bw40_base[group + 1];

		tx_power = (lower + upper) / 2;
		tx_power += pwr_idx_5g->vht_1s_diff.bw80 * factor;
		if (above_2ss)
			tx_power += pwr_idx_5g->vht_2s_diff.bw80 * factor;
		break;
	}

	return tx_power;
}

/* Look up the regulatory tx power limit for a rate on a channel: pick the
 * rate section from @rate, cap the bandwidth to what the section allows,
 * then take the minimum limit across 20M..current bandwidth (each bandwidth
 * has its own center channel in hal->cch_by_bw).
 */
static s8 rtw_phy_get_tx_power_limit(struct rtw_dev *rtwdev, u8 band,
				     enum rtw_bandwidth bw, u8 rf_path,
				     u8 rate, u8 channel, u8 regd)
{
	struct rtw_hal *hal = &rtwdev->hal;
	u8 *cch_by_bw = hal->cch_by_bw;
	s8 power_limit = (s8)rtwdev->chip->max_power_index;
	u8 rs;
	int ch_idx;
	u8 cur_bw, cur_ch;
	s8 cur_lmt;

	if (regd > RTW_REGD_WW)
		return power_limit;

	if (rate >= DESC_RATE1M && rate <= DESC_RATE11M)
		rs = RTW_RATE_SECTION_CCK;
	else if (rate >= DESC_RATE6M && rate <= DESC_RATE54M)
		rs = RTW_RATE_SECTION_OFDM;
	else if (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS7)
		rs = RTW_RATE_SECTION_HT_1S;
	else if (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15)
		rs = RTW_RATE_SECTION_HT_2S;
	else if (rate >= DESC_RATEVHT1SS_MCS0 && rate <= DESC_RATEVHT1SS_MCS9)
		rs = RTW_RATE_SECTION_VHT_1S;
	else if (rate >= DESC_RATEVHT2SS_MCS0 && rate <= DESC_RATEVHT2SS_MCS9)
		rs = RTW_RATE_SECTION_VHT_2S;
	else
		goto err;

	/* only 20M BW with cck and ofdm */
	if (rs == RTW_RATE_SECTION_CCK || rs == RTW_RATE_SECTION_OFDM)
		bw = RTW_CHANNEL_WIDTH_20;

	/* only 20/40M BW with ht */
	if (rs == RTW_RATE_SECTION_HT_1S || rs == RTW_RATE_SECTION_HT_2S)
		bw = min_t(u8, bw, RTW_CHANNEL_WIDTH_40);

	/* select min power limit among [20M BW ~ current BW] */
	for (cur_bw = RTW_CHANNEL_WIDTH_20; cur_bw <= bw; cur_bw++) {
		cur_ch = cch_by_bw[cur_bw];

		ch_idx = rtw_channel_to_idx(band, cur_ch);
		if (ch_idx < 0)
			goto err;

		/* channel number <= 2G channel count implies a 2.4G channel;
		 * presumably relies on 5G channels starting well above 14 —
		 * TODO confirm against RTW_MAX_CHANNEL_NUM_2G's definition
		 */
		cur_lmt = cur_ch <= RTW_MAX_CHANNEL_NUM_2G ?
			hal->tx_pwr_limit_2g[regd][cur_bw][rs][ch_idx] :
			hal->tx_pwr_limit_5g[regd][cur_bw][rs][ch_idx];

		power_limit = min_t(s8, cur_lmt, power_limit);
	}

	return power_limit;

err:
	WARN(1, "invalid arguments, band=%d, bw=%d, path=%d, rate=%d, ch=%d\n",
	     band, bw, rf_path, rate, channel);
	return (s8)rtwdev->chip->max_power_index;
}

/* Gather the three tx power components for one (path, rate, bw, channel):
 * efuse base index, power-by-rate offset, and regulatory limit.
 */
void rtw_get_tx_power_params(struct rtw_dev *rtwdev, u8 path, u8 rate, u8 bw,
			     u8 ch, u8 regd, struct rtw_power_params *pwr_param)
{
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_txpwr_idx *pwr_idx;
	u8 group, band;
	u8 *base = &pwr_param->pwr_base;
	s8 *offset = &pwr_param->pwr_offset;
	s8 *limit = &pwr_param->pwr_limit;

	pwr_idx = &rtwdev->efuse.txpwr_idx_table[path];
	group = rtw_get_channel_group(ch);

	/* base power index for 2.4G/5G */
	if (IS_CH_2G_BAND(ch)) {
		band = PHY_BAND_2G;
		*base = rtw_phy_get_2g_tx_power_index(rtwdev,
						      &pwr_idx->pwr_idx_2g,
						      bw, rate, group);
		*offset = hal->tx_pwr_by_rate_offset_2g[path][rate];
	} else {
		band = PHY_BAND_5G;
		*base = rtw_phy_get_5g_tx_power_index(rtwdev,
						      &pwr_idx->pwr_idx_5g,
						      bw, rate, group);
		*offset = hal->tx_pwr_by_rate_offset_5g[path][rate];
	}

	*limit = rtw_phy_get_tx_power_limit(rtwdev, band, bw, path,
					    rate, ch, regd);
}

/* Final tx power index: base + min(offset, regulatory limit), plus the
 * DPD-disable delta, clamped to the chip's maximum power index.
 */
u8
rtw_phy_get_tx_power_index(struct rtw_dev *rtwdev, u8 rf_path, u8 rate,
			   enum rtw_bandwidth bandwidth, u8 channel, u8 regd)
{
	struct rtw_power_params pwr_param = {0};
	u8 tx_power;
	s8 offset;

	rtw_get_tx_power_params(rtwdev, rf_path, rate, bandwidth,
				channel, regd, &pwr_param);

	tx_power = pwr_param.pwr_base;
	offset = min_t(s8, pwr_param.pwr_offset, pwr_param.pwr_limit);

	if (rtwdev->chip->en_dis_dpd)
		offset += rtw_phy_get_dis_dpd_by_rate_diff(rtwdev, rate);

	tx_power += offset;
	if (tx_power > rtwdev->chip->max_power_index)
		tx_power = rtwdev->chip->max_power_index;

	return tx_power;
}

/* Fill hal->tx_pwr_tbl for every rate in one rate section, at the current
 * bandwidth, for the given path and channel.
 */
static void rtw_phy_set_tx_power_index_by_rs(struct rtw_dev *rtwdev,
					     u8 ch, u8 path, u8 rs)
{
	struct rtw_hal *hal = &rtwdev->hal;
	u8 regd = rtwdev->regd.txpwr_regd;
	u8 *rates;
	u8 size;
	u8 rate;
	u8 pwr_idx;
	u8 bw;
	int i;

	if (rs >= RTW_RATE_SECTION_MAX)
		return;

	rates = rtw_rate_section[rs];
	size = rtw_rate_size[rs];
	bw = hal->current_band_width;
	for (i = 0; i < size; i++) {
		rate = rates[i];
		pwr_idx = rtw_phy_get_tx_power_index(rtwdev, path, rate,
						     bw, ch, regd);
		hal->tx_pwr_tbl[path][rate] = pwr_idx;
	}
}

/* set tx power level by path for each rates, note that the order of the rates
 * are *very* important, bacause 8822B/8821C combines every four bytes of tx
 * power index into a four-byte power index register, and calls set_tx_agc to
 * write these values into hardware
 */
static void rtw_phy_set_tx_power_level_by_path(struct rtw_dev *rtwdev,
					       u8 ch, u8 path)
{
	struct rtw_hal *hal = &rtwdev->hal;
	u8 rs;

	/* do not need cck rates if we are not in 2.4G */
	if (hal->current_band_type == RTW_BAND_2G)
		rs = RTW_RATE_SECTION_CCK;
	else
		rs = RTW_RATE_SECTION_OFDM;

	for (; rs < RTW_RATE_SECTION_MAX; rs++)
		rtw_phy_set_tx_power_index_by_rs(rtwdev, ch, path, rs);
}

/* Recompute the tx power table for every path on @channel and push it to
 * hardware via the chip's set_tx_power_index op; serialized by the hal
 * tx_power_mutex.
 */
void rtw_phy_set_tx_power_level(struct rtw_dev *rtwdev, u8 channel)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_hal *hal = &rtwdev->hal;
	u8 path;

	mutex_lock(&hal->tx_power_mutex);

	for (path = 0; path < hal->rf_path_num; path++)
		rtw_phy_set_tx_power_level_by_path(rtwdev, channel, path);

	chip->ops->set_tx_power_index(rtwdev);
	mutex_unlock(&hal->tx_power_mutex);
}

/* Rebase one rate section's power-by-rate offsets: the section's reference
 * rate (third-highest for VHT sections, highest otherwise) becomes the base,
 * and every rate's offset is made relative to it.
 */
static void
rtw_phy_tx_power_by_rate_config_by_path(struct rtw_hal *hal, u8 path,
					u8 rs, u8 size, u8 *rates)
{
	u8 rate;
	u8 base_idx, rate_idx;
	s8 base_2g, base_5g;

	/* VHT sections use MCS7 (size - 3) as the reference rate */
	if (rs >= RTW_RATE_SECTION_VHT_1S)
		base_idx = rates[size - 3];
	else
		base_idx = rates[size - 1];
	base_2g = hal->tx_pwr_by_rate_offset_2g[path][base_idx];
	base_5g = hal->tx_pwr_by_rate_offset_5g[path][base_idx];
	hal->tx_pwr_by_rate_base_2g[path][rs] = base_2g;
	hal->tx_pwr_by_rate_base_5g[path][rs] = base_5g;
	for (rate = 0; rate < size; rate++) {
		rate_idx = rates[rate];
		hal->tx_pwr_by_rate_offset_2g[path][rate_idx] -= base_2g;
		hal->tx_pwr_by_rate_offset_5g[path][rate_idx] -= base_5g;
	}
}

/* Rebase the power-by-rate offsets of all rate sections on all paths. */
void rtw_phy_tx_power_by_rate_config(struct rtw_hal *hal)
{
	u8 path;

	for (path = 0; path < RTW_RF_PATH_MAX; path++) {
		rtw_phy_tx_power_by_rate_config_by_path(hal, path,
				RTW_RATE_SECTION_CCK,
				rtw_cck_size, rtw_cck_rates);
		rtw_phy_tx_power_by_rate_config_by_path(hal, path,
				RTW_RATE_SECTION_OFDM,
				rtw_ofdm_size, rtw_ofdm_rates);
		rtw_phy_tx_power_by_rate_config_by_path(hal, path,
				RTW_RATE_SECTION_HT_1S,
				rtw_ht_1s_size, rtw_ht_1s_rates);
		rtw_phy_tx_power_by_rate_config_by_path(hal, path,
				RTW_RATE_SECTION_HT_2S,
				rtw_ht_2s_size, rtw_ht_2s_rates);
		rtw_phy_tx_power_by_rate_config_by_path(hal, path,
				RTW_RATE_SECTION_VHT_1S,
				rtw_vht_1s_size, rtw_vht_1s_rates);
		rtw_phy_tx_power_by_rate_config_by_path(hal, path,
				RTW_RATE_SECTION_VHT_2S,
				rtw_vht_2s_size, rtw_vht_2s_rates);
	}
}

/* Subtract the path-0 by-rate base from every stored limit, so limits
 * become relative to the by-rate base, for one (regd, bw, rs) slice.
 */
static void
__rtw_phy_tx_power_limit_config(struct rtw_hal *hal, u8 regd, u8 bw, u8 rs)
{
	s8 base;
	u8 ch;

	for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_2G; ch++) {
		base = hal->tx_pwr_by_rate_base_2g[0][rs];
		hal->tx_pwr_limit_2g[regd][bw][rs][ch] -= base;
	}

	for (ch = 0; ch <
RTW_MAX_CHANNEL_NUM_5G; ch++) {
		base = hal->tx_pwr_by_rate_base_5g[0][rs];
		hal->tx_pwr_limit_5g[regd][bw][rs][ch] -= base;
	}
}

/* Make all stored tx power limits relative to the by-rate bases, for every
 * regd/bandwidth/rate-section combination.
 */
void rtw_phy_tx_power_limit_config(struct rtw_hal *hal)
{
	u8 regd, bw, rs;

	/* default at channel 1 */
	hal->cch_by_bw[RTW_CHANNEL_WIDTH_20] = 1;

	for (regd = 0; regd < RTW_REGD_MAX; regd++)
		for (bw = 0; bw < RTW_CHANNEL_WIDTH_MAX; bw++)
			for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++)
				__rtw_phy_tx_power_limit_config(hal, regd, bw, rs);
}

/* Reset one (regd, bw, rs) slice of the power limit tables to the chip's
 * maximum power index ("no limit" sentinel).
 */
static void rtw_phy_init_tx_power_limit(struct rtw_dev *rtwdev,
					u8 regd, u8 bw, u8 rs)
{
	struct rtw_hal *hal = &rtwdev->hal;
	s8 max_power_index = (s8)rtwdev->chip->max_power_index;
	u8 ch;

	/* 2.4G channels */
	for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_2G; ch++)
		hal->tx_pwr_limit_2g[regd][bw][rs][ch] = max_power_index;

	/* 5G channels */
	for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_5G; ch++)
		hal->tx_pwr_limit_5g[regd][bw][rs][ch] = max_power_index;
}

/* Zero all power-by-rate offsets and reset all power limits to the "no
 * limit" sentinel; called before any tables are parsed.
 */
void rtw_phy_init_tx_power(struct rtw_dev *rtwdev)
{
	struct rtw_hal *hal = &rtwdev->hal;
	u8 regd, path, rate, rs, bw;

	/* init tx power by rate offset */
	for (path = 0; path < RTW_RF_PATH_MAX; path++) {
		for (rate = 0; rate < DESC_RATE_MAX; rate++) {
			hal->tx_pwr_by_rate_offset_2g[path][rate] = 0;
			hal->tx_pwr_by_rate_offset_5g[path][rate] = 0;
		}
	}

	/* init tx power limit */
	for (regd = 0; regd < RTW_REGD_MAX; regd++)
		for (bw = 0; bw < RTW_CHANNEL_WIDTH_MAX; bw++)
			for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++)
				rtw_phy_init_tx_power_limit(rtwdev, regd, bw,
							    rs);
}

/* Select the per-path positive/negative power tracking swing tables that
 * match the current channel (2G CCK vs 2G OFDM vs the three 5G ranges);
 * falls back to the 2G OFDM tables for an unexpected channel.
 */
void rtw_phy_config_swing_table(struct rtw_dev *rtwdev,
				struct rtw_swing_table *swing_table)
{
	const struct rtw_pwr_track_tbl *tbl = rtwdev->chip->pwr_track_tbl;
	u8 channel = rtwdev->hal.current_channel;

	if (IS_CH_2G_BAND(channel)) {
		/* rates <= 11M are CCK and use the CCK-specific tables */
		if (rtwdev->dm_info.tx_rate <= DESC_RATE11M) {
			swing_table->p[RF_PATH_A] = tbl->pwrtrk_2g_ccka_p;
			swing_table->n[RF_PATH_A] = tbl->pwrtrk_2g_ccka_n;
			swing_table->p[RF_PATH_B] = tbl->pwrtrk_2g_cckb_p;
			swing_table->n[RF_PATH_B] = tbl->pwrtrk_2g_cckb_n;
		} else {
			swing_table->p[RF_PATH_A] = tbl->pwrtrk_2ga_p;
			swing_table->n[RF_PATH_A] = tbl->pwrtrk_2ga_n;
			swing_table->p[RF_PATH_B] = tbl->pwrtrk_2gb_p;
			swing_table->n[RF_PATH_B] = tbl->pwrtrk_2gb_n;
		}
	} else if (IS_CH_5G_BAND_1(channel) || IS_CH_5G_BAND_2(channel)) {
		swing_table->p[RF_PATH_A] = tbl->pwrtrk_5ga_p[RTW_PWR_TRK_5G_1];
		swing_table->n[RF_PATH_A] = tbl->pwrtrk_5ga_n[RTW_PWR_TRK_5G_1];
		swing_table->p[RF_PATH_B] = tbl->pwrtrk_5gb_p[RTW_PWR_TRK_5G_1];
		swing_table->n[RF_PATH_B] = tbl->pwrtrk_5gb_n[RTW_PWR_TRK_5G_1];
	} else if (IS_CH_5G_BAND_3(channel)) {
		swing_table->p[RF_PATH_A] = tbl->pwrtrk_5ga_p[RTW_PWR_TRK_5G_2];
		swing_table->n[RF_PATH_A] = tbl->pwrtrk_5ga_n[RTW_PWR_TRK_5G_2];
		swing_table->p[RF_PATH_B] = tbl->pwrtrk_5gb_p[RTW_PWR_TRK_5G_2];
		swing_table->n[RF_PATH_B] = tbl->pwrtrk_5gb_n[RTW_PWR_TRK_5G_2];
	} else if (IS_CH_5G_BAND_4(channel)) {
		swing_table->p[RF_PATH_A] = tbl->pwrtrk_5ga_p[RTW_PWR_TRK_5G_3];
		swing_table->n[RF_PATH_A] = tbl->pwrtrk_5ga_n[RTW_PWR_TRK_5G_3];
		swing_table->p[RF_PATH_B] = tbl->pwrtrk_5gb_p[RTW_PWR_TRK_5G_3];
		swing_table->n[RF_PATH_B] = tbl->pwrtrk_5gb_n[RTW_PWR_TRK_5G_3];
	} else {
		swing_table->p[RF_PATH_A] = tbl->pwrtrk_2ga_p;
		swing_table->n[RF_PATH_A] = tbl->pwrtrk_2ga_n;
		swing_table->p[RF_PATH_B] = tbl->pwrtrk_2gb_p;
		swing_table->n[RF_PATH_B] = tbl->pwrtrk_2gb_n;
	}
}

/* Feed a new thermal sample into the per-path EWMA and cache the average. */
void rtw_phy_pwrtrack_avg(struct rtw_dev *rtwdev, u8 thermal, u8 path)
{
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;

	ewma_thermal_add(&dm_info->avg_thermal[path], thermal);
	dm_info->thermal_avg[path] =
		ewma_thermal_read(&dm_info->avg_thermal[path]);
}

/* Return true if the new thermal reading differs from the running EWMA
 * average for @path (i.e. power tracking work may be needed).
 */
bool rtw_phy_pwrtrack_thermal_changed(struct rtw_dev *rtwdev, u8 thermal,
				      u8 path)
{
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
	u8 avg = ewma_thermal_read(&dm_info->avg_thermal[path]);

	if (avg == thermal)
		return false;

	return true;
}

/* Absolute difference between the averaged thermal value and the efuse
 * calibration value, clamped to the last power tracking table index.
 */
u8 rtw_phy_pwrtrack_get_delta(struct rtw_dev *rtwdev, u8 path)
{
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
	u8 therm_avg, therm_efuse, therm_delta;

	therm_avg = dm_info->thermal_avg[path];
	therm_efuse = rtwdev->efuse.thermal_meter[path];
	therm_delta = abs(therm_avg - therm_efuse);

	return min_t(u8, therm_delta, RTW_PWR_TRK_TBL_SZ - 1);
}

/* Look up the signed power adjustment for @delta in the configured swing
 * tables: positive table when the device runs hotter than the efuse
 * calibration point, negated negative table otherwise. Returns 0 (with a
 * warning) on any missing/overflowing table input.
 */
s8 rtw_phy_pwrtrack_get_pwridx(struct rtw_dev *rtwdev,
			       struct rtw_swing_table *swing_table,
			       u8 tbl_path, u8 therm_path, u8 delta)
{
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
	const u8 *delta_swing_table_idx_pos;
	const u8 *delta_swing_table_idx_neg;

	if (delta >= RTW_PWR_TRK_TBL_SZ) {
		rtw_warn(rtwdev, "power track table overflow\n");
		return 0;
	}

	if (!swing_table) {
		rtw_warn(rtwdev, "swing table not configured\n");
		return 0;
	}

	delta_swing_table_idx_pos = swing_table->p[tbl_path];
	delta_swing_table_idx_neg = swing_table->n[tbl_path];

	if (!delta_swing_table_idx_pos || !delta_swing_table_idx_neg) {
		rtw_warn(rtwdev, "invalid swing table index\n");
		return 0;
	}

	if (dm_info->thermal_avg[therm_path] >
	    rtwdev->efuse.thermal_meter[therm_path])
		return delta_swing_table_idx_pos[delta];
	else
		return -delta_swing_table_idx_neg[delta];
}

/* Return true (and latch the new reference temperature) when the thermal
 * drift since the last IQK exceeds the chip's IQK threshold.
 */
bool rtw_phy_pwrtrack_need_iqk(struct rtw_dev *rtwdev)
{
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
	u8 delta_iqk;

	delta_iqk = abs(dm_info->thermal_avg[0] -
			dm_info->thermal_meter_k);
	if (delta_iqk >= rtwdev->chip->iqk_threshold) {
		dm_info->thermal_meter_k = dm_info->thermal_avg[0];
		return true;
	}
	return false;
}