// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019 Realtek Corporation
 */

#include <linux/bcd.h>

#include "main.h"
#include "reg.h"
#include "fw.h"
#include "phy.h"
#include "debug.h"

struct phy_cfg_pair {
	u32 addr;
	u32 data;
};

union phy_table_tile {
	struct rtw_phy_cond cond;
	struct phy_cfg_pair cfg;
};

struct phy_pg_cfg_pair {
	u32 band;
	u32 rf_path;
	u32 tx_num;
	u32 addr;
	u32 bitmask;
	u32 data;
};

static const u32 db_invert_table[12][8] = {
	{10, 13, 16, 20,
	 25, 32, 40, 50},
	{64, 80, 101, 128,
	 160, 201, 256, 318},
	{401, 505, 635, 800,
	 1007, 1268, 1596, 2010},
	{316, 398, 501, 631,
	 794, 1000, 1259, 1585},
	{1995, 2512, 3162, 3981,
	 5012, 6310, 7943, 10000},
	{12589, 15849, 19953, 25119,
	 31623, 39811, 50119, 63098},
	{79433, 100000, 125893, 158489,
	 199526, 251189, 316228, 398107},
	{501187, 630957, 794328, 1000000,
	 1258925, 1584893, 1995262, 2511886},
	{3162278, 3981072, 5011872, 6309573,
	 7943282, 10000000, 12589254, 15848932},
	{19952623, 25118864, 31622777, 39810717,
	 50118723, 63095734, 79432823, 100000000},
	{125892541, 158489319, 199526232, 251188643,
	 316227766, 398107171, 501187234, 630957345},
	{794328235, 1000000000, 1258925412, 1584893192,
	 1995262315, 2511886432U, 3162277660U, 3981071706U}
};
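
/* db_invert_table[i][j] approximates 10^(dB / 10) for dB = i * 8 + j + 1,
 * covering 1 dB through 96 dB.  The first three rows are stored
 * pre-multiplied by 2^FRAC_BITS (x8) to keep some fractional precision;
 * rtw_phy_db_2_linear() shifts the remaining rows up to the same
 * fixed-point scale.  For example, 40 dB sits at [4][7] and yields
 * 10^4 = 10000.
 */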

u8 rtw_cck_rates[] = { DESC_RATE1M, DESC_RATE2M, DESC_RATE5_5M, DESC_RATE11M };
u8 rtw_ofdm_rates[] = {
	DESC_RATE6M, DESC_RATE9M, DESC_RATE12M,
	DESC_RATE18M, DESC_RATE24M, DESC_RATE36M,
	DESC_RATE48M, DESC_RATE54M
};
u8 rtw_ht_1s_rates[] = {
	DESC_RATEMCS0, DESC_RATEMCS1, DESC_RATEMCS2,
	DESC_RATEMCS3, DESC_RATEMCS4, DESC_RATEMCS5,
	DESC_RATEMCS6, DESC_RATEMCS7
};
u8 rtw_ht_2s_rates[] = {
	DESC_RATEMCS8, DESC_RATEMCS9, DESC_RATEMCS10,
	DESC_RATEMCS11, DESC_RATEMCS12, DESC_RATEMCS13,
	DESC_RATEMCS14, DESC_RATEMCS15
};
u8 rtw_vht_1s_rates[] = {
	DESC_RATEVHT1SS_MCS0, DESC_RATEVHT1SS_MCS1,
	DESC_RATEVHT1SS_MCS2, DESC_RATEVHT1SS_MCS3,
	DESC_RATEVHT1SS_MCS4, DESC_RATEVHT1SS_MCS5,
	DESC_RATEVHT1SS_MCS6, DESC_RATEVHT1SS_MCS7,
	DESC_RATEVHT1SS_MCS8, DESC_RATEVHT1SS_MCS9
};
u8 rtw_vht_2s_rates[] = {
	DESC_RATEVHT2SS_MCS0, DESC_RATEVHT2SS_MCS1,
	DESC_RATEVHT2SS_MCS2, DESC_RATEVHT2SS_MCS3,
	DESC_RATEVHT2SS_MCS4, DESC_RATEVHT2SS_MCS5,
	DESC_RATEVHT2SS_MCS6, DESC_RATEVHT2SS_MCS7,
	DESC_RATEVHT2SS_MCS8, DESC_RATEVHT2SS_MCS9
};
u8 *rtw_rate_section[RTW_RATE_SECTION_MAX] = {
	rtw_cck_rates, rtw_ofdm_rates,
	rtw_ht_1s_rates, rtw_ht_2s_rates,
	rtw_vht_1s_rates, rtw_vht_2s_rates
};
u8 rtw_rate_size[RTW_RATE_SECTION_MAX] = {
	ARRAY_SIZE(rtw_cck_rates),
	ARRAY_SIZE(rtw_ofdm_rates),
	ARRAY_SIZE(rtw_ht_1s_rates),
	ARRAY_SIZE(rtw_ht_2s_rates),
	ARRAY_SIZE(rtw_vht_1s_rates),
	ARRAY_SIZE(rtw_vht_2s_rates)
};
static const u8 rtw_cck_size = ARRAY_SIZE(rtw_cck_rates);
static const u8 rtw_ofdm_size = ARRAY_SIZE(rtw_ofdm_rates);
static const u8 rtw_ht_1s_size = ARRAY_SIZE(rtw_ht_1s_rates);
static const u8 rtw_ht_2s_size = ARRAY_SIZE(rtw_ht_2s_rates);
static const u8 rtw_vht_1s_size = ARRAY_SIZE(rtw_vht_1s_rates);
static const u8 rtw_vht_2s_size = ARRAY_SIZE(rtw_vht_2s_rates);

enum rtw_phy_band_type {
	PHY_BAND_2G = 0,
	PHY_BAND_5G = 1,
};

static void rtw_phy_cck_pd_init(struct rtw_dev *rtwdev)
{
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
	u8 i, j;

	for (i = 0; i <= RTW_CHANNEL_WIDTH_40; i++) {
		for (j = 0; j < RTW_RF_PATH_MAX; j++)
			dm_info->cck_pd_lv[i][j] = 0;
	}

	dm_info->cck_fa_avg = CCK_FA_AVG_RESET;
}

void rtw_phy_init(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
	u32 addr, mask;

	dm_info->fa_history[3] = 0;
	dm_info->fa_history[2] = 0;
	dm_info->fa_history[1] = 0;
	dm_info->fa_history[0] = 0;
	dm_info->igi_bitmap = 0;
	dm_info->igi_history[3] = 0;
	dm_info->igi_history[2] = 0;
	dm_info->igi_history[1] = 0;

	addr = chip->dig[0].addr;
	mask = chip->dig[0].mask;
	dm_info->igi_history[0] = rtw_read32_mask(rtwdev, addr, mask);
	rtw_phy_cck_pd_init(rtwdev);
}

void rtw_phy_dig_write(struct rtw_dev *rtwdev, u8 igi)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_hal *hal = &rtwdev->hal;
	u32 addr, mask;
	u8 path;

	for (path = 0; path < hal->rf_path_num; path++) {
		addr = chip->dig[path].addr;
		mask = chip->dig[path].mask;
		rtw_write32_mask(rtwdev, addr, mask, igi);
	}
}

static void rtw_phy_stat_false_alarm(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;

	chip->ops->false_alarm_statistics(rtwdev);
}

#define RA_FLOOR_TABLE_SIZE	7
#define RA_FLOOR_UP_GAP		3

static u8 rtw_phy_get_rssi_level(u8 old_level, u8 rssi)
{
	u8 table[RA_FLOOR_TABLE_SIZE] = {20, 34, 38, 42, 46, 50, 100};
	u8 new_level = 0;
	int i;

	for (i = 0; i < RA_FLOOR_TABLE_SIZE; i++)
		if (i >= old_level)
			table[i] += RA_FLOOR_UP_GAP;

	for (i = 0; i < RA_FLOOR_TABLE_SIZE; i++) {
		if (rssi < table[i]) {
			new_level = i;
			break;
		}
	}

	return new_level;
}
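
/* The RSSI level lookup applies an upward hysteresis of RA_FLOOR_UP_GAP:
 * thresholds at or above the previous level are raised by 3 before the
 * comparison.  e.g. with old_level = 2, an RSSI of 36 is compared against
 * {20, 34, 41, ...} and stays at level 2, while a fresh old_level of 0
 * would classify the same RSSI as level 1 ({23, 37, ...}).
 */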

struct rtw_phy_stat_iter_data {
	struct rtw_dev *rtwdev;
	u8 min_rssi;
};

static void rtw_phy_stat_rssi_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw_phy_stat_iter_data *iter_data = data;
	struct rtw_dev *rtwdev = iter_data->rtwdev;
	struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
	u8 rssi;

	rssi = ewma_rssi_read(&si->avg_rssi);
	si->rssi_level = rtw_phy_get_rssi_level(si->rssi_level, rssi);

	rtw_fw_send_rssi_info(rtwdev, si);

	iter_data->min_rssi = min_t(u8, rssi, iter_data->min_rssi);
}

static void rtw_phy_stat_rssi(struct rtw_dev *rtwdev)
{
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
	struct rtw_phy_stat_iter_data data = {};

	data.rtwdev = rtwdev;
	data.min_rssi = U8_MAX;
	rtw_iterate_stas_atomic(rtwdev, rtw_phy_stat_rssi_iter, &data);

	dm_info->pre_min_rssi = dm_info->min_rssi;
	dm_info->min_rssi = data.min_rssi;
}

static void rtw_phy_statistics(struct rtw_dev *rtwdev)
{
	rtw_phy_stat_rssi(rtwdev);
	rtw_phy_stat_false_alarm(rtwdev);
}

#define DIG_PERF_FA_TH_LOW		250
#define DIG_PERF_FA_TH_HIGH		500
#define DIG_PERF_FA_TH_EXTRA_HIGH	750
#define DIG_PERF_MAX			0x5a
#define DIG_PERF_MID			0x40
#define DIG_CVRG_FA_TH_LOW		2000
#define DIG_CVRG_FA_TH_HIGH		4000
#define DIG_CVRG_FA_TH_EXTRA_HIGH	5000
#define DIG_CVRG_MAX			0x2a
#define DIG_CVRG_MID			0x26
#define DIG_CVRG_MIN			0x1c
#define DIG_RSSI_GAIN_OFFSET		15

static bool
rtw_phy_dig_check_damping(struct rtw_dm_info *dm_info)
{
	u16 fa_lo = DIG_PERF_FA_TH_LOW;
	u16 fa_hi = DIG_PERF_FA_TH_HIGH;
	u16 *fa_history;
	u8 *igi_history;
	u8 damping_rssi;
	u8 min_rssi;
	u8 diff;
	u8 igi_bitmap;
	bool damping = false;

	min_rssi = dm_info->min_rssi;
	if (dm_info->damping) {
		damping_rssi = dm_info->damping_rssi;
		diff = min_rssi > damping_rssi ? min_rssi - damping_rssi :
						 damping_rssi - min_rssi;
		if (diff > 3 || dm_info->damping_cnt++ > 20) {
			dm_info->damping = false;
			return false;
		}

		return true;
	}

	igi_history = dm_info->igi_history;
	fa_history = dm_info->fa_history;
	igi_bitmap = dm_info->igi_bitmap & 0xf;
	switch (igi_bitmap) {
	case 5:
		/* down -> up -> down -> up */
		if (igi_history[0] > igi_history[1] &&
		    igi_history[2] > igi_history[3] &&
		    igi_history[0] - igi_history[1] >= 2 &&
		    igi_history[2] - igi_history[3] >= 2 &&
		    fa_history[0] > fa_hi && fa_history[1] < fa_lo &&
		    fa_history[2] > fa_hi && fa_history[3] < fa_lo)
			damping = true;
		break;
	case 9:
		/* up -> down -> down -> up */
		if (igi_history[0] > igi_history[1] &&
		    igi_history[3] > igi_history[2] &&
		    igi_history[0] - igi_history[1] >= 4 &&
		    igi_history[3] - igi_history[2] >= 2 &&
		    fa_history[0] > fa_hi && fa_history[1] < fa_lo &&
		    fa_history[2] < fa_lo && fa_history[3] > fa_hi)
			damping = true;
		break;
	default:
		return false;
	}

	if (damping) {
		dm_info->damping = true;
		dm_info->damping_cnt = 0;
		dm_info->damping_rssi = min_rssi;
	}

	return damping;
}

static void rtw_phy_dig_get_boundary(struct rtw_dm_info *dm_info,
				     u8 *upper, u8 *lower, bool linked)
{
	u8 dig_max, dig_min, dig_mid;
	u8 min_rssi;

	if (linked) {
		dig_max = DIG_PERF_MAX;
		dig_mid = DIG_PERF_MID;
		/* 22B=0x1c, 22C=0x20 */
		dig_min = 0x1c;
		min_rssi = max_t(u8, dm_info->min_rssi, dig_min);
	} else {
		dig_max = DIG_CVRG_MAX;
		dig_mid = DIG_CVRG_MID;
		dig_min = DIG_CVRG_MIN;
		min_rssi = dig_min;
	}

	/* DIG MAX should be bounded by minimum RSSI with offset +15 */
	dig_max = min_t(u8, dig_max, min_rssi + DIG_RSSI_GAIN_OFFSET);

	*lower = clamp_t(u8, min_rssi, dig_min, dig_mid);
	*upper = clamp_t(u8, *lower + DIG_RSSI_GAIN_OFFSET, dig_min, dig_max);
}

static void rtw_phy_dig_get_threshold(struct rtw_dm_info *dm_info,
				      u16 *fa_th, u8 *step, bool linked)
{
	u8 min_rssi, pre_min_rssi;

	min_rssi = dm_info->min_rssi;
	pre_min_rssi = dm_info->pre_min_rssi;
	step[0] = 4;
	step[1] = 3;
	step[2] = 2;

	if (linked) {
		fa_th[0] = DIG_PERF_FA_TH_EXTRA_HIGH;
		fa_th[1] = DIG_PERF_FA_TH_HIGH;
		fa_th[2] = DIG_PERF_FA_TH_LOW;
		if (pre_min_rssi > min_rssi) {
			step[0] = 6;
			step[1] = 4;
			step[2] = 2;
		}
	} else {
		fa_th[0] = DIG_CVRG_FA_TH_EXTRA_HIGH;
		fa_th[1] = DIG_CVRG_FA_TH_HIGH;
		fa_th[2] = DIG_CVRG_FA_TH_LOW;
	}
}

static void rtw_phy_dig_recorder(struct rtw_dm_info *dm_info, u8 igi, u16 fa)
{
	u8 *igi_history;
	u16 *fa_history;
	u8 igi_bitmap;
	bool up;

	igi_bitmap = dm_info->igi_bitmap << 1 & 0xfe;
	igi_history = dm_info->igi_history;
	fa_history = dm_info->fa_history;

	up = igi > igi_history[0];
	igi_bitmap |= up;

	igi_history[3] = igi_history[2];
	igi_history[2] = igi_history[1];
	igi_history[1] = igi_history[0];
	igi_history[0] = igi;

	fa_history[3] = fa_history[2];
	fa_history[2] = fa_history[1];
	fa_history[1] = fa_history[0];
	fa_history[0] = fa;

	dm_info->igi_bitmap = igi_bitmap;
}
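
/* igi_bitmap keeps one "IGI went up" bit per update, newest in bit 0.
 * The low-nibble values checked by rtw_phy_dig_check_damping() therefore
 * encode recent trends: 0x5 (0b0101) is down/up/down/up and 0x9 (0b1001)
 * is up/down/down/up, the two oscillation patterns treated as damping.
 */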

static void rtw_phy_dig(struct rtw_dev *rtwdev)
{
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
	u8 upper_bound, lower_bound;
	u8 pre_igi, cur_igi;
	u16 fa_th[3], fa_cnt;
	u8 level;
	u8 step[3];
	bool linked;

	if (rtw_flag_check(rtwdev, RTW_FLAG_DIG_DISABLE))
		return;

	if (rtw_phy_dig_check_damping(dm_info))
		return;

	linked = !!rtwdev->sta_cnt;

	fa_cnt = dm_info->total_fa_cnt;
	pre_igi = dm_info->igi_history[0];

	rtw_phy_dig_get_threshold(dm_info, fa_th, step, linked);

	/* test the false alarm count against the highest threshold level
	 * first, and increase IGI by the corresponding step size
	 *
	 * note that the step sizes are offset by -2, which is compensated
	 * for afterwards
	 */
	cur_igi = pre_igi;
	for (level = 0; level < 3; level++) {
		if (fa_cnt > fa_th[level]) {
			cur_igi += step[level];
			break;
		}
	}
	cur_igi -= 2;

	/* calculate the upper/lower bound from the minimum RSSI among the
	 * peers connected to us, and make sure the IGI value does not go
	 * beyond the hardware limitation
	 */
	rtw_phy_dig_get_boundary(dm_info, &upper_bound, &lower_bound, linked);
	cur_igi = clamp_t(u8, cur_igi, lower_bound, upper_bound);

	/* record the current IGI value and false alarm statistics for
	 * further damping checks, and record the trend of IGI values
	 */
	rtw_phy_dig_recorder(dm_info, cur_igi, fa_cnt);

	if (cur_igi != pre_igi)
		rtw_phy_dig_write(rtwdev, cur_igi);
}
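
/* With the linked thresholds {750, 500, 250} and the default steps
 * {4, 3, 2}, a false alarm count of 600 exceeds fa_th[1], so IGI moves by
 * +3 - 2 = +1; a count below 250 matches no level and IGI drops by the
 * fixed -2.
 */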

static void rtw_phy_ra_info_update_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw_dev *rtwdev = data;
	struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;

	rtw_update_sta_info(rtwdev, si);
}

static void rtw_phy_ra_info_update(struct rtw_dev *rtwdev)
{
	if (rtwdev->watch_dog_cnt & 0x3)
		return;

	rtw_iterate_stas_atomic(rtwdev, rtw_phy_ra_info_update_iter, rtwdev);
}

static void rtw_phy_dpk_track(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;

	if (chip->ops->dpk_track)
		chip->ops->dpk_track(rtwdev);
}

#define CCK_PD_LV_MAX		5
#define CCK_PD_FA_LV1_MIN	1000
#define CCK_PD_FA_LV0_MAX	500

static u8 rtw_phy_cck_pd_lv_unlink(struct rtw_dev *rtwdev)
{
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
	u32 cck_fa_avg = dm_info->cck_fa_avg;

	if (cck_fa_avg > CCK_PD_FA_LV1_MIN)
		return 1;

	if (cck_fa_avg < CCK_PD_FA_LV0_MAX)
		return 0;

	return CCK_PD_LV_MAX;
}

#define CCK_PD_IGI_LV4_VAL	0x38
#define CCK_PD_IGI_LV3_VAL	0x2a
#define CCK_PD_IGI_LV2_VAL	0x24
#define CCK_PD_RSSI_LV4_VAL	32
#define CCK_PD_RSSI_LV3_VAL	32
#define CCK_PD_RSSI_LV2_VAL	24

static u8 rtw_phy_cck_pd_lv_link(struct rtw_dev *rtwdev)
{
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
	u8 igi = dm_info->igi_history[0];
	u8 rssi = dm_info->min_rssi;
	u32 cck_fa_avg = dm_info->cck_fa_avg;

	if (igi > CCK_PD_IGI_LV4_VAL && rssi > CCK_PD_RSSI_LV4_VAL)
		return 4;
	if (igi > CCK_PD_IGI_LV3_VAL && rssi > CCK_PD_RSSI_LV3_VAL)
		return 3;
	if (igi > CCK_PD_IGI_LV2_VAL || rssi > CCK_PD_RSSI_LV2_VAL)
		return 2;
	if (cck_fa_avg > CCK_PD_FA_LV1_MIN)
		return 1;
	if (cck_fa_avg < CCK_PD_FA_LV0_MAX)
		return 0;

	return CCK_PD_LV_MAX;
}

static u8 rtw_phy_cck_pd_lv(struct rtw_dev *rtwdev)
{
	if (!rtw_is_assoc(rtwdev))
		return rtw_phy_cck_pd_lv_unlink(rtwdev);
	else
		return rtw_phy_cck_pd_lv_link(rtwdev);
}

static void rtw_phy_cck_pd(struct rtw_dev *rtwdev)
{
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
	struct rtw_chip_info *chip = rtwdev->chip;
	u32 cck_fa = dm_info->cck_fa_cnt;
	u8 level;

	if (rtwdev->hal.current_band_type != RTW_BAND_2G)
		return;

	if (dm_info->cck_fa_avg == CCK_FA_AVG_RESET)
		dm_info->cck_fa_avg = cck_fa;
	else
		dm_info->cck_fa_avg = (dm_info->cck_fa_avg * 3 + cck_fa) >> 2;

	level = rtw_phy_cck_pd_lv(rtwdev);

	if (level >= CCK_PD_LV_MAX)
		return;

	if (chip->ops->cck_pd_set)
		chip->ops->cck_pd_set(rtwdev, level);
}

void rtw_phy_dynamic_mechanism(struct rtw_dev *rtwdev)
{
	/* for further calculation */
	rtw_phy_statistics(rtwdev);

	rtw_phy_dig(rtwdev);
	rtw_phy_cck_pd(rtwdev);
	rtw_phy_ra_info_update(rtwdev);
	rtw_phy_dpk_track(rtwdev);
}

#define FRAC_BITS 3

static u8 rtw_phy_power_2_db(s8 power)
{
	if (power <= -100 || power >= 20)
		return 0;
	else if (power >= 0)
		return 100;
	else
		return 100 + power;
}

static u64 rtw_phy_db_2_linear(u8 power_db)
{
	u8 i, j;
	u64 linear;

	if (power_db > 96)
		power_db = 96;
	else if (power_db < 1)
		return 1;

	/* 1dB ~ 96dB */
	i = (power_db - 1) >> 3;
	j = (power_db - 1) - (i << 3);

	linear = db_invert_table[i][j];
	linear = i > 2 ? linear << FRAC_BITS : linear;

	return linear;
}

static u8 rtw_phy_linear_2_db(u64 linear)
{
	u8 i;
	u8 j;
	u32 dB;

	if (linear >= db_invert_table[11][7])
		return 96; /* maximum 96 dB */

	for (i = 0; i < 12; i++) {
		if (i <= 2 && (linear << FRAC_BITS) <= db_invert_table[i][7])
			break;
		else if (i > 2 && linear <= db_invert_table[i][7])
			break;
	}

	for (j = 0; j < 8; j++) {
		if (i <= 2 && (linear << FRAC_BITS) <= db_invert_table[i][j])
			break;
		else if (i > 2 && linear <= db_invert_table[i][j])
			break;
	}

	if (j == 0 && i == 0)
		goto end;

	if (j == 0) {
		if (i != 3) {
			if (db_invert_table[i][0] - linear >
			    linear - db_invert_table[i - 1][7]) {
				i = i - 1;
				j = 7;
			}
		} else {
			if (db_invert_table[3][0] - linear >
			    linear - db_invert_table[2][7]) {
				i = 2;
				j = 7;
			}
		}
	} else {
		if (db_invert_table[i][j] - linear >
		    linear - db_invert_table[i][j - 1]) {
			j = j - 1;
		}
	}
end:
	dB = (i << 3) + j + 1;

	return dB;
}

u8 rtw_phy_rf_power_2_rssi(s8 *rf_power, u8 path_num)
{
	s8 power;
	u8 power_db;
	u64 linear;
	u64 sum = 0;
	u8 path;

	for (path = 0; path < path_num; path++) {
		power = rf_power[path];
		power_db = rtw_phy_power_2_db(power);
		linear = rtw_phy_db_2_linear(power_db);
		sum += linear;
	}

	sum = (sum + (1 << (FRAC_BITS - 1))) >> FRAC_BITS;
	switch (path_num) {
	case 2:
		sum >>= 1;
		break;
	case 3:
		sum = ((sum) + ((sum) << 1) + ((sum) << 3)) >> 5;
		break;
	case 4:
		sum >>= 2;
		break;
	default:
		break;
	}

	return rtw_phy_linear_2_db(sum);
}
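
/* Example: two paths both reporting -50 dBm give power_db = 50 each, i.e.
 * 800000 in the x8 fixed-point linear domain.  The rounded sum (200000)
 * halved for two paths is 100000, which rtw_phy_linear_2_db() maps back
 * to 50.
 */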

u32 rtw_phy_read_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
		    u32 addr, u32 mask)
{
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_chip_info *chip = rtwdev->chip;
	const u32 *base_addr = chip->rf_base_addr;
	u32 val, direct_addr;

	if (rf_path >= hal->rf_path_num) {
		rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return INV_RF_DATA;
	}

	addr &= 0xff;
	direct_addr = base_addr[rf_path] + (addr << 2);
	mask &= RFREG_MASK;

	val = rtw_read32_mask(rtwdev, direct_addr, mask);

	return val;
}

bool rtw_phy_write_rf_reg_sipi(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
			       u32 addr, u32 mask, u32 data)
{
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_chip_info *chip = rtwdev->chip;
	u32 *sipi_addr = chip->rf_sipi_addr;
	u32 data_and_addr;
	u32 old_data = 0;
	u32 shift;

	if (rf_path >= hal->rf_path_num) {
		rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return false;
	}

	addr &= 0xff;
	mask &= RFREG_MASK;

	if (mask != RFREG_MASK) {
		old_data = rtw_phy_read_rf(rtwdev, rf_path, addr, RFREG_MASK);

		if (old_data == INV_RF_DATA) {
			rtw_err(rtwdev, "Write fail, rf is disabled\n");
			return false;
		}

		shift = __ffs(mask);
		data = ((old_data) & (~mask)) | (data << shift);
	}

	data_and_addr = ((addr << 20) | (data & 0x000fffff)) & 0x0fffffff;

	rtw_write32(rtwdev, sipi_addr[rf_path], data_and_addr);

	udelay(13);

	return true;
}
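
/* The SIPI word packs the RF register address into bits [27:20] and the
 * 20-bit register value into bits [19:0].  Partial-mask writes are done as
 * read-modify-write: e.g. mask 0x00038 and data 0x5 on an old value of
 * 0x12345 yields (0x12345 & ~0x38) | (0x5 << 3) = 0x1236d.
 */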

bool rtw_phy_write_rf_reg(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
			  u32 addr, u32 mask, u32 data)
{
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_chip_info *chip = rtwdev->chip;
	const u32 *base_addr = chip->rf_base_addr;
	u32 direct_addr;

	if (rf_path >= hal->rf_path_num) {
		rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return false;
	}

	addr &= 0xff;
	direct_addr = base_addr[rf_path] + (addr << 2);
	mask &= RFREG_MASK;

	if (addr == RF_CFGCH) {
		rtw_write32_mask(rtwdev, REG_RSV_CTRL, BITS_RFC_DIRECT, DISABLE_PI);
		rtw_write32_mask(rtwdev, REG_WLRF1, BITS_RFC_DIRECT, DISABLE_PI);
	}

	rtw_write32_mask(rtwdev, direct_addr, mask, data);

	udelay(1);

	if (addr == RF_CFGCH) {
		rtw_write32_mask(rtwdev, REG_RSV_CTRL, BITS_RFC_DIRECT, ENABLE_PI);
		rtw_write32_mask(rtwdev, REG_WLRF1, BITS_RFC_DIRECT, ENABLE_PI);
	}

	return true;
}

bool rtw_phy_write_rf_reg_mix(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
			      u32 addr, u32 mask, u32 data)
{
	if (addr != 0x00)
		return rtw_phy_write_rf_reg(rtwdev, rf_path, addr, mask, data);

	return rtw_phy_write_rf_reg_sipi(rtwdev, rf_path, addr, mask, data);
}

void rtw_phy_setup_phy_cond(struct rtw_dev *rtwdev, u32 pkg)
{
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_efuse *efuse = &rtwdev->efuse;
	struct rtw_phy_cond cond = {0};

	cond.cut = hal->cut_version ? hal->cut_version : 15;
	cond.pkg = pkg ? pkg : 15;
	cond.plat = 0x04;
	cond.rfe = efuse->rfe_option;

	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_USB:
		cond.intf = INTF_USB;
		break;
	case RTW_HCI_TYPE_SDIO:
		cond.intf = INTF_SDIO;
		break;
	case RTW_HCI_TYPE_PCIE:
	default:
		cond.intf = INTF_PCIE;
		break;
	}

	hal->phy_cond = cond;

	rtw_dbg(rtwdev, RTW_DBG_PHY, "phy cond=0x%08x\n", *((u32 *)&hal->phy_cond));
}

static bool check_positive(struct rtw_dev *rtwdev, struct rtw_phy_cond cond)
{
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_phy_cond drv_cond = hal->phy_cond;

	if (cond.cut && cond.cut != drv_cond.cut)
		return false;

	if (cond.pkg && cond.pkg != drv_cond.pkg)
		return false;

	if (cond.intf && cond.intf != drv_cond.intf)
		return false;

	if (cond.rfe != drv_cond.rfe)
		return false;

	return true;
}

void rtw_parse_tbl_phy_cond(struct rtw_dev *rtwdev, const struct rtw_table *tbl)
{
	const union phy_table_tile *p = tbl->data;
	const union phy_table_tile *end = p + tbl->size / 2;
	struct rtw_phy_cond pos_cond = {0};
	bool is_matched = true, is_skipped = false;

	BUILD_BUG_ON(sizeof(union phy_table_tile) != sizeof(struct phy_cfg_pair));

	for (; p < end; p++) {
		if (p->cond.pos) {
			switch (p->cond.branch) {
			case BRANCH_ENDIF:
				is_matched = true;
				is_skipped = false;
				break;
			case BRANCH_ELSE:
				is_matched = is_skipped ? false : true;
				break;
			case BRANCH_IF:
			case BRANCH_ELIF:
			default:
				pos_cond = p->cond;
				break;
			}
		} else if (p->cond.neg) {
			if (!is_skipped) {
				if (check_positive(rtwdev, pos_cond)) {
					is_matched = true;
					is_skipped = true;
				} else {
					is_matched = false;
					is_skipped = false;
				}
			} else {
				is_matched = false;
			}
		} else if (is_matched) {
			(*tbl->do_cfg)(rtwdev, tbl, p->cfg.addr, p->cfg.data);
		}
	}
}

#define bcd_to_dec_pwr_by_rate(val, i) bcd2bin(val >> (i * 8))

static u8 tbl_to_dec_pwr_by_rate(struct rtw_dev *rtwdev, u32 hex, u8 i)
{
	if (rtwdev->chip->is_pwr_by_rate_dec)
		return bcd_to_dec_pwr_by_rate(hex, i);

	return (hex >> (i * 8)) & 0xFF;
}
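
/* tbl_to_dec_pwr_by_rate() extracts byte i of a 32-bit power-by-rate word;
 * with BCD-encoded tables (is_pwr_by_rate_dec), e.g. i = 2 on 0x32282420
 * selects 0x28, which bcd2bin() converts to 28.
 */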

static void
rtw_phy_get_rate_values_of_txpwr_by_rate(struct rtw_dev *rtwdev,
					 u32 addr, u32 mask, u32 val, u8 *rate,
					 u8 *pwr_by_rate, u8 *rate_num)
{
	int i;

	switch (addr) {
	case 0xE00:
	case 0x830:
		rate[0] = DESC_RATE6M;
		rate[1] = DESC_RATE9M;
		rate[2] = DESC_RATE12M;
		rate[3] = DESC_RATE18M;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xE04:
	case 0x834:
		rate[0] = DESC_RATE24M;
		rate[1] = DESC_RATE36M;
		rate[2] = DESC_RATE48M;
		rate[3] = DESC_RATE54M;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xE08:
		rate[0] = DESC_RATE1M;
		pwr_by_rate[0] = bcd_to_dec_pwr_by_rate(val, 1);
		*rate_num = 1;
		break;
	case 0x86C:
		if (mask == 0xffffff00) {
			rate[0] = DESC_RATE2M;
			rate[1] = DESC_RATE5_5M;
			rate[2] = DESC_RATE11M;
			for (i = 1; i < 4; ++i)
				pwr_by_rate[i - 1] =
					tbl_to_dec_pwr_by_rate(rtwdev, val, i);
			*rate_num = 3;
		} else if (mask == 0x000000ff) {
			rate[0] = DESC_RATE11M;
			pwr_by_rate[0] = bcd_to_dec_pwr_by_rate(val, 0);
			*rate_num = 1;
		}
		break;
	case 0xE10:
	case 0x83C:
		rate[0] = DESC_RATEMCS0;
		rate[1] = DESC_RATEMCS1;
		rate[2] = DESC_RATEMCS2;
		rate[3] = DESC_RATEMCS3;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xE14:
	case 0x848:
		rate[0] = DESC_RATEMCS4;
		rate[1] = DESC_RATEMCS5;
		rate[2] = DESC_RATEMCS6;
		rate[3] = DESC_RATEMCS7;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xE18:
	case 0x84C:
		rate[0] = DESC_RATEMCS8;
		rate[1] = DESC_RATEMCS9;
		rate[2] = DESC_RATEMCS10;
		rate[3] = DESC_RATEMCS11;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xE1C:
	case 0x868:
		rate[0] = DESC_RATEMCS12;
		rate[1] = DESC_RATEMCS13;
		rate[2] = DESC_RATEMCS14;
		rate[3] = DESC_RATEMCS15;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0x838:
		rate[0] = DESC_RATE1M;
		rate[1] = DESC_RATE2M;
		rate[2] = DESC_RATE5_5M;
		for (i = 1; i < 4; ++i)
			pwr_by_rate[i - 1] = tbl_to_dec_pwr_by_rate(rtwdev,
								    val, i);
		*rate_num = 3;
		break;
	case 0xC20:
	case 0xE20:
	case 0x1820:
	case 0x1A20:
		rate[0] = DESC_RATE1M;
		rate[1] = DESC_RATE2M;
		rate[2] = DESC_RATE5_5M;
		rate[3] = DESC_RATE11M;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC24:
	case 0xE24:
	case 0x1824:
	case 0x1A24:
		rate[0] = DESC_RATE6M;
		rate[1] = DESC_RATE9M;
		rate[2] = DESC_RATE12M;
		rate[3] = DESC_RATE18M;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC28:
	case 0xE28:
	case 0x1828:
	case 0x1A28:
		rate[0] = DESC_RATE24M;
		rate[1] = DESC_RATE36M;
		rate[2] = DESC_RATE48M;
		rate[3] = DESC_RATE54M;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC2C:
	case 0xE2C:
	case 0x182C:
	case 0x1A2C:
		rate[0] = DESC_RATEMCS0;
		rate[1] = DESC_RATEMCS1;
		rate[2] = DESC_RATEMCS2;
		rate[3] = DESC_RATEMCS3;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC30:
	case 0xE30:
	case 0x1830:
	case 0x1A30:
		rate[0] = DESC_RATEMCS4;
		rate[1] = DESC_RATEMCS5;
		rate[2] = DESC_RATEMCS6;
		rate[3] = DESC_RATEMCS7;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC34:
	case 0xE34:
	case 0x1834:
	case 0x1A34:
		rate[0] = DESC_RATEMCS8;
		rate[1] = DESC_RATEMCS9;
		rate[2] = DESC_RATEMCS10;
		rate[3] = DESC_RATEMCS11;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC38:
	case 0xE38:
	case 0x1838:
	case 0x1A38:
		rate[0] = DESC_RATEMCS12;
		rate[1] = DESC_RATEMCS13;
		rate[2] = DESC_RATEMCS14;
		rate[3] = DESC_RATEMCS15;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC3C:
	case 0xE3C:
	case 0x183C:
	case 0x1A3C:
		rate[0] = DESC_RATEVHT1SS_MCS0;
		rate[1] = DESC_RATEVHT1SS_MCS1;
		rate[2] = DESC_RATEVHT1SS_MCS2;
		rate[3] = DESC_RATEVHT1SS_MCS3;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC40:
	case 0xE40:
	case 0x1840:
	case 0x1A40:
		rate[0] = DESC_RATEVHT1SS_MCS4;
		rate[1] = DESC_RATEVHT1SS_MCS5;
		rate[2] = DESC_RATEVHT1SS_MCS6;
		rate[3] = DESC_RATEVHT1SS_MCS7;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC44:
	case 0xE44:
	case 0x1844:
	case 0x1A44:
		rate[0] = DESC_RATEVHT1SS_MCS8;
		rate[1] = DESC_RATEVHT1SS_MCS9;
		rate[2] = DESC_RATEVHT2SS_MCS0;
		rate[3] = DESC_RATEVHT2SS_MCS1;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC48:
	case 0xE48:
	case 0x1848:
	case 0x1A48:
		rate[0] = DESC_RATEVHT2SS_MCS2;
		rate[1] = DESC_RATEVHT2SS_MCS3;
		rate[2] = DESC_RATEVHT2SS_MCS4;
		rate[3] = DESC_RATEVHT2SS_MCS5;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC4C:
	case 0xE4C:
	case 0x184C:
	case 0x1A4C:
		rate[0] = DESC_RATEVHT2SS_MCS6;
		rate[1] = DESC_RATEVHT2SS_MCS7;
		rate[2] = DESC_RATEVHT2SS_MCS8;
		rate[3] = DESC_RATEVHT2SS_MCS9;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xCD8:
	case 0xED8:
	case 0x18D8:
	case 0x1AD8:
		rate[0] = DESC_RATEMCS16;
		rate[1] = DESC_RATEMCS17;
		rate[2] = DESC_RATEMCS18;
		rate[3] = DESC_RATEMCS19;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xCDC:
	case 0xEDC:
	case 0x18DC:
	case 0x1ADC:
		rate[0] = DESC_RATEMCS20;
		rate[1] = DESC_RATEMCS21;
		rate[2] = DESC_RATEMCS22;
		rate[3] = DESC_RATEMCS23;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xCE0:
	case 0xEE0:
	case 0x18E0:
	case 0x1AE0:
		rate[0] = DESC_RATEVHT3SS_MCS0;
		rate[1] = DESC_RATEVHT3SS_MCS1;
		rate[2] = DESC_RATEVHT3SS_MCS2;
		rate[3] = DESC_RATEVHT3SS_MCS3;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xCE4:
	case 0xEE4:
	case 0x18E4:
	case 0x1AE4:
		rate[0] = DESC_RATEVHT3SS_MCS4;
		rate[1] = DESC_RATEVHT3SS_MCS5;
		rate[2] = DESC_RATEVHT3SS_MCS6;
		rate[3] = DESC_RATEVHT3SS_MCS7;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xCE8:
	case 0xEE8:
	case 0x18E8:
	case 0x1AE8:
		rate[0] = DESC_RATEVHT3SS_MCS8;
		rate[1] = DESC_RATEVHT3SS_MCS9;
		for (i = 0; i < 2; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 2;
		break;
	default:
		rtw_warn(rtwdev, "invalid tx power index addr 0x%08x\n", addr);
		break;
	}
}
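
/* The switch above maps each supported BB register (and, for 0x86C, its
 * mask) to the DESC_RATE indexes carried in its bytes; e.g. a table entry
 * targeting 0xC3C assigns its four bytes, lowest first, to VHT1SS
 * MCS0..MCS3.
 */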

static void rtw_phy_store_tx_power_by_rate(struct rtw_dev *rtwdev,
					   u32 band, u32 rfpath, u32 txnum,
					   u32 regaddr, u32 bitmask, u32 data)
{
	struct rtw_hal *hal = &rtwdev->hal;
	u8 rate_num = 0;
	u8 rate;
	u8 rates[RTW_RF_PATH_MAX] = {0};
	s8 offset;
	s8 pwr_by_rate[RTW_RF_PATH_MAX] = {0};
	int i;

	rtw_phy_get_rate_values_of_txpwr_by_rate(rtwdev, regaddr, bitmask, data,
						 rates, pwr_by_rate, &rate_num);

	if (WARN_ON(rfpath >= RTW_RF_PATH_MAX ||
		    (band != PHY_BAND_2G && band != PHY_BAND_5G) ||
		    rate_num > RTW_RF_PATH_MAX))
		return;

	for (i = 0; i < rate_num; i++) {
		offset = pwr_by_rate[i];
		rate = rates[i];
		if (band == PHY_BAND_2G)
			hal->tx_pwr_by_rate_offset_2g[rfpath][rate] = offset;
		else if (band == PHY_BAND_5G)
			hal->tx_pwr_by_rate_offset_5g[rfpath][rate] = offset;
		else
			continue;
	}
}

void rtw_parse_tbl_bb_pg(struct rtw_dev *rtwdev, const struct rtw_table *tbl)
{
	const struct phy_pg_cfg_pair *p = tbl->data;
	const struct phy_pg_cfg_pair *end = p + tbl->size / 6;

	BUILD_BUG_ON(sizeof(struct phy_pg_cfg_pair) != sizeof(u32) * 6);

	for (; p < end; p++) {
		if (p->addr == 0xfe || p->addr == 0xffe) {
			msleep(50);
			continue;
		}
		rtw_phy_store_tx_power_by_rate(rtwdev, p->band, p->rf_path,
					       p->tx_num, p->addr, p->bitmask,
					       p->data);
	}
}

static const u8 rtw_channel_idx_5g[RTW_MAX_CHANNEL_NUM_5G] = {
	36, 38, 40, 42, 44, 46, 48, /* Band 1 */
	52, 54, 56, 58, 60, 62, 64, /* Band 2 */
	100, 102, 104, 106, 108, 110, 112, /* Band 3 */
	116, 118, 120, 122, 124, 126, 128, /* Band 3 */
	132, 134, 136, 138, 140, 142, 144, /* Band 3 */
	149, 151, 153, 155, 157, 159, 161, /* Band 4 */
	165, 167, 169, 171, 173, 175, 177}; /* Band 4 */

static int rtw_channel_to_idx(u8 band, u8 channel)
{
	int ch_idx;
	u8 n_channel;

	if (band == PHY_BAND_2G) {
		ch_idx = channel - 1;
		n_channel = RTW_MAX_CHANNEL_NUM_2G;
	} else if (band == PHY_BAND_5G) {
		n_channel = RTW_MAX_CHANNEL_NUM_5G;
		for (ch_idx = 0; ch_idx < n_channel; ch_idx++)
			if (rtw_channel_idx_5g[ch_idx] == channel)
				break;
	} else {
		return -1;
	}

	if (ch_idx >= n_channel)
		return -1;

	return ch_idx;
}
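
/* 2.4G channels map directly to indexes (channel 6 -> index 5), while 5G
 * channels are looked up in rtw_channel_idx_5g (channel 36 -> index 0,
 * channel 100 -> index 14); unknown channels return -1.
 */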

static void rtw_phy_set_tx_power_limit(struct rtw_dev *rtwdev, u8 regd, u8 band,
				       u8 bw, u8 rs, u8 ch, s8 pwr_limit)
{
	struct rtw_hal *hal = &rtwdev->hal;
	u8 max_power_index = rtwdev->chip->max_power_index;
	s8 ww;
	int ch_idx;

	pwr_limit = clamp_t(s8, pwr_limit,
			    -max_power_index, max_power_index);
	ch_idx = rtw_channel_to_idx(band, ch);

	if (regd >= RTW_REGD_MAX || bw >= RTW_CHANNEL_WIDTH_MAX ||
	    rs >= RTW_RATE_SECTION_MAX || ch_idx < 0) {
		WARN(1,
		     "wrong txpwr_lmt regd=%u, band=%u bw=%u, rs=%u, ch_idx=%u, pwr_limit=%d\n",
		     regd, band, bw, rs, ch_idx, pwr_limit);
		return;
	}

	if (band == PHY_BAND_2G) {
		hal->tx_pwr_limit_2g[regd][bw][rs][ch_idx] = pwr_limit;
		ww = hal->tx_pwr_limit_2g[RTW_REGD_WW][bw][rs][ch_idx];
		ww = min_t(s8, ww, pwr_limit);
		hal->tx_pwr_limit_2g[RTW_REGD_WW][bw][rs][ch_idx] = ww;
	} else if (band == PHY_BAND_5G) {
		hal->tx_pwr_limit_5g[regd][bw][rs][ch_idx] = pwr_limit;
		ww = hal->tx_pwr_limit_5g[RTW_REGD_WW][bw][rs][ch_idx];
		ww = min_t(s8, ww, pwr_limit);
		hal->tx_pwr_limit_5g[RTW_REGD_WW][bw][rs][ch_idx] = ww;
	}
}

/* cross-reference 5G power limits if values are not assigned */
static void
rtw_xref_5g_txpwr_lmt(struct rtw_dev *rtwdev, u8 regd,
		      u8 bw, u8 ch_idx, u8 rs_ht, u8 rs_vht)
{
	struct rtw_hal *hal = &rtwdev->hal;
	u8 max_power_index = rtwdev->chip->max_power_index;
	s8 lmt_ht = hal->tx_pwr_limit_5g[regd][bw][rs_ht][ch_idx];
	s8 lmt_vht = hal->tx_pwr_limit_5g[regd][bw][rs_vht][ch_idx];

	if (lmt_ht == lmt_vht)
		return;

	if (lmt_ht == max_power_index)
		hal->tx_pwr_limit_5g[regd][bw][rs_ht][ch_idx] = lmt_vht;

	else if (lmt_vht == max_power_index)
		hal->tx_pwr_limit_5g[regd][bw][rs_vht][ch_idx] = lmt_ht;
}

/* cross-reference power limits for ht and vht */
static void
rtw_xref_txpwr_lmt_by_rs(struct rtw_dev *rtwdev, u8 regd, u8 bw, u8 ch_idx)
{
	u8 rs_idx, rs_ht, rs_vht;
	u8 rs_cmp[2][2] = {{RTW_RATE_SECTION_HT_1S, RTW_RATE_SECTION_VHT_1S},
			   {RTW_RATE_SECTION_HT_2S, RTW_RATE_SECTION_VHT_2S} };

	for (rs_idx = 0; rs_idx < 2; rs_idx++) {
		rs_ht = rs_cmp[rs_idx][0];
		rs_vht = rs_cmp[rs_idx][1];

		rtw_xref_5g_txpwr_lmt(rtwdev, regd, bw, ch_idx, rs_ht, rs_vht);
	}
}

/* cross-reference power limits for 5G channels */
static void
rtw_xref_5g_txpwr_lmt_by_ch(struct rtw_dev *rtwdev, u8 regd, u8 bw)
{
	u8 ch_idx;

	for (ch_idx = 0; ch_idx < RTW_MAX_CHANNEL_NUM_5G; ch_idx++)
		rtw_xref_txpwr_lmt_by_rs(rtwdev, regd, bw, ch_idx);
}

/* cross-reference power limits for 20/40M bandwidth */
static void
rtw_xref_txpwr_lmt_by_bw(struct rtw_dev *rtwdev, u8 regd)
{
	u8 bw;

	for (bw = RTW_CHANNEL_WIDTH_20; bw <= RTW_CHANNEL_WIDTH_40; bw++)
		rtw_xref_5g_txpwr_lmt_by_ch(rtwdev, regd, bw);
}

/* cross-reference power limits */
static void rtw_xref_txpwr_lmt(struct rtw_dev *rtwdev)
{
	u8 regd;

	for (regd = 0; regd < RTW_REGD_MAX; regd++)
		rtw_xref_txpwr_lmt_by_bw(rtwdev, regd);
}
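
/* Limits start out at max_power_index (see rtw_phy_init_tx_power_limit),
 * so "not assigned" above literally means "still equal to the maximum":
 * if a regulatory table only lists an HT limit, the matching VHT section
 * inherits it here, and vice versa.
 */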

void rtw_parse_tbl_txpwr_lmt(struct rtw_dev *rtwdev,
			     const struct rtw_table *tbl)
{
	const struct rtw_txpwr_lmt_cfg_pair *p = tbl->data;
	const struct rtw_txpwr_lmt_cfg_pair *end = p + tbl->size;

	for (; p < end; p++) {
		rtw_phy_set_tx_power_limit(rtwdev, p->regd, p->band,
					   p->bw, p->rs, p->ch, p->txpwr_lmt);
	}

	rtw_xref_txpwr_lmt(rtwdev);
}

void rtw_phy_cfg_mac(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
		     u32 addr, u32 data)
{
	rtw_write8(rtwdev, addr, data);
}

void rtw_phy_cfg_agc(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
		     u32 addr, u32 data)
{
	rtw_write32(rtwdev, addr, data);
}

void rtw_phy_cfg_bb(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
		    u32 addr, u32 data)
{
	if (addr == 0xfe)
		msleep(50);
	else if (addr == 0xfd)
		mdelay(5);
	else if (addr == 0xfc)
		mdelay(1);
	else if (addr == 0xfb)
		usleep_range(50, 60);
	else if (addr == 0xfa)
		udelay(5);
	else if (addr == 0xf9)
		udelay(1);
	else
		rtw_write32(rtwdev, addr, data);
}

void rtw_phy_cfg_rf(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
		    u32 addr, u32 data)
{
	if (addr == 0xffe) {
		msleep(50);
	} else if (addr == 0xfe) {
		usleep_range(100, 110);
	} else {
		rtw_write_rf(rtwdev, tbl->rf_path, addr, RFREG_MASK, data);
		udelay(1);
	}
}

static void rtw_load_rfk_table(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info;

	if (!chip->rfk_init_tbl)
		return;

	rtw_write32_mask(rtwdev, 0x1e24, BIT(17), 0x1);
	rtw_write32_mask(rtwdev, 0x1cd0, BIT(28), 0x1);
	rtw_write32_mask(rtwdev, 0x1cd0, BIT(29), 0x1);
	rtw_write32_mask(rtwdev, 0x1cd0, BIT(30), 0x1);
	rtw_write32_mask(rtwdev, 0x1cd0, BIT(31), 0x0);

	rtw_load_table(rtwdev, chip->rfk_init_tbl);

	dpk_info->is_dpk_pwr_on = 1;
}

void rtw_phy_load_tables(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	u8 rf_path;

	rtw_load_table(rtwdev, chip->mac_tbl);
	rtw_load_table(rtwdev, chip->bb_tbl);
	rtw_load_table(rtwdev, chip->agc_tbl);
	rtw_load_rfk_table(rtwdev);

	for (rf_path = 0; rf_path < rtwdev->hal.rf_path_num; rf_path++) {
		const struct rtw_table *tbl;

		tbl = chip->rf_tbl[rf_path];
		rtw_load_table(rtwdev, tbl);
	}
}

static u8 rtw_get_channel_group(u8 channel)
{
	switch (channel) {
	default:
		WARN_ON(1);
		/* fall through */
	case 1:
	case 2:
	case 36:
	case 38:
	case 40:
	case 42:
		return 0;
	case 3:
	case 4:
	case 5:
	case 44:
	case 46:
	case 48:
	case 50:
		return 1;
	case 6:
	case 7:
	case 8:
	case 52:
	case 54:
	case 56:
	case 58:
		return 2;
	case 9:
	case 10:
	case 11:
	case 60:
	case 62:
	case 64:
		return 3;
	case 12:
	case 13:
	case 100:
	case 102:
	case 104:
	case 106:
		return 4;
	case 14:
	case 108:
	case 110:
	case 112:
	case 114:
		return 5;
	case 116:
	case 118:
	case 120:
	case 122:
		return 6;
	case 124:
	case 126:
	case 128:
	case 130:
		return 7;
	case 132:
	case 134:
	case 136:
	case 138:
		return 8;
	case 140:
	case 142:
	case 144:
		return 9;
	case 149:
	case 151:
	case 153:
	case 155:
		return 10;
	case 157:
	case 159:
	case 161:
		return 11;
	case 165:
	case 167:
	case 169:
	case 171:
		return 12;
	case 173:
	case 175:
	case 177:
		return 13;
	}
}
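
/* Channel groups select the per-group base power indexes parsed from
 * efuse (see rtw_phy_get_2g/5g_tx_power_index below): 2.4G channels 1-14
 * share groups 0-5 with the lower 5G channels, and the higher 5G channels
 * fall into groups 6-13 in blocks of (mostly) four, e.g. channel 149
 * belongs to group 10.
 */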

static s8 rtw_phy_get_dis_dpd_by_rate_diff(struct rtw_dev *rtwdev, u16 rate)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	s8 dpd_diff = 0;

	if (!chip->en_dis_dpd)
		return 0;

#define RTW_DPD_RATE_CHECK(_rate)					\
	case DESC_RATE ## _rate:					\
		if (DIS_DPD_RATE ## _rate & chip->dpd_ratemask)		\
			dpd_diff = -6 * chip->txgi_factor;		\
		break

	switch (rate) {
	RTW_DPD_RATE_CHECK(6M);
	RTW_DPD_RATE_CHECK(9M);
	RTW_DPD_RATE_CHECK(MCS0);
	RTW_DPD_RATE_CHECK(MCS1);
	RTW_DPD_RATE_CHECK(MCS8);
	RTW_DPD_RATE_CHECK(MCS9);
	RTW_DPD_RATE_CHECK(VHT1SS_MCS0);
	RTW_DPD_RATE_CHECK(VHT1SS_MCS1);
	RTW_DPD_RATE_CHECK(VHT2SS_MCS0);
	RTW_DPD_RATE_CHECK(VHT2SS_MCS1);
	}
#undef RTW_DPD_RATE_CHECK

	return dpd_diff;
}

static u8 rtw_phy_get_2g_tx_power_index(struct rtw_dev *rtwdev,
					struct rtw_2g_txpwr_idx *pwr_idx_2g,
					enum rtw_bandwidth bandwidth,
					u8 rate, u8 group)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	u8 tx_power;
	bool mcs_rate;
	bool above_2ss;
	u8 factor = chip->txgi_factor;

	if (rate <= DESC_RATE11M)
		tx_power = pwr_idx_2g->cck_base[group];
	else
		tx_power = pwr_idx_2g->bw40_base[group];

	if (rate >= DESC_RATE6M && rate <= DESC_RATE54M)
		tx_power += pwr_idx_2g->ht_1s_diff.ofdm * factor;

	mcs_rate = (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS15) ||
		   (rate >= DESC_RATEVHT1SS_MCS0 &&
		    rate <= DESC_RATEVHT2SS_MCS9);
	above_2ss = (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15) ||
		    (rate >= DESC_RATEVHT2SS_MCS0);

	if (!mcs_rate)
		return tx_power;

	switch (bandwidth) {
	default:
		WARN_ON(1);
		/* fall through */
	case RTW_CHANNEL_WIDTH_20:
		tx_power += pwr_idx_2g->ht_1s_diff.bw20 * factor;
		if (above_2ss)
			tx_power += pwr_idx_2g->ht_2s_diff.bw20 * factor;
		break;
	case RTW_CHANNEL_WIDTH_40:
		/* bw40 is the base power */
		if (above_2ss)
			tx_power += pwr_idx_2g->ht_2s_diff.bw40 * factor;
		break;
	}

	return tx_power;
}

static u8 rtw_phy_get_5g_tx_power_index(struct rtw_dev *rtwdev,
					struct rtw_5g_txpwr_idx *pwr_idx_5g,
					enum rtw_bandwidth bandwidth,
					u8 rate, u8 group)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	u8 tx_power;
	u8 upper, lower;
	bool mcs_rate;
	bool above_2ss;
	u8 factor = chip->txgi_factor;

	tx_power = pwr_idx_5g->bw40_base[group];

	mcs_rate = (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS15) ||
		   (rate >= DESC_RATEVHT1SS_MCS0 &&
		    rate <= DESC_RATEVHT2SS_MCS9);
	above_2ss = (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15) ||
		    (rate >= DESC_RATEVHT2SS_MCS0);

	if (!mcs_rate) {
		tx_power += pwr_idx_5g->ht_1s_diff.ofdm * factor;
		return tx_power;
	}

	switch (bandwidth) {
	default:
		WARN_ON(1);
		/* fall through */
	case RTW_CHANNEL_WIDTH_20:
		tx_power += pwr_idx_5g->ht_1s_diff.bw20 * factor;
		if (above_2ss)
			tx_power += pwr_idx_5g->ht_2s_diff.bw20 * factor;
		break;
	case RTW_CHANNEL_WIDTH_40:
		/* bw40 is the base power */
		if (above_2ss)
			tx_power += pwr_idx_5g->ht_2s_diff.bw40 * factor;
		break;
	case RTW_CHANNEL_WIDTH_80:
		/* the base idx of bw80 is the average of bw40+/bw40- */
		lower = pwr_idx_5g->bw40_base[group];
		upper = pwr_idx_5g->bw40_base[group + 1];

		tx_power = (lower + upper) / 2;
		tx_power += pwr_idx_5g->vht_1s_diff.bw80 * factor;
		if (above_2ss)
			tx_power += pwr_idx_5g->vht_2s_diff.bw80 * factor;
		break;
	}

	return tx_power;
}
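
/* Example of the bw80 base above: with bw40_base[group] = 30 and
 * bw40_base[group + 1] = 32, an 80 MHz VHT rate starts from
 * (30 + 32) / 2 = 31 before the per-spatial-stream bw80 diffs (scaled by
 * txgi_factor) are applied.
 */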

static s8 rtw_phy_get_tx_power_limit(struct rtw_dev *rtwdev, u8 band,
				     enum rtw_bandwidth bw, u8 rf_path,
				     u8 rate, u8 channel, u8 regd)
{
	struct rtw_hal *hal = &rtwdev->hal;
	u8 *cch_by_bw = hal->cch_by_bw;
	s8 power_limit = (s8)rtwdev->chip->max_power_index;
	u8 rs;
	int ch_idx;
	u8 cur_bw, cur_ch;
	s8 cur_lmt;

	if (regd > RTW_REGD_WW)
		return power_limit;

	if (rate >= DESC_RATE1M && rate <= DESC_RATE11M)
		rs = RTW_RATE_SECTION_CCK;
	else if (rate >= DESC_RATE6M && rate <= DESC_RATE54M)
		rs = RTW_RATE_SECTION_OFDM;
	else if (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS7)
		rs = RTW_RATE_SECTION_HT_1S;
	else if (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15)
		rs = RTW_RATE_SECTION_HT_2S;
	else if (rate >= DESC_RATEVHT1SS_MCS0 && rate <= DESC_RATEVHT1SS_MCS9)
		rs = RTW_RATE_SECTION_VHT_1S;
	else if (rate >= DESC_RATEVHT2SS_MCS0 && rate <= DESC_RATEVHT2SS_MCS9)
		rs = RTW_RATE_SECTION_VHT_2S;
	else
		goto err;

	/* only 20M BW with cck and ofdm */
	if (rs == RTW_RATE_SECTION_CCK || rs == RTW_RATE_SECTION_OFDM)
		bw = RTW_CHANNEL_WIDTH_20;

	/* only 20/40M BW with ht */
	if (rs == RTW_RATE_SECTION_HT_1S || rs == RTW_RATE_SECTION_HT_2S)
		bw = min_t(u8, bw, RTW_CHANNEL_WIDTH_40);

	/* select min power limit among [20M BW ~ current BW] */
	for (cur_bw = RTW_CHANNEL_WIDTH_20; cur_bw <= bw; cur_bw++) {
		cur_ch = cch_by_bw[cur_bw];

		ch_idx = rtw_channel_to_idx(band, cur_ch);
		if (ch_idx < 0)
			goto err;

		cur_lmt = cur_ch <= RTW_MAX_CHANNEL_NUM_2G ?
			hal->tx_pwr_limit_2g[regd][cur_bw][rs][ch_idx] :
			hal->tx_pwr_limit_5g[regd][cur_bw][rs][ch_idx];

		power_limit = min_t(s8, cur_lmt, power_limit);
	}

	return power_limit;

err:
	WARN(1, "invalid arguments, band=%d, bw=%d, path=%d, rate=%d, ch=%d\n",
	     band, bw, rf_path, rate, channel);
	return (s8)rtwdev->chip->max_power_index;
}

void rtw_get_tx_power_params(struct rtw_dev *rtwdev, u8 path, u8 rate, u8 bw,
			     u8 ch, u8 regd, struct rtw_power_params *pwr_param)
{
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_txpwr_idx *pwr_idx;
	u8 group, band;
	u8 *base = &pwr_param->pwr_base;
	s8 *offset = &pwr_param->pwr_offset;
	s8 *limit = &pwr_param->pwr_limit;

	pwr_idx = &rtwdev->efuse.txpwr_idx_table[path];
	group = rtw_get_channel_group(ch);

	/* base power index for 2.4G/5G */
	if (ch <= 14) {
		band = PHY_BAND_2G;
		*base = rtw_phy_get_2g_tx_power_index(rtwdev,
						      &pwr_idx->pwr_idx_2g,
						      bw, rate, group);
		*offset = hal->tx_pwr_by_rate_offset_2g[path][rate];
	} else {
		band = PHY_BAND_5G;
		*base = rtw_phy_get_5g_tx_power_index(rtwdev,
						      &pwr_idx->pwr_idx_5g,
						      bw, rate, group);
		*offset = hal->tx_pwr_by_rate_offset_5g[path][rate];
	}

	*limit = rtw_phy_get_tx_power_limit(rtwdev, band, bw, path,
					    rate, ch, regd);
}

u8
rtw_phy_get_tx_power_index(struct rtw_dev *rtwdev, u8 rf_path, u8 rate,
			   enum rtw_bandwidth bandwidth, u8 channel, u8 regd)
{
	struct rtw_power_params pwr_param = {0};
	u8 tx_power;
	s8 offset;

	rtw_get_tx_power_params(rtwdev, rf_path, rate, bandwidth,
				channel, regd, &pwr_param);

	tx_power = pwr_param.pwr_base;
	offset = min_t(s8, pwr_param.pwr_offset, pwr_param.pwr_limit);

	if (rtwdev->chip->en_dis_dpd)
		offset += rtw_phy_get_dis_dpd_by_rate_diff(rtwdev, rate);

	tx_power += offset;

	if (tx_power > rtwdev->chip->max_power_index)
		tx_power = rtwdev->chip->max_power_index;

	return tx_power;
}
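
/* The final index is base + min(by-rate offset, regulatory limit), plus
 * the (non-positive) adjustment for rates where DPD is disabled, capped
 * at max_power_index.  e.g. a base of 30 with offset 4 but limit 2 yields
 * index 30 + 2 = 32.
 */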

static void rtw_phy_set_tx_power_index_by_rs(struct rtw_dev *rtwdev,
					     u8 ch, u8 path, u8 rs)
{
	struct rtw_hal *hal = &rtwdev->hal;
	u8 regd = rtwdev->regd.txpwr_regd;
	u8 *rates;
	u8 size;
	u8 rate;
	u8 pwr_idx;
	u8 bw;
	int i;

	if (rs >= RTW_RATE_SECTION_MAX)
		return;

	rates = rtw_rate_section[rs];
	size = rtw_rate_size[rs];
	bw = hal->current_band_width;
	for (i = 0; i < size; i++) {
		rate = rates[i];
		pwr_idx = rtw_phy_get_tx_power_index(rtwdev, path, rate,
						     bw, ch, regd);
		hal->tx_pwr_tbl[path][rate] = pwr_idx;
	}
}

/* Set the tx power level by path for each rate; note that the order of the
 * rates is *very* important, because 8822B/8821C combine every four bytes
 * of tx power index into a four-byte power index register, and call
 * set_tx_agc to write these values into hardware
 */
static void rtw_phy_set_tx_power_level_by_path(struct rtw_dev *rtwdev,
					       u8 ch, u8 path)
{
	struct rtw_hal *hal = &rtwdev->hal;
	u8 rs;

	/* do not need cck rates if we are not in 2.4G */
	if (hal->current_band_type == RTW_BAND_2G)
		rs = RTW_RATE_SECTION_CCK;
	else
		rs = RTW_RATE_SECTION_OFDM;

	for (; rs < RTW_RATE_SECTION_MAX; rs++)
		rtw_phy_set_tx_power_index_by_rs(rtwdev, ch, path, rs);
}

void rtw_phy_set_tx_power_level(struct rtw_dev *rtwdev, u8 channel)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_hal *hal = &rtwdev->hal;
	u8 path;

	mutex_lock(&hal->tx_power_mutex);

	for (path = 0; path < hal->rf_path_num; path++)
		rtw_phy_set_tx_power_level_by_path(rtwdev, channel, path);

	chip->ops->set_tx_power_index(rtwdev);
	mutex_unlock(&hal->tx_power_mutex);
}

static void
rtw_phy_tx_power_by_rate_config_by_path(struct rtw_hal *hal, u8 path,
					u8 rs, u8 size, u8 *rates)
{
	u8 rate;
	u8 base_idx, rate_idx;
	s8 base_2g, base_5g;

	if (rs >= RTW_RATE_SECTION_VHT_1S)
		base_idx = rates[size - 3];
	else
		base_idx = rates[size - 1];
	base_2g = hal->tx_pwr_by_rate_offset_2g[path][base_idx];
	base_5g = hal->tx_pwr_by_rate_offset_5g[path][base_idx];
	hal->tx_pwr_by_rate_base_2g[path][rs] = base_2g;
	hal->tx_pwr_by_rate_base_5g[path][rs] = base_5g;
	for (rate = 0; rate < size; rate++) {
		rate_idx = rates[rate];
		hal->tx_pwr_by_rate_offset_2g[path][rate_idx] -= base_2g;
		hal->tx_pwr_by_rate_offset_5g[path][rate_idx] -= base_5g;
	}
}
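
/* The by-rate values are re-based on a reference rate per section: the
 * highest rate (rates[size - 1]) for CCK/OFDM/HT, but rates[size - 3],
 * i.e. MCS7, for the ten-entry VHT sections.  The reference's raw value
 * is saved in tx_pwr_by_rate_base_*, and every rate in the section then
 * becomes a signed delta against it.
 */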

void rtw_phy_tx_power_by_rate_config(struct rtw_hal *hal)
{
	u8 path;

	for (path = 0; path < RTW_RF_PATH_MAX; path++) {
		rtw_phy_tx_power_by_rate_config_by_path(hal, path,
				RTW_RATE_SECTION_CCK,
				rtw_cck_size, rtw_cck_rates);
		rtw_phy_tx_power_by_rate_config_by_path(hal, path,
				RTW_RATE_SECTION_OFDM,
				rtw_ofdm_size, rtw_ofdm_rates);
		rtw_phy_tx_power_by_rate_config_by_path(hal, path,
				RTW_RATE_SECTION_HT_1S,
				rtw_ht_1s_size, rtw_ht_1s_rates);
		rtw_phy_tx_power_by_rate_config_by_path(hal, path,
				RTW_RATE_SECTION_HT_2S,
				rtw_ht_2s_size, rtw_ht_2s_rates);
		rtw_phy_tx_power_by_rate_config_by_path(hal, path,
				RTW_RATE_SECTION_VHT_1S,
				rtw_vht_1s_size, rtw_vht_1s_rates);
		rtw_phy_tx_power_by_rate_config_by_path(hal, path,
				RTW_RATE_SECTION_VHT_2S,
				rtw_vht_2s_size, rtw_vht_2s_rates);
	}
}

static void
__rtw_phy_tx_power_limit_config(struct rtw_hal *hal, u8 regd, u8 bw, u8 rs)
{
	s8 base;
	u8 ch;

	for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_2G; ch++) {
		base = hal->tx_pwr_by_rate_base_2g[0][rs];
		hal->tx_pwr_limit_2g[regd][bw][rs][ch] -= base;
	}

	for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_5G; ch++) {
		base = hal->tx_pwr_by_rate_base_5g[0][rs];
		hal->tx_pwr_limit_5g[regd][bw][rs][ch] -= base;
	}
}

void rtw_phy_tx_power_limit_config(struct rtw_hal *hal)
{
	u8 regd, bw, rs;

	/* default at channel 1 */
	hal->cch_by_bw[RTW_CHANNEL_WIDTH_20] = 1;

	for (regd = 0; regd < RTW_REGD_MAX; regd++)
		for (bw = 0; bw < RTW_CHANNEL_WIDTH_MAX; bw++)
			for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++)
				__rtw_phy_tx_power_limit_config(hal, regd, bw, rs);
}

static void rtw_phy_init_tx_power_limit(struct rtw_dev *rtwdev,
					u8 regd, u8 bw, u8 rs)
{
	struct rtw_hal *hal = &rtwdev->hal;
	s8 max_power_index = (s8)rtwdev->chip->max_power_index;
	u8 ch;

	/* 2.4G channels */
	for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_2G; ch++)
		hal->tx_pwr_limit_2g[regd][bw][rs][ch] = max_power_index;

	/* 5G channels */
	for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_5G; ch++)
		hal->tx_pwr_limit_5g[regd][bw][rs][ch] = max_power_index;
}

void rtw_phy_init_tx_power(struct rtw_dev *rtwdev)
{
	struct rtw_hal *hal = &rtwdev->hal;
	u8 regd, path, rate, rs, bw;

	/* init tx power by rate offset */
	for (path = 0; path < RTW_RF_PATH_MAX; path++) {
		for (rate = 0; rate < DESC_RATE_MAX; rate++) {
			hal->tx_pwr_by_rate_offset_2g[path][rate] = 0;
			hal->tx_pwr_by_rate_offset_5g[path][rate] = 0;
		}
	}

	/* init tx power limit */
	for (regd = 0; regd < RTW_REGD_MAX; regd++)
		for (bw = 0; bw < RTW_CHANNEL_WIDTH_MAX; bw++)
			for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++)
				rtw_phy_init_tx_power_limit(rtwdev, regd, bw,
							    rs);
}