// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019  Realtek Corporation
 */

#include <linux/bcd.h>

#include "main.h"
#include "reg.h"
#include "fw.h"
#include "phy.h"
#include "debug.h"

struct phy_cfg_pair {
	u32 addr;
	u32 data;
};

union phy_table_tile {
	struct rtw_phy_cond cond;
	struct phy_cfg_pair cfg;
};

struct phy_pg_cfg_pair {
	u32 band;
	u32 rf_path;
	u32 tx_num;
	u32 addr;
	u32 bitmask;
	u32 data;
};
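
/* dB-to-linear lookup used for RSSI averaging: entry [i][j] is roughly
 * 10^((i * 8 + j + 1) / 10), i.e. the linear value for 1..96 dB.  The first
 * three rows (1..24 dB) are stored pre-scaled by 2^FRAC_BITS to keep
 * fractional precision; rtw_phy_db_2_linear() shifts the remaining rows up
 * by the same amount so all results share one fixed-point format.
 */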
static const u32 db_invert_table[12][8] = {
	{10, 13, 16, 20,
	 25, 32, 40, 50},
	{64, 80, 101, 128,
	 160, 201, 256, 318},
	{401, 505, 635, 800,
	 1007, 1268, 1596, 2010},
	{316, 398, 501, 631,
	 794, 1000, 1259, 1585},
	{1995, 2512, 3162, 3981,
	 5012, 6310, 7943, 10000},
	{12589, 15849, 19953, 25119,
	 31623, 39811, 50119, 63098},
	{79433, 100000, 125893, 158489,
	 199526, 251189, 316228, 398107},
	{501187, 630957, 794328, 1000000,
	 1258925, 1584893, 1995262, 2511886},
	{3162278, 3981072, 5011872, 6309573,
	 7943282, 10000000, 12589254, 15848932},
	{19952623, 25118864, 31622777, 39810717,
	 50118723, 63095734, 79432823, 100000000},
	{125892541, 158489319, 199526232, 251188643,
	 316227766, 398107171, 501187234, 630957345},
	{794328235, 1000000000, 1258925412, 1584893192,
	 1995262315, 2511886432U, 3162277660U, 3981071706U}
};

u8 rtw_cck_rates[] = { DESC_RATE1M, DESC_RATE2M, DESC_RATE5_5M, DESC_RATE11M };
u8 rtw_ofdm_rates[] = {
	DESC_RATE6M, DESC_RATE9M, DESC_RATE12M,
	DESC_RATE18M, DESC_RATE24M, DESC_RATE36M,
	DESC_RATE48M, DESC_RATE54M
};
u8 rtw_ht_1s_rates[] = {
	DESC_RATEMCS0, DESC_RATEMCS1, DESC_RATEMCS2,
	DESC_RATEMCS3, DESC_RATEMCS4, DESC_RATEMCS5,
	DESC_RATEMCS6, DESC_RATEMCS7
};
u8 rtw_ht_2s_rates[] = {
	DESC_RATEMCS8, DESC_RATEMCS9, DESC_RATEMCS10,
	DESC_RATEMCS11, DESC_RATEMCS12, DESC_RATEMCS13,
	DESC_RATEMCS14, DESC_RATEMCS15
};
u8 rtw_vht_1s_rates[] = {
	DESC_RATEVHT1SS_MCS0, DESC_RATEVHT1SS_MCS1,
	DESC_RATEVHT1SS_MCS2, DESC_RATEVHT1SS_MCS3,
	DESC_RATEVHT1SS_MCS4, DESC_RATEVHT1SS_MCS5,
	DESC_RATEVHT1SS_MCS6, DESC_RATEVHT1SS_MCS7,
	DESC_RATEVHT1SS_MCS8, DESC_RATEVHT1SS_MCS9
};
u8 rtw_vht_2s_rates[] = {
	DESC_RATEVHT2SS_MCS0, DESC_RATEVHT2SS_MCS1,
	DESC_RATEVHT2SS_MCS2, DESC_RATEVHT2SS_MCS3,
	DESC_RATEVHT2SS_MCS4, DESC_RATEVHT2SS_MCS5,
	DESC_RATEVHT2SS_MCS6, DESC_RATEVHT2SS_MCS7,
	DESC_RATEVHT2SS_MCS8, DESC_RATEVHT2SS_MCS9
};
u8 *rtw_rate_section[RTW_RATE_SECTION_MAX] = {
	rtw_cck_rates, rtw_ofdm_rates,
	rtw_ht_1s_rates, rtw_ht_2s_rates,
	rtw_vht_1s_rates, rtw_vht_2s_rates
};
u8 rtw_rate_size[RTW_RATE_SECTION_MAX] = {
	ARRAY_SIZE(rtw_cck_rates),
	ARRAY_SIZE(rtw_ofdm_rates),
	ARRAY_SIZE(rtw_ht_1s_rates),
	ARRAY_SIZE(rtw_ht_2s_rates),
	ARRAY_SIZE(rtw_vht_1s_rates),
	ARRAY_SIZE(rtw_vht_2s_rates)
};
static const u8 rtw_cck_size = ARRAY_SIZE(rtw_cck_rates);
static const u8 rtw_ofdm_size = ARRAY_SIZE(rtw_ofdm_rates);
static const u8 rtw_ht_1s_size = ARRAY_SIZE(rtw_ht_1s_rates);
static const u8 rtw_ht_2s_size = ARRAY_SIZE(rtw_ht_2s_rates);
static const u8 rtw_vht_1s_size = ARRAY_SIZE(rtw_vht_1s_rates);
static const u8 rtw_vht_2s_size = ARRAY_SIZE(rtw_vht_2s_rates);

enum rtw_phy_band_type {
	PHY_BAND_2G = 0,
	PHY_BAND_5G = 1,
};

void rtw_phy_init(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
	u32 addr, mask;

	dm_info->fa_history[3] = 0;
	dm_info->fa_history[2] = 0;
	dm_info->fa_history[1] = 0;
	dm_info->fa_history[0] = 0;
	dm_info->igi_bitmap = 0;
	dm_info->igi_history[3] = 0;
	dm_info->igi_history[2] = 0;
	dm_info->igi_history[1] = 0;

	addr = chip->dig[0].addr;
	mask = chip->dig[0].mask;
	dm_info->igi_history[0] = rtw_read32_mask(rtwdev, addr, mask);
}

void rtw_phy_dig_write(struct rtw_dev *rtwdev, u8 igi)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_hal *hal = &rtwdev->hal;
	u32 addr, mask;
	u8 path;

	for (path = 0; path < hal->rf_path_num; path++) {
		addr = chip->dig[path].addr;
		mask = chip->dig[path].mask;
		rtw_write32_mask(rtwdev, addr, mask, igi);
	}
}

static void rtw_phy_stat_false_alarm(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;

	chip->ops->false_alarm_statistics(rtwdev);
}

#define RA_FLOOR_TABLE_SIZE	7
#define RA_FLOOR_UP_GAP		3

static u8 rtw_phy_get_rssi_level(u8 old_level, u8 rssi)
{
	u8 table[RA_FLOOR_TABLE_SIZE] = {20, 34, 38, 42, 46, 50, 100};
	u8 new_level = 0;
	int i;

	for (i = 0; i < RA_FLOOR_TABLE_SIZE; i++)
		if (i >= old_level)
			table[i] += RA_FLOOR_UP_GAP;

	for (i = 0; i < RA_FLOOR_TABLE_SIZE; i++) {
		if (rssi < table[i]) {
			new_level = i;
			break;
		}
	}

	return new_level;
}

struct rtw_phy_stat_iter_data {
	struct rtw_dev *rtwdev;
	u8 min_rssi;
};

static void rtw_phy_stat_rssi_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw_phy_stat_iter_data *iter_data = data;
	struct rtw_dev *rtwdev = iter_data->rtwdev;
	struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
	u8 rssi;

	rssi = ewma_rssi_read(&si->avg_rssi);
	si->rssi_level = rtw_phy_get_rssi_level(si->rssi_level, rssi);

	rtw_fw_send_rssi_info(rtwdev, si);

	iter_data->min_rssi = min_t(u8, rssi, iter_data->min_rssi);
}

static void rtw_phy_stat_rssi(struct rtw_dev *rtwdev)
{
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
	struct rtw_phy_stat_iter_data data = {};

	data.rtwdev = rtwdev;
	data.min_rssi = U8_MAX;
	rtw_iterate_stas_atomic(rtwdev, rtw_phy_stat_rssi_iter, &data);

	dm_info->pre_min_rssi = dm_info->min_rssi;
	dm_info->min_rssi = data.min_rssi;
}

static void rtw_phy_statistics(struct rtw_dev *rtwdev)
{
	rtw_phy_stat_rssi(rtwdev);
	rtw_phy_stat_false_alarm(rtwdev);
}

#define DIG_PERF_FA_TH_LOW		250
#define DIG_PERF_FA_TH_HIGH		500
#define DIG_PERF_FA_TH_EXTRA_HIGH	750
#define DIG_PERF_MAX			0x5a
#define DIG_PERF_MID			0x40
#define DIG_CVRG_FA_TH_LOW		2000
#define DIG_CVRG_FA_TH_HIGH		4000
#define DIG_CVRG_FA_TH_EXTRA_HIGH	5000
#define DIG_CVRG_MAX			0x2a
#define DIG_CVRG_MID			0x26
#define DIG_CVRG_MIN			0x1c
#define DIG_RSSI_GAIN_OFFSET		15
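
/* Detect a "damping" condition: dm_info->igi_bitmap (kept by
 * rtw_phy_dig_recorder()) stores one bit per DIG iteration, 1 when the IGI
 * was raised and 0 when it was lowered, so the low-nibble patterns 0b0101
 * and 0b1001 mean the gain has been oscillating while the false alarm count
 * swings across the high/low thresholds.  When that is seen, DIG is frozen
 * at the current value until the minimum RSSI changes by more than 3 or
 * about 20 iterations have passed.
 */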
static bool
rtw_phy_dig_check_damping(struct rtw_dm_info *dm_info)
{
	u16 fa_lo = DIG_PERF_FA_TH_LOW;
	u16 fa_hi = DIG_PERF_FA_TH_HIGH;
	u16 *fa_history;
	u8 *igi_history;
	u8 damping_rssi;
	u8 min_rssi;
	u8 diff;
	u8 igi_bitmap;
	bool damping = false;

	min_rssi = dm_info->min_rssi;
	if (dm_info->damping) {
		damping_rssi = dm_info->damping_rssi;
		diff = min_rssi > damping_rssi ? min_rssi - damping_rssi :
						 damping_rssi - min_rssi;
		if (diff > 3 || dm_info->damping_cnt++ > 20) {
			dm_info->damping = false;
			return false;
		}

		return true;
	}

	igi_history = dm_info->igi_history;
	fa_history = dm_info->fa_history;
	igi_bitmap = dm_info->igi_bitmap & 0xf;
	switch (igi_bitmap) {
	case 5:
		/* down -> up -> down -> up */
		if (igi_history[0] > igi_history[1] &&
		    igi_history[2] > igi_history[3] &&
		    igi_history[0] - igi_history[1] >= 2 &&
		    igi_history[2] - igi_history[3] >= 2 &&
		    fa_history[0] > fa_hi && fa_history[1] < fa_lo &&
		    fa_history[2] > fa_hi && fa_history[3] < fa_lo)
			damping = true;
		break;
	case 9:
		/* up -> down -> down -> up */
		if (igi_history[0] > igi_history[1] &&
		    igi_history[3] > igi_history[2] &&
		    igi_history[0] - igi_history[1] >= 4 &&
		    igi_history[3] - igi_history[2] >= 2 &&
		    fa_history[0] > fa_hi && fa_history[1] < fa_lo &&
		    fa_history[2] < fa_lo && fa_history[3] > fa_hi)
			damping = true;
		break;
	default:
		return false;
	}

	if (damping) {
		dm_info->damping = true;
		dm_info->damping_cnt = 0;
		dm_info->damping_rssi = min_rssi;
	}

	return damping;
}

static void rtw_phy_dig_get_boundary(struct rtw_dm_info *dm_info,
				     u8 *upper, u8 *lower, bool linked)
{
	u8 dig_max, dig_min, dig_mid;
	u8 min_rssi;

	if (linked) {
		dig_max = DIG_PERF_MAX;
		dig_mid = DIG_PERF_MID;
		/* 22B=0x1c, 22C=0x20 */
		dig_min = 0x1c;
		min_rssi = max_t(u8, dm_info->min_rssi, dig_min);
	} else {
		dig_max = DIG_CVRG_MAX;
		dig_mid = DIG_CVRG_MID;
		dig_min = DIG_CVRG_MIN;
		min_rssi = dig_min;
	}

	/* DIG MAX should be bounded by minimum RSSI with offset +15 */
	dig_max = min_t(u8, dig_max, min_rssi + DIG_RSSI_GAIN_OFFSET);

	*lower = clamp_t(u8, min_rssi, dig_min, dig_mid);
	*upper = clamp_t(u8, *lower + DIG_RSSI_GAIN_OFFSET, dig_min, dig_max);
}

static void rtw_phy_dig_get_threshold(struct rtw_dm_info *dm_info,
				      u16 *fa_th, u8 *step, bool linked)
{
	u8 min_rssi, pre_min_rssi;

	min_rssi = dm_info->min_rssi;
	pre_min_rssi = dm_info->pre_min_rssi;
	step[0] = 4;
	step[1] = 3;
	step[2] = 2;

	if (linked) {
		fa_th[0] = DIG_PERF_FA_TH_EXTRA_HIGH;
		fa_th[1] = DIG_PERF_FA_TH_HIGH;
		fa_th[2] = DIG_PERF_FA_TH_LOW;
		if (pre_min_rssi > min_rssi) {
			step[0] = 6;
			step[1] = 4;
			step[2] = 2;
		}
	} else {
		fa_th[0] = DIG_CVRG_FA_TH_EXTRA_HIGH;
		fa_th[1] = DIG_CVRG_FA_TH_HIGH;
		fa_th[2] = DIG_CVRG_FA_TH_LOW;
	}
}

static void rtw_phy_dig_recorder(struct rtw_dm_info *dm_info, u8 igi, u16 fa)
{
	u8 *igi_history;
	u16 *fa_history;
	u8 igi_bitmap;
	bool up;

	igi_bitmap = dm_info->igi_bitmap << 1 & 0xfe;
	igi_history = dm_info->igi_history;
	fa_history = dm_info->fa_history;

	up = igi > igi_history[0];
	igi_bitmap |= up;

	igi_history[3] = igi_history[2];
	igi_history[2] = igi_history[1];
	igi_history[1] = igi_history[0];
	igi_history[0] = igi;

	fa_history[3] = fa_history[2];
	fa_history[2] = fa_history[1];
	fa_history[1] = fa_history[0];
	fa_history[0] = fa;

	dm_info->igi_bitmap = igi_bitmap;
}
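
/* Dynamic initial gain (DIG): raise the IGI when the false alarm count
 * exceeds one of three thresholds (the step is biased by -2, so a quiet
 * environment slowly lowers the gain), keep the result inside the bounds
 * derived from the weakest connected peer, and skip the update entirely
 * while the damping check above says the gain is oscillating.
 */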
static void rtw_phy_dig(struct rtw_dev *rtwdev)
{
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
	u8 upper_bound, lower_bound;
	u8 pre_igi, cur_igi;
	u16 fa_th[3], fa_cnt;
	u8 level;
	u8 step[3];
	bool linked;

	if (rtw_flag_check(rtwdev, RTW_FLAG_DIG_DISABLE))
		return;

	if (rtw_phy_dig_check_damping(dm_info))
		return;

	linked = !!rtwdev->sta_cnt;

	fa_cnt = dm_info->total_fa_cnt;
	pre_igi = dm_info->igi_history[0];

	rtw_phy_dig_get_threshold(dm_info, fa_th, step, linked);

	/* test the false alarm count from the highest threshold level first,
	 * and increase the IGI by the corresponding step size
	 *
	 * note that the step sizes are biased by +2; the -2 applied below
	 * compensates for it, so the IGI drops by 2 when the false alarm
	 * count stays below all thresholds
	 */
	cur_igi = pre_igi;
	for (level = 0; level < 3; level++) {
		if (fa_cnt > fa_th[level]) {
			cur_igi += step[level];
			break;
		}
	}
	cur_igi -= 2;

	/* calculate the upper/lower bound from the minimum RSSI among the
	 * peers connected to us, and make sure the IGI value does not go
	 * beyond the hardware limitation
	 */
	rtw_phy_dig_get_boundary(dm_info, &upper_bound, &lower_bound, linked);
	cur_igi = clamp_t(u8, cur_igi, lower_bound, upper_bound);

	/* record the current IGI value and false alarm statistics for further
	 * damping checks, and record the trend of IGI values
	 */
	rtw_phy_dig_recorder(dm_info, cur_igi, fa_cnt);

	if (cur_igi != pre_igi)
		rtw_phy_dig_write(rtwdev, cur_igi);
}

static void rtw_phy_ra_info_update_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw_dev *rtwdev = data;
	struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;

	rtw_update_sta_info(rtwdev, si);
}

static void rtw_phy_ra_info_update(struct rtw_dev *rtwdev)
{
	if (rtwdev->watch_dog_cnt & 0x3)
		return;

	rtw_iterate_stas_atomic(rtwdev, rtw_phy_ra_info_update_iter, rtwdev);
}

void rtw_phy_dynamic_mechanism(struct rtw_dev *rtwdev)
{
	/* for further calculation */
	rtw_phy_statistics(rtwdev);
	rtw_phy_dig(rtwdev);
	rtw_phy_ra_info_update(rtwdev);
}
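
/* RSSI combining helpers: per-path powers (negative dBm values) are offset
 * into a 0..100 dB-like scale (100 + power), converted to linear units with
 * FRAC_BITS fractional bits via db_invert_table, averaged over the active
 * paths (the three-path case approximates a divide by 3 with
 * (sum * 11) >> 5), and converted back to dB.  For example, two paths both
 * at -50 dBm map to 50 dB each and come back out as roughly 50.
 */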
#define FRAC_BITS 3

static u8 rtw_phy_power_2_db(s8 power)
{
	if (power <= -100 || power >= 20)
		return 0;
	else if (power >= 0)
		return 100;
	else
		return 100 + power;
}

static u64 rtw_phy_db_2_linear(u8 power_db)
{
	u8 i, j;
	u64 linear;

	if (power_db > 96)
		power_db = 96;
	else if (power_db < 1)
		return 1;

	/* 1dB ~ 96dB */
	i = (power_db - 1) >> 3;
	j = (power_db - 1) - (i << 3);

	linear = db_invert_table[i][j];
	linear = i > 2 ? linear << FRAC_BITS : linear;

	return linear;
}

static u8 rtw_phy_linear_2_db(u64 linear)
{
	u8 i;
	u8 j;
	u32 dB;

	if (linear >= db_invert_table[11][7])
		return 96; /* maximum 96 dB */

	for (i = 0; i < 12; i++) {
		if (i <= 2 && (linear << FRAC_BITS) <= db_invert_table[i][7])
			break;
		else if (i > 2 && linear <= db_invert_table[i][7])
			break;
	}

	for (j = 0; j < 8; j++) {
		if (i <= 2 && (linear << FRAC_BITS) <= db_invert_table[i][j])
			break;
		else if (i > 2 && linear <= db_invert_table[i][j])
			break;
	}

	if (j == 0 && i == 0)
		goto end;

	if (j == 0) {
		if (i != 3) {
			if (db_invert_table[i][0] - linear >
			    linear - db_invert_table[i - 1][7]) {
				i = i - 1;
				j = 7;
			}
		} else {
			if (db_invert_table[3][0] - linear >
			    linear - db_invert_table[2][7]) {
				i = 2;
				j = 7;
			}
		}
	} else {
		if (db_invert_table[i][j] - linear >
		    linear - db_invert_table[i][j - 1]) {
			j = j - 1;
		}
	}
end:
	dB = (i << 3) + j + 1;

	return dB;
}

u8 rtw_phy_rf_power_2_rssi(s8 *rf_power, u8 path_num)
{
	s8 power;
	u8 power_db;
	u64 linear;
	u64 sum = 0;
	u8 path;

	for (path = 0; path < path_num; path++) {
		power = rf_power[path];
		power_db = rtw_phy_power_2_db(power);
		linear = rtw_phy_db_2_linear(power_db);
		sum += linear;
	}

	sum = (sum + (1 << (FRAC_BITS - 1))) >> FRAC_BITS;
	switch (path_num) {
	case 2:
		sum >>= 1;
		break;
	case 3:
		sum = ((sum) + ((sum) << 1) + ((sum) << 3)) >> 5;
		break;
	case 4:
		sum >>= 2;
		break;
	default:
		break;
	}

	return rtw_phy_linear_2_db(sum);
}

u32 rtw_phy_read_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
		    u32 addr, u32 mask)
{
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_chip_info *chip = rtwdev->chip;
	const u32 *base_addr = chip->rf_base_addr;
	u32 val, direct_addr;

	if (rf_path >= hal->rf_path_num) {
		rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return INV_RF_DATA;
	}

	addr &= 0xff;
	direct_addr = base_addr[rf_path] + (addr << 2);
	mask &= RFREG_MASK;

	val = rtw_read32_mask(rtwdev, direct_addr, mask);

	return val;
}
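
/* Indirect (SIPI) RF write: the register offset goes in bits 27:20 and the
 * 20-bit value in bits 19:0 of the per-path serial interface register.
 * Read-modify-write is emulated through rtw_phy_read_rf() when a partial
 * mask is given.
 */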
bool rtw_phy_write_rf_reg_sipi(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
			       u32 addr, u32 mask, u32 data)
{
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_chip_info *chip = rtwdev->chip;
	u32 *sipi_addr = chip->rf_sipi_addr;
	u32 data_and_addr;
	u32 old_data = 0;
	u32 shift;

	if (rf_path >= hal->rf_path_num) {
		rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return false;
	}

	addr &= 0xff;
	mask &= RFREG_MASK;

	if (mask != RFREG_MASK) {
		old_data = rtw_phy_read_rf(rtwdev, rf_path, addr, RFREG_MASK);

		if (old_data == INV_RF_DATA) {
			rtw_err(rtwdev, "Write fail, rf is disabled\n");
			return false;
		}

		shift = __ffs(mask);
		data = ((old_data) & (~mask)) | (data << shift);
	}

	data_and_addr = ((addr << 20) | (data & 0x000fffff)) & 0x0fffffff;

	rtw_write32(rtwdev, sipi_addr[rf_path], data_and_addr);

	udelay(13);

	return true;
}

bool rtw_phy_write_rf_reg(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
			  u32 addr, u32 mask, u32 data)
{
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_chip_info *chip = rtwdev->chip;
	const u32 *base_addr = chip->rf_base_addr;
	u32 direct_addr;

	if (rf_path >= hal->rf_path_num) {
		rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return false;
	}

	addr &= 0xff;
	direct_addr = base_addr[rf_path] + (addr << 2);
	mask &= RFREG_MASK;

	if (addr == RF_CFGCH) {
		rtw_write32_mask(rtwdev, REG_RSV_CTRL, BITS_RFC_DIRECT, DISABLE_PI);
		rtw_write32_mask(rtwdev, REG_WLRF1, BITS_RFC_DIRECT, DISABLE_PI);
	}

	rtw_write32_mask(rtwdev, direct_addr, mask, data);

	udelay(1);

	if (addr == RF_CFGCH) {
		rtw_write32_mask(rtwdev, REG_RSV_CTRL, BITS_RFC_DIRECT, ENABLE_PI);
		rtw_write32_mask(rtwdev, REG_WLRF1, BITS_RFC_DIRECT, ENABLE_PI);
	}

	return true;
}

bool rtw_phy_write_rf_reg_mix(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
			      u32 addr, u32 mask, u32 data)
{
	if (addr != 0x00)
		return rtw_phy_write_rf_reg(rtwdev, rf_path, addr, mask, data);

	return rtw_phy_write_rf_reg_sipi(rtwdev, rf_path, addr, mask, data);
}

void rtw_phy_setup_phy_cond(struct rtw_dev *rtwdev, u32 pkg)
{
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_efuse *efuse = &rtwdev->efuse;
	struct rtw_phy_cond cond = {0};

	cond.cut = hal->cut_version ? hal->cut_version : 15;
	cond.pkg = pkg ? pkg : 15;
	cond.plat = 0x04;
	cond.rfe = efuse->rfe_option;

	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_USB:
		cond.intf = INTF_USB;
		break;
	case RTW_HCI_TYPE_SDIO:
		cond.intf = INTF_SDIO;
		break;
	case RTW_HCI_TYPE_PCIE:
	default:
		cond.intf = INTF_PCIE;
		break;
	}

	hal->phy_cond = cond;

	rtw_dbg(rtwdev, RTW_DBG_PHY, "phy cond=0x%08x\n", *((u32 *)&hal->phy_cond));
}

static bool check_positive(struct rtw_dev *rtwdev, struct rtw_phy_cond cond)
{
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_phy_cond drv_cond = hal->phy_cond;

	if (cond.cut && cond.cut != drv_cond.cut)
		return false;

	if (cond.pkg && cond.pkg != drv_cond.pkg)
		return false;

	if (cond.intf && cond.intf != drv_cond.intf)
		return false;

	if (cond.rfe != drv_cond.rfe)
		return false;

	return true;
}
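
/* Walk a conditional PHY parameter table.  Entries with cond.pos set are
 * IF/ELIF/ELSE/ENDIF markers, entries with cond.neg set carry the condition
 * matched against hal->phy_cond by check_positive(), and all remaining
 * entries are plain address/data pairs applied while the current branch
 * matches.
 */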
void rtw_parse_tbl_phy_cond(struct rtw_dev *rtwdev, const struct rtw_table *tbl)
{
	const union phy_table_tile *p = tbl->data;
	const union phy_table_tile *end = p + tbl->size / 2;
	struct rtw_phy_cond pos_cond = {0};
	bool is_matched = true, is_skipped = false;

	BUILD_BUG_ON(sizeof(union phy_table_tile) != sizeof(struct phy_cfg_pair));

	for (; p < end; p++) {
		if (p->cond.pos) {
			switch (p->cond.branch) {
			case BRANCH_ENDIF:
				is_matched = true;
				is_skipped = false;
				break;
			case BRANCH_ELSE:
				is_matched = is_skipped ? false : true;
				break;
			case BRANCH_IF:
			case BRANCH_ELIF:
			default:
				pos_cond = p->cond;
				break;
			}
		} else if (p->cond.neg) {
			if (!is_skipped) {
				if (check_positive(rtwdev, pos_cond)) {
					is_matched = true;
					is_skipped = true;
				} else {
					is_matched = false;
					is_skipped = false;
				}
			} else {
				is_matched = false;
			}
		} else if (is_matched) {
			(*tbl->do_cfg)(rtwdev, tbl, p->cfg.addr, p->cfg.data);
		}
	}
}

#define bcd_to_dec_pwr_by_rate(val, i) bcd2bin(val >> (i * 8))

static u8 tbl_to_dec_pwr_by_rate(struct rtw_dev *rtwdev, u32 hex, u8 i)
{
	if (rtwdev->chip->is_pwr_by_rate_dec)
		return bcd_to_dec_pwr_by_rate(hex, i);

	return (hex >> (i * 8)) & 0xFF;
}
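
/* Translate a tx-power-by-rate register write (addr/mask/value) into the
 * rates it covers and their per-rate power values.  Each 32-bit word packs
 * up to four one-byte entries, extracted by tbl_to_dec_pwr_by_rate() above
 * (BCD- or binary-coded depending on the chip).
 */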
static void
rtw_phy_get_rate_values_of_txpwr_by_rate(struct rtw_dev *rtwdev,
					 u32 addr, u32 mask, u32 val, u8 *rate,
					 u8 *pwr_by_rate, u8 *rate_num)
{
	int i;

	switch (addr) {
	case 0xE00:
	case 0x830:
		rate[0] = DESC_RATE6M;
		rate[1] = DESC_RATE9M;
		rate[2] = DESC_RATE12M;
		rate[3] = DESC_RATE18M;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xE04:
	case 0x834:
		rate[0] = DESC_RATE24M;
		rate[1] = DESC_RATE36M;
		rate[2] = DESC_RATE48M;
		rate[3] = DESC_RATE54M;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xE08:
		rate[0] = DESC_RATE1M;
		pwr_by_rate[0] = bcd_to_dec_pwr_by_rate(val, 1);
		*rate_num = 1;
		break;
	case 0x86C:
		if (mask == 0xffffff00) {
			rate[0] = DESC_RATE2M;
			rate[1] = DESC_RATE5_5M;
			rate[2] = DESC_RATE11M;
			for (i = 1; i < 4; ++i)
				pwr_by_rate[i - 1] =
					tbl_to_dec_pwr_by_rate(rtwdev, val, i);
			*rate_num = 3;
		} else if (mask == 0x000000ff) {
			rate[0] = DESC_RATE11M;
			pwr_by_rate[0] = bcd_to_dec_pwr_by_rate(val, 0);
			*rate_num = 1;
		}
		break;
	case 0xE10:
	case 0x83C:
		rate[0] = DESC_RATEMCS0;
		rate[1] = DESC_RATEMCS1;
		rate[2] = DESC_RATEMCS2;
		rate[3] = DESC_RATEMCS3;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xE14:
	case 0x848:
		rate[0] = DESC_RATEMCS4;
		rate[1] = DESC_RATEMCS5;
		rate[2] = DESC_RATEMCS6;
		rate[3] = DESC_RATEMCS7;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xE18:
	case 0x84C:
		rate[0] = DESC_RATEMCS8;
		rate[1] = DESC_RATEMCS9;
		rate[2] = DESC_RATEMCS10;
		rate[3] = DESC_RATEMCS11;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xE1C:
	case 0x868:
		rate[0] = DESC_RATEMCS12;
		rate[1] = DESC_RATEMCS13;
		rate[2] = DESC_RATEMCS14;
		rate[3] = DESC_RATEMCS15;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0x838:
		rate[0] = DESC_RATE1M;
		rate[1] = DESC_RATE2M;
		rate[2] = DESC_RATE5_5M;
		for (i = 1; i < 4; ++i)
			pwr_by_rate[i - 1] = tbl_to_dec_pwr_by_rate(rtwdev,
								    val, i);
		*rate_num = 3;
		break;
	case 0xC20:
	case 0xE20:
	case 0x1820:
	case 0x1A20:
		rate[0] = DESC_RATE1M;
		rate[1] = DESC_RATE2M;
		rate[2] = DESC_RATE5_5M;
		rate[3] = DESC_RATE11M;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC24:
	case 0xE24:
	case 0x1824:
	case 0x1A24:
		rate[0] = DESC_RATE6M;
		rate[1] = DESC_RATE9M;
		rate[2] = DESC_RATE12M;
		rate[3] = DESC_RATE18M;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC28:
	case 0xE28:
	case 0x1828:
	case 0x1A28:
		rate[0] = DESC_RATE24M;
		rate[1] = DESC_RATE36M;
		rate[2] = DESC_RATE48M;
		rate[3] = DESC_RATE54M;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC2C:
	case 0xE2C:
	case 0x182C:
	case 0x1A2C:
		rate[0] = DESC_RATEMCS0;
		rate[1] = DESC_RATEMCS1;
		rate[2] = DESC_RATEMCS2;
		rate[3] = DESC_RATEMCS3;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC30:
	case 0xE30:
	case 0x1830:
	case 0x1A30:
		rate[0] = DESC_RATEMCS4;
		rate[1] = DESC_RATEMCS5;
		rate[2] = DESC_RATEMCS6;
		rate[3] = DESC_RATEMCS7;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC34:
	case 0xE34:
	case 0x1834:
	case 0x1A34:
		rate[0] = DESC_RATEMCS8;
		rate[1] = DESC_RATEMCS9;
		rate[2] = DESC_RATEMCS10;
		rate[3] = DESC_RATEMCS11;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC38:
	case 0xE38:
	case 0x1838:
	case 0x1A38:
		rate[0] = DESC_RATEMCS12;
		rate[1] = DESC_RATEMCS13;
		rate[2] = DESC_RATEMCS14;
		rate[3] = DESC_RATEMCS15;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC3C:
	case 0xE3C:
	case 0x183C:
	case 0x1A3C:
		rate[0] = DESC_RATEVHT1SS_MCS0;
		rate[1] = DESC_RATEVHT1SS_MCS1;
		rate[2] = DESC_RATEVHT1SS_MCS2;
		rate[3] = DESC_RATEVHT1SS_MCS3;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC40:
	case 0xE40:
	case 0x1840:
	case 0x1A40:
		rate[0] = DESC_RATEVHT1SS_MCS4;
		rate[1] = DESC_RATEVHT1SS_MCS5;
		rate[2] = DESC_RATEVHT1SS_MCS6;
		rate[3] = DESC_RATEVHT1SS_MCS7;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC44:
	case 0xE44:
	case 0x1844:
	case 0x1A44:
		rate[0] = DESC_RATEVHT1SS_MCS8;
		rate[1] = DESC_RATEVHT1SS_MCS9;
		rate[2] = DESC_RATEVHT2SS_MCS0;
		rate[3] = DESC_RATEVHT2SS_MCS1;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC48:
	case 0xE48:
	case 0x1848:
	case 0x1A48:
		rate[0] = DESC_RATEVHT2SS_MCS2;
		rate[1] = DESC_RATEVHT2SS_MCS3;
		rate[2] = DESC_RATEVHT2SS_MCS4;
		rate[3] = DESC_RATEVHT2SS_MCS5;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC4C:
	case 0xE4C:
	case 0x184C:
	case 0x1A4C:
		rate[0] = DESC_RATEVHT2SS_MCS6;
		rate[1] = DESC_RATEVHT2SS_MCS7;
		rate[2] = DESC_RATEVHT2SS_MCS8;
		rate[3] = DESC_RATEVHT2SS_MCS9;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xCD8:
	case 0xED8:
	case 0x18D8:
	case 0x1AD8:
		rate[0] = DESC_RATEMCS16;
		rate[1] = DESC_RATEMCS17;
		rate[2] = DESC_RATEMCS18;
		rate[3] = DESC_RATEMCS19;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xCDC:
	case 0xEDC:
	case 0x18DC:
	case 0x1ADC:
		rate[0] = DESC_RATEMCS20;
		rate[1] = DESC_RATEMCS21;
		rate[2] = DESC_RATEMCS22;
		rate[3] = DESC_RATEMCS23;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xCE0:
	case 0xEE0:
	case 0x18E0:
	case 0x1AE0:
		rate[0] = DESC_RATEVHT3SS_MCS0;
		rate[1] = DESC_RATEVHT3SS_MCS1;
		rate[2] = DESC_RATEVHT3SS_MCS2;
		rate[3] = DESC_RATEVHT3SS_MCS3;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xCE4:
	case 0xEE4:
	case 0x18E4:
	case 0x1AE4:
		rate[0] = DESC_RATEVHT3SS_MCS4;
		rate[1] = DESC_RATEVHT3SS_MCS5;
		rate[2] = DESC_RATEVHT3SS_MCS6;
		rate[3] = DESC_RATEVHT3SS_MCS7;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xCE8:
	case 0xEE8:
	case 0x18E8:
	case 0x1AE8:
		rate[0] = DESC_RATEVHT3SS_MCS8;
		rate[1] = DESC_RATEVHT3SS_MCS9;
		for (i = 0; i < 2; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 2;
		break;
	default:
		rtw_warn(rtwdev, "invalid tx power index addr 0x%08x\n", addr);
		break;
	}
}

static void rtw_phy_store_tx_power_by_rate(struct rtw_dev *rtwdev,
					   u32 band, u32 rfpath, u32 txnum,
					   u32 regaddr, u32 bitmask, u32 data)
{
	struct rtw_hal *hal = &rtwdev->hal;
	u8 rate_num = 0;
	u8 rate;
	u8 rates[RTW_RF_PATH_MAX] = {0};
	s8 offset;
	s8 pwr_by_rate[RTW_RF_PATH_MAX] = {0};
	int i;

	rtw_phy_get_rate_values_of_txpwr_by_rate(rtwdev, regaddr, bitmask, data,
						 rates, pwr_by_rate, &rate_num);

	if (WARN_ON(rfpath >= RTW_RF_PATH_MAX ||
		    (band != PHY_BAND_2G && band != PHY_BAND_5G) ||
		    rate_num > RTW_RF_PATH_MAX))
		return;

	for (i = 0; i < rate_num; i++) {
		offset = pwr_by_rate[i];
		rate = rates[i];
		if (band == PHY_BAND_2G)
			hal->tx_pwr_by_rate_offset_2g[rfpath][rate] = offset;
		else if (band == PHY_BAND_5G)
			hal->tx_pwr_by_rate_offset_5g[rfpath][rate] = offset;
		else
			continue;
	}
}

void rtw_parse_tbl_bb_pg(struct rtw_dev *rtwdev, const struct rtw_table *tbl)
{
	const struct phy_pg_cfg_pair *p = tbl->data;
	const struct phy_pg_cfg_pair *end = p + tbl->size / 6;

	BUILD_BUG_ON(sizeof(struct phy_pg_cfg_pair) != sizeof(u32) * 6);

	for (; p < end; p++) {
		if (p->addr == 0xfe || p->addr == 0xffe) {
			msleep(50);
			continue;
		}
		rtw_phy_store_tx_power_by_rate(rtwdev, p->band, p->rf_path,
					       p->tx_num, p->addr, p->bitmask,
					       p->data);
	}
}

static const u8 rtw_channel_idx_5g[RTW_MAX_CHANNEL_NUM_5G] = {
	36, 38, 40, 42, 44, 46, 48, /* Band 1 */
	52, 54, 56, 58, 60, 62, 64, /* Band 2 */
	100, 102, 104, 106, 108, 110, 112, /* Band 3 */
	116, 118, 120, 122, 124, 126, 128, /* Band 3 */
	132, 134, 136, 138, 140, 142, 144, /* Band 3 */
	149, 151, 153, 155, 157, 159, 161, /* Band 4 */
	165, 167, 169, 171, 173, 175, 177}; /* Band 4 */
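
/* Map a channel number to the index used by the tx power limit tables:
 * 2.4 GHz channels map to channel - 1, 5 GHz channels to their position in
 * rtw_channel_idx_5g above.  Returns -1 for unknown channels.
 */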
static int rtw_channel_to_idx(u8 band, u8 channel)
{
	int ch_idx;
	u8 n_channel;

	if (band == PHY_BAND_2G) {
		ch_idx = channel - 1;
		n_channel = RTW_MAX_CHANNEL_NUM_2G;
	} else if (band == PHY_BAND_5G) {
		n_channel = RTW_MAX_CHANNEL_NUM_5G;
		for (ch_idx = 0; ch_idx < n_channel; ch_idx++)
			if (rtw_channel_idx_5g[ch_idx] == channel)
				break;
	} else {
		return -1;
	}

	if (ch_idx >= n_channel)
		return -1;

	return ch_idx;
}

static void rtw_phy_set_tx_power_limit(struct rtw_dev *rtwdev, u8 regd, u8 band,
				       u8 bw, u8 rs, u8 ch, s8 pwr_limit)
{
	struct rtw_hal *hal = &rtwdev->hal;
	u8 max_power_index = rtwdev->chip->max_power_index;
	s8 ww;
	int ch_idx;

	pwr_limit = clamp_t(s8, pwr_limit,
			    -max_power_index, max_power_index);
	ch_idx = rtw_channel_to_idx(band, ch);

	if (regd >= RTW_REGD_MAX || bw >= RTW_CHANNEL_WIDTH_MAX ||
	    rs >= RTW_RATE_SECTION_MAX || ch_idx < 0) {
		WARN(1,
		     "wrong txpwr_lmt regd=%u, band=%u bw=%u, rs=%u, ch_idx=%u, pwr_limit=%d\n",
		     regd, band, bw, rs, ch_idx, pwr_limit);
		return;
	}

	if (band == PHY_BAND_2G) {
		hal->tx_pwr_limit_2g[regd][bw][rs][ch_idx] = pwr_limit;
		ww = hal->tx_pwr_limit_2g[RTW_REGD_WW][bw][rs][ch_idx];
		ww = min_t(s8, ww, pwr_limit);
		hal->tx_pwr_limit_2g[RTW_REGD_WW][bw][rs][ch_idx] = ww;
	} else if (band == PHY_BAND_5G) {
		hal->tx_pwr_limit_5g[regd][bw][rs][ch_idx] = pwr_limit;
		ww = hal->tx_pwr_limit_5g[RTW_REGD_WW][bw][rs][ch_idx];
		ww = min_t(s8, ww, pwr_limit);
		hal->tx_pwr_limit_5g[RTW_REGD_WW][bw][rs][ch_idx] = ww;
	}
}

/* cross-reference 5G power limits if values are not assigned */
static void
rtw_xref_5g_txpwr_lmt(struct rtw_dev *rtwdev, u8 regd,
		      u8 bw, u8 ch_idx, u8 rs_ht, u8 rs_vht)
{
	struct rtw_hal *hal = &rtwdev->hal;
	u8 max_power_index = rtwdev->chip->max_power_index;
	s8 lmt_ht = hal->tx_pwr_limit_5g[regd][bw][rs_ht][ch_idx];
	s8 lmt_vht = hal->tx_pwr_limit_5g[regd][bw][rs_vht][ch_idx];

	if (lmt_ht == lmt_vht)
		return;

	if (lmt_ht == max_power_index)
		hal->tx_pwr_limit_5g[regd][bw][rs_ht][ch_idx] = lmt_vht;

	else if (lmt_vht == max_power_index)
		hal->tx_pwr_limit_5g[regd][bw][rs_vht][ch_idx] = lmt_ht;
}

/* cross-reference power limits for ht and vht */
static void
rtw_xref_txpwr_lmt_by_rs(struct rtw_dev *rtwdev, u8 regd, u8 bw, u8 ch_idx)
{
	u8 rs_idx, rs_ht, rs_vht;
	u8 rs_cmp[2][2] = {{RTW_RATE_SECTION_HT_1S, RTW_RATE_SECTION_VHT_1S},
			   {RTW_RATE_SECTION_HT_2S, RTW_RATE_SECTION_VHT_2S} };

	for (rs_idx = 0; rs_idx < 2; rs_idx++) {
		rs_ht = rs_cmp[rs_idx][0];
		rs_vht = rs_cmp[rs_idx][1];

		rtw_xref_5g_txpwr_lmt(rtwdev, regd, bw, ch_idx, rs_ht, rs_vht);
	}
}

/* cross-reference power limits for 5G channels */
static void
rtw_xref_5g_txpwr_lmt_by_ch(struct rtw_dev *rtwdev, u8 regd, u8 bw)
{
	u8 ch_idx;

	for (ch_idx = 0; ch_idx < RTW_MAX_CHANNEL_NUM_5G; ch_idx++)
		rtw_xref_txpwr_lmt_by_rs(rtwdev, regd, bw, ch_idx);
}

/* cross-reference power limits for 20/40M bandwidth */
static void
rtw_xref_txpwr_lmt_by_bw(struct rtw_dev *rtwdev, u8 regd)
{
	u8 bw;

	for (bw = RTW_CHANNEL_WIDTH_20; bw <= RTW_CHANNEL_WIDTH_40; bw++)
		rtw_xref_5g_txpwr_lmt_by_ch(rtwdev, regd, bw);
}

/* cross-reference power limits */
static void rtw_xref_txpwr_lmt(struct rtw_dev *rtwdev)
{
	u8 regd;

	for (regd = 0; regd < RTW_REGD_MAX; regd++)
		rtw_xref_txpwr_lmt_by_bw(rtwdev, regd);
}

void rtw_parse_tbl_txpwr_lmt(struct rtw_dev *rtwdev,
			     const struct rtw_table *tbl)
{
	const struct rtw_txpwr_lmt_cfg_pair *p = tbl->data;
	const struct rtw_txpwr_lmt_cfg_pair *end = p + tbl->size;

	for (; p < end; p++) {
		rtw_phy_set_tx_power_limit(rtwdev, p->regd, p->band,
					   p->bw, p->rs, p->ch, p->txpwr_lmt);
	}

	rtw_xref_txpwr_lmt(rtwdev);
}

void rtw_phy_cfg_mac(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
		     u32 addr, u32 data)
{
	rtw_write8(rtwdev, addr, data);
}

void rtw_phy_cfg_agc(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
		     u32 addr, u32 data)
{
	rtw_write32(rtwdev, addr, data);
}

void rtw_phy_cfg_bb(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
		    u32 addr, u32 data)
{
	if (addr == 0xfe)
		msleep(50);
	else if (addr == 0xfd)
		mdelay(5);
	else if (addr == 0xfc)
		mdelay(1);
	else if (addr == 0xfb)
		usleep_range(50, 60);
	else if (addr == 0xfa)
		udelay(5);
	else if (addr == 0xf9)
		udelay(1);
	else
		rtw_write32(rtwdev, addr, data);
}

void rtw_phy_cfg_rf(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
		    u32 addr, u32 data)
{
	if (addr == 0xffe) {
		msleep(50);
	} else if (addr == 0xfe) {
		usleep_range(100, 110);
	} else {
		rtw_write_rf(rtwdev, tbl->rf_path, addr, RFREG_MASK, data);
		udelay(1);
	}
}

static void rtw_load_rfk_table(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;

	if (!chip->rfk_init_tbl)
		return;

	rtw_load_table(rtwdev, chip->rfk_init_tbl);
}

void rtw_phy_load_tables(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	u8 rf_path;

	rtw_load_table(rtwdev, chip->mac_tbl);
	rtw_load_table(rtwdev, chip->bb_tbl);
	rtw_load_table(rtwdev, chip->agc_tbl);
	rtw_load_rfk_table(rtwdev);

	for (rf_path = 0; rf_path < rtwdev->hal.rf_path_num; rf_path++) {
		const struct rtw_table *tbl;

		tbl = chip->rf_tbl[rf_path];
		rtw_load_table(rtwdev, tbl);
	}
}
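
/* Channels that share a group index share one base power entry in the
 * efuse TX power tables (cck_base[]/bw40_base[] in rtw_2g/5g_txpwr_idx),
 * so both 2.4 GHz and 5 GHz channels are folded into the same group
 * numbering here.
 */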
static u8 rtw_get_channel_group(u8 channel)
{
	switch (channel) {
	default:
		WARN_ON(1);
		/* fall through */
	case 1:
	case 2:
	case 36:
	case 38:
	case 40:
	case 42:
		return 0;
	case 3:
	case 4:
	case 5:
	case 44:
	case 46:
	case 48:
	case 50:
		return 1;
	case 6:
	case 7:
	case 8:
	case 52:
	case 54:
	case 56:
	case 58:
		return 2;
	case 9:
	case 10:
	case 11:
	case 60:
	case 62:
	case 64:
		return 3;
	case 12:
	case 13:
	case 100:
	case 102:
	case 104:
	case 106:
		return 4;
	case 14:
	case 108:
	case 110:
	case 112:
	case 114:
		return 5;
	case 116:
	case 118:
	case 120:
	case 122:
		return 6;
	case 124:
	case 126:
	case 128:
	case 130:
		return 7;
	case 132:
	case 134:
	case 136:
	case 138:
		return 8;
	case 140:
	case 142:
	case 144:
		return 9;
	case 149:
	case 151:
	case 153:
	case 155:
		return 10;
	case 157:
	case 159:
	case 161:
		return 11;
	case 165:
	case 167:
	case 169:
	case 171:
		return 12;
	case 173:
	case 175:
	case 177:
		return 13;
	}
}

static u8 rtw_phy_get_2g_tx_power_index(struct rtw_dev *rtwdev,
					struct rtw_2g_txpwr_idx *pwr_idx_2g,
					enum rtw_bandwidth bandwidth,
					u8 rate, u8 group)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	u8 tx_power;
	bool mcs_rate;
	bool above_2ss;
	u8 factor = chip->txgi_factor;

	if (rate <= DESC_RATE11M)
		tx_power = pwr_idx_2g->cck_base[group];
	else
		tx_power = pwr_idx_2g->bw40_base[group];

	if (rate >= DESC_RATE6M && rate <= DESC_RATE54M)
		tx_power += pwr_idx_2g->ht_1s_diff.ofdm * factor;

	mcs_rate = (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS15) ||
		   (rate >= DESC_RATEVHT1SS_MCS0 &&
		    rate <= DESC_RATEVHT2SS_MCS9);
	above_2ss = (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15) ||
		    (rate >= DESC_RATEVHT2SS_MCS0);

	if (!mcs_rate)
		return tx_power;

	switch (bandwidth) {
	default:
		WARN_ON(1);
		/* fall through */
	case RTW_CHANNEL_WIDTH_20:
		tx_power += pwr_idx_2g->ht_1s_diff.bw20 * factor;
		if (above_2ss)
			tx_power += pwr_idx_2g->ht_2s_diff.bw20 * factor;
		break;
	case RTW_CHANNEL_WIDTH_40:
		/* bw40 is the base power */
		if (above_2ss)
			tx_power += pwr_idx_2g->ht_2s_diff.bw40 * factor;
		break;
	}

	return tx_power;
}

static u8 rtw_phy_get_5g_tx_power_index(struct rtw_dev *rtwdev,
					struct rtw_5g_txpwr_idx *pwr_idx_5g,
					enum rtw_bandwidth bandwidth,
					u8 rate, u8 group)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	u8 tx_power;
	u8 upper, lower;
	bool mcs_rate;
	bool above_2ss;
	u8 factor = chip->txgi_factor;

	tx_power = pwr_idx_5g->bw40_base[group];

	mcs_rate = (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS15) ||
		   (rate >= DESC_RATEVHT1SS_MCS0 &&
		    rate <= DESC_RATEVHT2SS_MCS9);
	above_2ss = (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15) ||
		    (rate >= DESC_RATEVHT2SS_MCS0);

	if (!mcs_rate) {
		tx_power += pwr_idx_5g->ht_1s_diff.ofdm * factor;
		return tx_power;
	}

	switch (bandwidth) {
	default:
		WARN_ON(1);
		/* fall through */
	case RTW_CHANNEL_WIDTH_20:
		tx_power += pwr_idx_5g->ht_1s_diff.bw20 * factor;
		if (above_2ss)
			tx_power += pwr_idx_5g->ht_2s_diff.bw20 * factor;
		break;
	case RTW_CHANNEL_WIDTH_40:
		/* bw40 is the base power */
		if (above_2ss)
			tx_power += pwr_idx_5g->ht_2s_diff.bw40 * factor;
		break;
	case RTW_CHANNEL_WIDTH_80:
		/* the base idx of bw80 is the average of bw40+/bw40- */
		lower = pwr_idx_5g->bw40_base[group];
		upper = pwr_idx_5g->bw40_base[group + 1];

		tx_power = (lower + upper) / 2;
		tx_power += pwr_idx_5g->vht_1s_diff.bw80 * factor;
		if (above_2ss)
			tx_power += pwr_idx_5g->vht_2s_diff.bw80 * factor;
		break;
	}

	return tx_power;
}
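
/* Look up the regulatory TX power limit for a rate on the current channel.
 * CCK/OFDM rates only have 20 MHz entries and HT rates only 20/40 MHz, so
 * the requested bandwidth is clamped first; the limit returned is the
 * minimum over every bandwidth from 20 MHz up to the (clamped) current one,
 * using the per-bandwidth center channels cached in hal->cch_by_bw.
 */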
static s8 rtw_phy_get_tx_power_limit(struct rtw_dev *rtwdev, u8 band,
				     enum rtw_bandwidth bw, u8 rf_path,
				     u8 rate, u8 channel, u8 regd)
{
	struct rtw_hal *hal = &rtwdev->hal;
	u8 *cch_by_bw = hal->cch_by_bw;
	s8 power_limit = (s8)rtwdev->chip->max_power_index;
	u8 rs;
	int ch_idx;
	u8 cur_bw, cur_ch;
	s8 cur_lmt;

	if (regd > RTW_REGD_WW)
		return power_limit;

	if (rate >= DESC_RATE1M && rate <= DESC_RATE11M)
		rs = RTW_RATE_SECTION_CCK;
	else if (rate >= DESC_RATE6M && rate <= DESC_RATE54M)
		rs = RTW_RATE_SECTION_OFDM;
	else if (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS7)
		rs = RTW_RATE_SECTION_HT_1S;
	else if (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15)
		rs = RTW_RATE_SECTION_HT_2S;
	else if (rate >= DESC_RATEVHT1SS_MCS0 && rate <= DESC_RATEVHT1SS_MCS9)
		rs = RTW_RATE_SECTION_VHT_1S;
	else if (rate >= DESC_RATEVHT2SS_MCS0 && rate <= DESC_RATEVHT2SS_MCS9)
		rs = RTW_RATE_SECTION_VHT_2S;
	else
		goto err;

	/* only 20M BW with cck and ofdm */
	if (rs == RTW_RATE_SECTION_CCK || rs == RTW_RATE_SECTION_OFDM)
		bw = RTW_CHANNEL_WIDTH_20;

	/* only 20/40M BW with ht */
	if (rs == RTW_RATE_SECTION_HT_1S || rs == RTW_RATE_SECTION_HT_2S)
		bw = min_t(u8, bw, RTW_CHANNEL_WIDTH_40);

	/* select min power limit among [20M BW ~ current BW] */
	for (cur_bw = RTW_CHANNEL_WIDTH_20; cur_bw <= bw; cur_bw++) {
		cur_ch = cch_by_bw[cur_bw];

		ch_idx = rtw_channel_to_idx(band, cur_ch);
		if (ch_idx < 0)
			goto err;

		cur_lmt = cur_ch <= RTW_MAX_CHANNEL_NUM_2G ?
			hal->tx_pwr_limit_2g[regd][cur_bw][rs][ch_idx] :
			hal->tx_pwr_limit_5g[regd][cur_bw][rs][ch_idx];

		power_limit = min_t(s8, cur_lmt, power_limit);
	}

	return power_limit;

err:
	WARN(1, "invalid arguments, band=%d, bw=%d, path=%d, rate=%d, ch=%d\n",
	     band, bw, rf_path, rate, channel);
	return (s8)rtwdev->chip->max_power_index;
}

void rtw_get_tx_power_params(struct rtw_dev *rtwdev, u8 path, u8 rate, u8 bw,
			     u8 ch, u8 regd, struct rtw_power_params *pwr_param)
{
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_txpwr_idx *pwr_idx;
	u8 group, band;
	u8 *base = &pwr_param->pwr_base;
	s8 *offset = &pwr_param->pwr_offset;
	s8 *limit = &pwr_param->pwr_limit;

	pwr_idx = &rtwdev->efuse.txpwr_idx_table[path];
	group = rtw_get_channel_group(ch);

	/* base power index for 2.4G/5G */
	if (ch <= 14) {
		band = PHY_BAND_2G;
		*base = rtw_phy_get_2g_tx_power_index(rtwdev,
						      &pwr_idx->pwr_idx_2g,
						      bw, rate, group);
		*offset = hal->tx_pwr_by_rate_offset_2g[path][rate];
	} else {
		band = PHY_BAND_5G;
		*base = rtw_phy_get_5g_tx_power_index(rtwdev,
						      &pwr_idx->pwr_idx_5g,
						      bw, rate, group);
		*offset = hal->tx_pwr_by_rate_offset_5g[path][rate];
	}

	*limit = rtw_phy_get_tx_power_limit(rtwdev, band, bw, path,
					    rate, ch, regd);
}
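
/* Final TX power index = efuse base index + min(by-rate offset, regulatory
 * limit), capped at the chip's maximum programmable index.  Both the offset
 * and the limit are stored relative to the per-section base power (see
 * rtw_phy_tx_power_by_rate_config() and rtw_phy_tx_power_limit_config()).
 */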
u8
rtw_phy_get_tx_power_index(struct rtw_dev *rtwdev, u8 rf_path, u8 rate,
			   enum rtw_bandwidth bandwidth, u8 channel, u8 regd)
{
	struct rtw_power_params pwr_param = {0};
	u8 tx_power;
	s8 offset;

	rtw_get_tx_power_params(rtwdev, rf_path, rate, bandwidth,
				channel, regd, &pwr_param);

	tx_power = pwr_param.pwr_base;
	offset = min_t(s8, pwr_param.pwr_offset, pwr_param.pwr_limit);

	tx_power += offset;

	if (tx_power > rtwdev->chip->max_power_index)
		tx_power = rtwdev->chip->max_power_index;

	return tx_power;
}

static void rtw_phy_set_tx_power_index_by_rs(struct rtw_dev *rtwdev,
					     u8 ch, u8 path, u8 rs)
{
	struct rtw_hal *hal = &rtwdev->hal;
	u8 regd = rtwdev->regd.txpwr_regd;
	u8 *rates;
	u8 size;
	u8 rate;
	u8 pwr_idx;
	u8 bw;
	int i;

	if (rs >= RTW_RATE_SECTION_MAX)
		return;

	rates = rtw_rate_section[rs];
	size = rtw_rate_size[rs];
	bw = hal->current_band_width;
	for (i = 0; i < size; i++) {
		rate = rates[i];
		pwr_idx = rtw_phy_get_tx_power_index(rtwdev, path, rate,
						     bw, ch, regd);
		hal->tx_pwr_tbl[path][rate] = pwr_idx;
	}
}

/* Set the tx power level by path for each rate.  Note that the order of the
 * rates is *very* important, because 8822B/8821C combines every four bytes of
 * tx power index into a four-byte power index register, and calls set_tx_agc
 * to write these values into hardware.
 */
static void rtw_phy_set_tx_power_level_by_path(struct rtw_dev *rtwdev,
					       u8 ch, u8 path)
{
	struct rtw_hal *hal = &rtwdev->hal;
	u8 rs;

	/* do not need cck rates if we are not on the 2.4G band */
	if (hal->current_band_type == RTW_BAND_2G)
		rs = RTW_RATE_SECTION_CCK;
	else
		rs = RTW_RATE_SECTION_OFDM;

	for (; rs < RTW_RATE_SECTION_MAX; rs++)
		rtw_phy_set_tx_power_index_by_rs(rtwdev, ch, path, rs);
}

void rtw_phy_set_tx_power_level(struct rtw_dev *rtwdev, u8 channel)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_hal *hal = &rtwdev->hal;
	u8 path;

	mutex_lock(&hal->tx_power_mutex);

	for (path = 0; path < hal->rf_path_num; path++)
		rtw_phy_set_tx_power_level_by_path(rtwdev, channel, path);

	chip->ops->set_tx_power_index(rtwdev);
	mutex_unlock(&hal->tx_power_mutex);
}

static void
rtw_phy_tx_power_by_rate_config_by_path(struct rtw_hal *hal, u8 path,
					u8 rs, u8 size, u8 *rates)
{
	u8 rate;
	u8 base_idx, rate_idx;
	s8 base_2g, base_5g;

	if (rs >= RTW_RATE_SECTION_VHT_1S)
		base_idx = rates[size - 3];
	else
		base_idx = rates[size - 1];
	base_2g = hal->tx_pwr_by_rate_offset_2g[path][base_idx];
	base_5g = hal->tx_pwr_by_rate_offset_5g[path][base_idx];
	hal->tx_pwr_by_rate_base_2g[path][rs] = base_2g;
	hal->tx_pwr_by_rate_base_5g[path][rs] = base_5g;
	for (rate = 0; rate < size; rate++) {
		rate_idx = rates[rate];
		hal->tx_pwr_by_rate_offset_2g[path][rate_idx] -= base_2g;
		hal->tx_pwr_by_rate_offset_5g[path][rate_idx] -= base_5g;
	}
}

void rtw_phy_tx_power_by_rate_config(struct rtw_hal *hal)
{
	u8 path;

	for (path = 0; path < RTW_RF_PATH_MAX; path++) {
		rtw_phy_tx_power_by_rate_config_by_path(hal, path,
				RTW_RATE_SECTION_CCK,
				rtw_cck_size, rtw_cck_rates);
		rtw_phy_tx_power_by_rate_config_by_path(hal, path,
				RTW_RATE_SECTION_OFDM,
				rtw_ofdm_size, rtw_ofdm_rates);
		rtw_phy_tx_power_by_rate_config_by_path(hal, path,
				RTW_RATE_SECTION_HT_1S,
				rtw_ht_1s_size, rtw_ht_1s_rates);
		rtw_phy_tx_power_by_rate_config_by_path(hal, path,
				RTW_RATE_SECTION_HT_2S,
				rtw_ht_2s_size, rtw_ht_2s_rates);
		rtw_phy_tx_power_by_rate_config_by_path(hal, path,
				RTW_RATE_SECTION_VHT_1S,
				rtw_vht_1s_size, rtw_vht_1s_rates);
		rtw_phy_tx_power_by_rate_config_by_path(hal, path,
				RTW_RATE_SECTION_VHT_2S,
				rtw_vht_2s_size, rtw_vht_2s_rates);
	}
}
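
/* Rebase the parsed limits from absolute power indexes to offsets relative
 * to the per-section by-rate base, so they can be compared directly with
 * the by-rate offsets in rtw_phy_get_tx_power_index().
 */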
static void
__rtw_phy_tx_power_limit_config(struct rtw_hal *hal, u8 regd, u8 bw, u8 rs)
{
	s8 base;
	u8 ch;

	for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_2G; ch++) {
		base = hal->tx_pwr_by_rate_base_2g[0][rs];
		hal->tx_pwr_limit_2g[regd][bw][rs][ch] -= base;
	}

	for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_5G; ch++) {
		base = hal->tx_pwr_by_rate_base_5g[0][rs];
		hal->tx_pwr_limit_5g[regd][bw][rs][ch] -= base;
	}
}

void rtw_phy_tx_power_limit_config(struct rtw_hal *hal)
{
	u8 regd, bw, rs;

	/* default at channel 1 */
	hal->cch_by_bw[RTW_CHANNEL_WIDTH_20] = 1;

	for (regd = 0; regd < RTW_REGD_MAX; regd++)
		for (bw = 0; bw < RTW_CHANNEL_WIDTH_MAX; bw++)
			for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++)
				__rtw_phy_tx_power_limit_config(hal, regd, bw, rs);
}

static void rtw_phy_init_tx_power_limit(struct rtw_dev *rtwdev,
					u8 regd, u8 bw, u8 rs)
{
	struct rtw_hal *hal = &rtwdev->hal;
	s8 max_power_index = (s8)rtwdev->chip->max_power_index;
	u8 ch;

	/* 2.4G channels */
	for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_2G; ch++)
		hal->tx_pwr_limit_2g[regd][bw][rs][ch] = max_power_index;

	/* 5G channels */
	for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_5G; ch++)
		hal->tx_pwr_limit_5g[regd][bw][rs][ch] = max_power_index;
}

void rtw_phy_init_tx_power(struct rtw_dev *rtwdev)
{
	struct rtw_hal *hal = &rtwdev->hal;
	u8 regd, path, rate, rs, bw;

	/* init tx power by rate offset */
	for (path = 0; path < RTW_RF_PATH_MAX; path++) {
		for (rate = 0; rate < DESC_RATE_MAX; rate++) {
			hal->tx_pwr_by_rate_offset_2g[path][rate] = 0;
			hal->tx_pwr_by_rate_offset_5g[path][rate] = 0;
		}
	}

	/* init tx power limit */
	for (regd = 0; regd < RTW_REGD_MAX; regd++)
		for (bw = 0; bw < RTW_CHANNEL_WIDTH_MAX; bw++)
			for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++)
				rtw_phy_init_tx_power_limit(rtwdev, regd, bw,
							    rs);
}