// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019  Realtek Corporation
 */

#include <linux/bcd.h>

#include "main.h"
#include "reg.h"
#include "fw.h"
#include "phy.h"
#include "debug.h"

struct phy_cfg_pair {
	u32 addr;
	u32 data;
};

union phy_table_tile {
	struct rtw_phy_cond cond;
	struct phy_cfg_pair cfg;
};

struct phy_pg_cfg_pair {
	u32 band;
	u32 rf_path;
	u32 tx_num;
	u32 addr;
	u32 bitmask;
	u32 data;
};

struct txpwr_lmt_cfg_pair {
	u8 regd;
	u8 band;
	u8 bw;
	u8 rs;
	u8 ch;
	s8 txpwr_lmt;
};

/* db_invert_table[i][j] ~= 10^((8 * i + j + 1) / 10); the first three rows
 * are pre-scaled by 2^FRAC_BITS (8) so that small values keep some precision
 * (see rtw_phy_db_2_linear()).
 */
static const u32 db_invert_table[12][8] = {
	{10, 13, 16, 20,
	 25, 32, 40, 50},
	{64, 80, 101, 128,
	 160, 201, 256, 318},
	{401, 505, 635, 800,
	 1007, 1268, 1596, 2010},
	{316, 398, 501, 631,
	 794, 1000, 1259, 1585},
	{1995, 2512, 3162, 3981,
	 5012, 6310, 7943, 10000},
	{12589, 15849, 19953, 25119,
	 31623, 39811, 50119, 63098},
	{79433, 100000, 125893, 158489,
	 199526, 251189, 316228, 398107},
	{501187, 630957, 794328, 1000000,
	 1258925, 1584893, 1995262, 2511886},
	{3162278, 3981072, 5011872, 6309573,
	 7943282, 10000000, 12589254, 15848932},
	{19952623, 25118864, 31622777, 39810717,
	 50118723, 63095734, 79432823, 100000000},
	{125892541, 158489319, 199526232, 251188643,
	 316227766, 398107171, 501187234, 630957345},
	{794328235, 1000000000, 1258925412, 1584893192,
	 1995262315, 2511886432U, 3162277660U, 3981071706U}
};

enum rtw_phy_band_type {
	PHY_BAND_2G = 0,
	PHY_BAND_5G = 1,
};

void rtw_phy_init(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
	u32 addr, mask;

	dm_info->fa_history[3] = 0;
	dm_info->fa_history[2] = 0;
	dm_info->fa_history[1] = 0;
	dm_info->fa_history[0] = 0;
	dm_info->igi_bitmap = 0;
	dm_info->igi_history[3] = 0;
	dm_info->igi_history[2] = 0;
	dm_info->igi_history[1] = 0;

	addr = chip->dig[0].addr;
	mask = chip->dig[0].mask;
	dm_info->igi_history[0] = rtw_read32_mask(rtwdev, addr, mask);
}

void rtw_phy_dig_write(struct rtw_dev *rtwdev, u8 igi)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_hal *hal = &rtwdev->hal;
	u32 addr, mask;
	u8 path;

	for (path = 0; path < hal->rf_path_num; path++) {
		addr = chip->dig[path].addr;
		mask = chip->dig[path].mask;
		rtw_write32_mask(rtwdev, addr, mask, igi);
	}
}

static void rtw_phy_stat_false_alarm(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;

	chip->ops->false_alarm_statistics(rtwdev);
}

#define RA_FLOOR_TABLE_SIZE	7
#define RA_FLOOR_UP_GAP		3

static u8 rtw_phy_get_rssi_level(u8 old_level, u8 rssi)
{
	u8 table[RA_FLOOR_TABLE_SIZE] = {20, 34, 38, 42, 46, 50, 100};
	u8 new_level = 0;
	int i;

	for (i = 0; i < RA_FLOOR_TABLE_SIZE; i++)
		if (i >= old_level)
			table[i] += RA_FLOOR_UP_GAP;

	for (i = 0; i < RA_FLOOR_TABLE_SIZE; i++) {
		if (rssi < table[i]) {
			new_level = i;
			break;
		}
	}

	return new_level;
}

struct rtw_phy_stat_iter_data {
	struct rtw_dev *rtwdev;
	u8 min_rssi;
};

static void rtw_phy_stat_rssi_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw_phy_stat_iter_data *iter_data = data;
	struct rtw_dev *rtwdev = iter_data->rtwdev;
	struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
	u8 rssi;

	rssi = ewma_rssi_read(&si->avg_rssi);
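
	/* Fold the averaged RSSI into a coarse level with hysteresis
	 * (rtw_phy_get_rssi_level()) and report it to the firmware; the
	 * level only changes when the RSSI crosses a floor, which avoids
	 * flapping on small fluctuations.
	 */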
	si->rssi_level = rtw_phy_get_rssi_level(si->rssi_level, rssi);

	rtw_fw_send_rssi_info(rtwdev, si);

	iter_data->min_rssi = min_t(u8, rssi, iter_data->min_rssi);
}

static void rtw_phy_stat_rssi(struct rtw_dev *rtwdev)
{
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
	struct rtw_phy_stat_iter_data data = {};

	data.rtwdev = rtwdev;
	data.min_rssi = U8_MAX;
	rtw_iterate_stas_atomic(rtwdev, rtw_phy_stat_rssi_iter, &data);

	dm_info->pre_min_rssi = dm_info->min_rssi;
	dm_info->min_rssi = data.min_rssi;
}

static void rtw_phy_statistics(struct rtw_dev *rtwdev)
{
	rtw_phy_stat_rssi(rtwdev);
	rtw_phy_stat_false_alarm(rtwdev);
}

#define DIG_PERF_FA_TH_LOW		250
#define DIG_PERF_FA_TH_HIGH		500
#define DIG_PERF_FA_TH_EXTRA_HIGH	750
#define DIG_PERF_MAX			0x5a
#define DIG_PERF_MID			0x40
#define DIG_CVRG_FA_TH_LOW		2000
#define DIG_CVRG_FA_TH_HIGH		4000
#define DIG_CVRG_FA_TH_EXTRA_HIGH	5000
#define DIG_CVRG_MAX			0x2a
#define DIG_CVRG_MID			0x26
#define DIG_CVRG_MIN			0x1c
#define DIG_RSSI_GAIN_OFFSET		15

static bool
rtw_phy_dig_check_damping(struct rtw_dm_info *dm_info)
{
	u16 fa_lo = DIG_PERF_FA_TH_LOW;
	u16 fa_hi = DIG_PERF_FA_TH_HIGH;
	u16 *fa_history;
	u8 *igi_history;
	u8 damping_rssi;
	u8 min_rssi;
	u8 diff;
	u8 igi_bitmap;
	bool damping = false;

	min_rssi = dm_info->min_rssi;
	if (dm_info->damping) {
		damping_rssi = dm_info->damping_rssi;
		diff = min_rssi > damping_rssi ? min_rssi - damping_rssi :
						 damping_rssi - min_rssi;
		if (diff > 3 || dm_info->damping_cnt++ > 20) {
			dm_info->damping = false;
			return false;
		}

		return true;
	}

	igi_history = dm_info->igi_history;
	fa_history = dm_info->fa_history;
	igi_bitmap = dm_info->igi_bitmap & 0xf;
	switch (igi_bitmap) {
	case 5:
		/* down -> up -> down -> up */
		if (igi_history[0] > igi_history[1] &&
		    igi_history[2] > igi_history[3] &&
		    igi_history[0] - igi_history[1] >= 2 &&
		    igi_history[2] - igi_history[3] >= 2 &&
		    fa_history[0] > fa_hi && fa_history[1] < fa_lo &&
		    fa_history[2] > fa_hi && fa_history[3] < fa_lo)
			damping = true;
		break;
	case 9:
		/* up -> down -> down -> up */
		if (igi_history[0] > igi_history[1] &&
		    igi_history[3] > igi_history[2] &&
		    igi_history[0] - igi_history[1] >= 4 &&
		    igi_history[3] - igi_history[2] >= 2 &&
		    fa_history[0] > fa_hi && fa_history[1] < fa_lo &&
		    fa_history[2] < fa_lo && fa_history[3] > fa_hi)
			damping = true;
		break;
	default:
		return false;
	}

	if (damping) {
		dm_info->damping = true;
		dm_info->damping_cnt = 0;
		dm_info->damping_rssi = min_rssi;
	}

	return damping;
}

static void rtw_phy_dig_get_boundary(struct rtw_dm_info *dm_info,
				     u8 *upper, u8 *lower, bool linked)
{
	u8 dig_max, dig_min, dig_mid;
	u8 min_rssi;

	if (linked) {
		dig_max = DIG_PERF_MAX;
		dig_mid = DIG_PERF_MID;
		/* 22B=0x1c, 22C=0x20 */
		dig_min = 0x1c;
		min_rssi = max_t(u8, dm_info->min_rssi, dig_min);
	} else {
		dig_max = DIG_CVRG_MAX;
		dig_mid = DIG_CVRG_MID;
		dig_min = DIG_CVRG_MIN;
		min_rssi = dig_min;
	}

	/* DIG MAX should be bounded by minimum RSSI with offset +15 */
	dig_max = min_t(u8, dig_max, min_rssi + DIG_RSSI_GAIN_OFFSET);

	*lower = clamp_t(u8, min_rssi, dig_min, dig_mid);
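	/* keep a fixed DIG_RSSI_GAIN_OFFSET window above the lower bound,
	 * but never exceed the dig_max derived above
	 */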
	*upper = clamp_t(u8, *lower + DIG_RSSI_GAIN_OFFSET, dig_min, dig_max);
}

static void rtw_phy_dig_get_threshold(struct rtw_dm_info *dm_info,
				      u16 *fa_th, u8 *step, bool linked)
{
	u8 min_rssi, pre_min_rssi;

	min_rssi = dm_info->min_rssi;
	pre_min_rssi = dm_info->pre_min_rssi;
	step[0] = 4;
	step[1] = 3;
	step[2] = 2;

	if (linked) {
		fa_th[0] = DIG_PERF_FA_TH_EXTRA_HIGH;
		fa_th[1] = DIG_PERF_FA_TH_HIGH;
		fa_th[2] = DIG_PERF_FA_TH_LOW;
		if (pre_min_rssi > min_rssi) {
			step[0] = 6;
			step[1] = 4;
			step[2] = 2;
		}
	} else {
		fa_th[0] = DIG_CVRG_FA_TH_EXTRA_HIGH;
		fa_th[1] = DIG_CVRG_FA_TH_HIGH;
		fa_th[2] = DIG_CVRG_FA_TH_LOW;
	}
}

static void rtw_phy_dig_recorder(struct rtw_dm_info *dm_info, u8 igi, u16 fa)
{
	u8 *igi_history;
	u16 *fa_history;
	u8 igi_bitmap;
	bool up;

	igi_bitmap = dm_info->igi_bitmap << 1 & 0xfe;
	igi_history = dm_info->igi_history;
	fa_history = dm_info->fa_history;

	up = igi > igi_history[0];
	igi_bitmap |= up;

	igi_history[3] = igi_history[2];
	igi_history[2] = igi_history[1];
	igi_history[1] = igi_history[0];
	igi_history[0] = igi;

	fa_history[3] = fa_history[2];
	fa_history[2] = fa_history[1];
	fa_history[1] = fa_history[0];
	fa_history[0] = fa;

	dm_info->igi_bitmap = igi_bitmap;
}

static void rtw_phy_dig(struct rtw_dev *rtwdev)
{
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
	u8 upper_bound, lower_bound;
	u8 pre_igi, cur_igi;
	u16 fa_th[3], fa_cnt;
	u8 level;
	u8 step[3];
	bool linked;

	if (rtw_flag_check(rtwdev, RTW_FLAG_DIG_DISABLE))
		return;

	if (rtw_phy_dig_check_damping(dm_info))
		return;

	linked = !!rtwdev->sta_cnt;

	fa_cnt = dm_info->total_fa_cnt;
	pre_igi = dm_info->igi_history[0];

	rtw_phy_dig_get_threshold(dm_info, fa_th, step, linked);

	/* test the false alarm count from the highest threshold level first,
	 * and increase the IGI by the corresponding step size
	 *
	 * note that the step sizes are offset by -2, compensate for it
	 * afterwards
	 */
	cur_igi = pre_igi;
	for (level = 0; level < 3; level++) {
		if (fa_cnt > fa_th[level]) {
			cur_igi += step[level];
			break;
		}
	}
	cur_igi -= 2;

	/* calculate the upper/lower bound from the minimum RSSI among the
	 * peers connected to us, and make sure the IGI value does not go
	 * beyond the hardware limitation
	 */
	rtw_phy_dig_get_boundary(dm_info, &upper_bound, &lower_bound, linked);
	cur_igi = clamp_t(u8, cur_igi, lower_bound, upper_bound);

	/* record the current IGI value and false alarm statistics for further
	 * damping checks, and record the trend of IGI values
	 */
	rtw_phy_dig_recorder(dm_info, cur_igi, fa_cnt);

	if (cur_igi != pre_igi)
		rtw_phy_dig_write(rtwdev, cur_igi);
}

static void rtw_phy_ra_info_update_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw_dev *rtwdev = data;
	struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;

	rtw_update_sta_info(rtwdev, si);
}

static void rtw_phy_ra_info_update(struct rtw_dev *rtwdev)
{
	if (rtwdev->watch_dog_cnt & 0x3)
		return;

	rtw_iterate_stas_atomic(rtwdev, rtw_phy_ra_info_update_iter, rtwdev);
}

void rtw_phy_dynamic_mechanism(struct rtw_dev *rtwdev)
{
	/* for further calculation */
	rtw_phy_statistics(rtwdev);
	rtw_phy_dig(rtwdev);
	rtw_phy_ra_info_update(rtwdev);
}
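
/* RSSI is derived from the per-path RF powers by averaging in the linear
 * domain: rtw_phy_power_2_db() re-bases the (negative) dBm reading onto a
 * 0..100 dB scale, rtw_phy_db_2_linear()/rtw_phy_linear_2_db() convert
 * between that scale and a linear value carrying FRAC_BITS fractional bits,
 * and rtw_phy_rf_power_2_rssi() sums the linear powers, divides by the
 * number of paths (approximately, via shifts) and converts back to dB.
 */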
#define FRAC_BITS 3

static u8 rtw_phy_power_2_db(s8 power)
{
	if (power <= -100 || power >= 20)
		return 0;
	else if (power >= 0)
		return 100;
	else
		return 100 + power;
}

static u64 rtw_phy_db_2_linear(u8 power_db)
{
	u8 i, j;
	u64 linear;

	if (power_db > 96)
		power_db = 96;
	else if (power_db < 1)
		return 1;

	/* 1dB ~ 96dB */
	i = (power_db - 1) >> 3;
	j = (power_db - 1) - (i << 3);

	linear = db_invert_table[i][j];
	linear = i > 2 ? linear << FRAC_BITS : linear;

	return linear;
}

static u8 rtw_phy_linear_2_db(u64 linear)
{
	u8 i;
	u8 j;
	u32 dB;

	if (linear >= db_invert_table[11][7])
		return 96; /* maximum 96 dB */

	for (i = 0; i < 12; i++) {
		if (i <= 2 && (linear << FRAC_BITS) <= db_invert_table[i][7])
			break;
		else if (i > 2 && linear <= db_invert_table[i][7])
			break;
	}

	for (j = 0; j < 8; j++) {
		if (i <= 2 && (linear << FRAC_BITS) <= db_invert_table[i][j])
			break;
		else if (i > 2 && linear <= db_invert_table[i][j])
			break;
	}

	if (j == 0 && i == 0)
		goto end;

	if (j == 0) {
		if (i != 3) {
			if (db_invert_table[i][0] - linear >
			    linear - db_invert_table[i - 1][7]) {
				i = i - 1;
				j = 7;
			}
		} else {
			if (db_invert_table[3][0] - linear >
			    linear - db_invert_table[2][7]) {
				i = 2;
				j = 7;
			}
		}
	} else {
		if (db_invert_table[i][j] - linear >
		    linear - db_invert_table[i][j - 1]) {
			j = j - 1;
		}
	}
end:
	dB = (i << 3) + j + 1;

	return dB;
}

u8 rtw_phy_rf_power_2_rssi(s8 *rf_power, u8 path_num)
{
	s8 power;
	u8 power_db;
	u64 linear;
	u64 sum = 0;
	u8 path;

	for (path = 0; path < path_num; path++) {
		power = rf_power[path];
		power_db = rtw_phy_power_2_db(power);
		linear = rtw_phy_db_2_linear(power_db);
		sum += linear;
	}

	/* round the accumulated linear power and drop the fractional bits */
	sum = (sum + (1 << (FRAC_BITS - 1))) >> FRAC_BITS;
	switch (path_num) {
	case 2:
		sum >>= 1;
		break;
	case 3:
		/* sum * 11 / 32 approximates a division by 3 */
		sum = ((sum) + ((sum) << 1) + ((sum) << 3)) >> 5;
		break;
	case 4:
		sum >>= 2;
		break;
	default:
		break;
	}

	return rtw_phy_linear_2_db(sum);
}

u32 rtw_phy_read_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
		    u32 addr, u32 mask)
{
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_chip_info *chip = rtwdev->chip;
	const u32 *base_addr = chip->rf_base_addr;
	u32 val, direct_addr;

	if (rf_path >= hal->rf_path_num) {
		rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return INV_RF_DATA;
	}

	addr &= 0xff;
	direct_addr = base_addr[rf_path] + (addr << 2);
	mask &= RFREG_MASK;

	val = rtw_read32_mask(rtwdev, direct_addr, mask);

	return val;
}
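
/* RF registers can be written either through the indirect SIPI interface
 * (the address and 20-bit data are packed into one 32-bit write to the
 * per-path rf_sipi_addr register) or, as in rtw_phy_write_rf_reg(), through
 * the direct-address window at rf_base_addr, with DISABLE_PI/ENABLE_PI
 * toggled around the access.
 */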
bool rtw_phy_write_rf_reg_sipi(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
			       u32 addr, u32 mask, u32 data)
{
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_chip_info *chip = rtwdev->chip;
	u32 *sipi_addr = chip->rf_sipi_addr;
	u32 data_and_addr;
	u32 old_data = 0;
	u32 shift;

	if (rf_path >= hal->rf_path_num) {
		rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return false;
	}

	addr &= 0xff;
	mask &= RFREG_MASK;

	if (mask != RFREG_MASK) {
		old_data = rtw_phy_read_rf(rtwdev, rf_path, addr, RFREG_MASK);

		if (old_data == INV_RF_DATA) {
			rtw_err(rtwdev, "Write fail, rf is disabled\n");
			return false;
		}

		shift = __ffs(mask);
		data = ((old_data) & (~mask)) | (data << shift);
	}

	data_and_addr = ((addr << 20) | (data & 0x000fffff)) & 0x0fffffff;

	rtw_write32(rtwdev, sipi_addr[rf_path], data_and_addr);

	udelay(13);

	return true;
}

bool rtw_phy_write_rf_reg(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
			  u32 addr, u32 mask, u32 data)
{
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_chip_info *chip = rtwdev->chip;
	const u32 *base_addr = chip->rf_base_addr;
	u32 direct_addr;

	if (rf_path >= hal->rf_path_num) {
		rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return false;
	}

	addr &= 0xff;
	direct_addr = base_addr[rf_path] + (addr << 2);
	mask &= RFREG_MASK;

	rtw_write32_mask(rtwdev, REG_RSV_CTRL, BITS_RFC_DIRECT, DISABLE_PI);
	rtw_write32_mask(rtwdev, REG_WLRF1, BITS_RFC_DIRECT, DISABLE_PI);
	rtw_write32_mask(rtwdev, direct_addr, mask, data);

	udelay(1);

	rtw_write32_mask(rtwdev, REG_RSV_CTRL, BITS_RFC_DIRECT, ENABLE_PI);
	rtw_write32_mask(rtwdev, REG_WLRF1, BITS_RFC_DIRECT, ENABLE_PI);

	return true;
}

bool rtw_phy_write_rf_reg_mix(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
			      u32 addr, u32 mask, u32 data)
{
	if (addr != 0x00)
		return rtw_phy_write_rf_reg(rtwdev, rf_path, addr, mask, data);

	return rtw_phy_write_rf_reg_sipi(rtwdev, rf_path, addr, mask, data);
}

void rtw_phy_setup_phy_cond(struct rtw_dev *rtwdev, u32 pkg)
{
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_efuse *efuse = &rtwdev->efuse;
	struct rtw_phy_cond cond = {0};

	cond.cut = hal->cut_version ? hal->cut_version : 15;
	cond.pkg = pkg ? pkg : 15;
	cond.plat = 0x04;
	cond.rfe = efuse->rfe_option;

	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_USB:
		cond.intf = INTF_USB;
		break;
	case RTW_HCI_TYPE_SDIO:
		cond.intf = INTF_SDIO;
		break;
	case RTW_HCI_TYPE_PCIE:
	default:
		cond.intf = INTF_PCIE;
		break;
	}

	hal->phy_cond = cond;

	rtw_dbg(rtwdev, RTW_DBG_PHY, "phy cond=0x%08x\n", *((u32 *)&hal->phy_cond));
}

static bool check_positive(struct rtw_dev *rtwdev, struct rtw_phy_cond cond)
{
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_phy_cond drv_cond = hal->phy_cond;

	if (cond.cut && cond.cut != drv_cond.cut)
		return false;

	if (cond.pkg && cond.pkg != drv_cond.pkg)
		return false;

	if (cond.intf && cond.intf != drv_cond.intf)
		return false;

	if (cond.rfe != drv_cond.rfe)
		return false;

	return true;
}
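
/* PHY parameter tables interleave condition markers with register writes:
 * a "pos" entry carries a branch opcode (IF/ELIF/ELSE/ENDIF) and, for
 * IF/ELIF, the condition to test; the "neg" entry that follows commits the
 * test against the condition set up by rtw_phy_setup_phy_cond(), and plain
 * address/data entries are applied only while the current branch matches.
 */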
void rtw_parse_tbl_phy_cond(struct rtw_dev *rtwdev, const struct rtw_table *tbl)
{
	const union phy_table_tile *p = tbl->data;
	const union phy_table_tile *end = p + tbl->size / 2;
	struct rtw_phy_cond pos_cond = {0};
	bool is_matched = true, is_skipped = false;

	BUILD_BUG_ON(sizeof(union phy_table_tile) != sizeof(struct phy_cfg_pair));

	for (; p < end; p++) {
		if (p->cond.pos) {
			switch (p->cond.branch) {
			case BRANCH_ENDIF:
				is_matched = true;
				is_skipped = false;
				break;
			case BRANCH_ELSE:
				is_matched = is_skipped ? false : true;
				break;
			case BRANCH_IF:
			case BRANCH_ELIF:
			default:
				pos_cond = p->cond;
				break;
			}
		} else if (p->cond.neg) {
			if (!is_skipped) {
				if (check_positive(rtwdev, pos_cond)) {
					is_matched = true;
					is_skipped = true;
				} else {
					is_matched = false;
					is_skipped = false;
				}
			} else {
				is_matched = false;
			}
		} else if (is_matched) {
			(*tbl->do_cfg)(rtwdev, tbl, p->cfg.addr, p->cfg.data);
		}
	}
}

void rtw_parse_tbl_bb_pg(struct rtw_dev *rtwdev, const struct rtw_table *tbl)
{
	const struct phy_pg_cfg_pair *p = tbl->data;
	const struct phy_pg_cfg_pair *end = p + tbl->size / 6;

	BUILD_BUG_ON(sizeof(struct phy_pg_cfg_pair) != sizeof(u32) * 6);

	for (; p < end; p++) {
		if (p->addr == 0xfe || p->addr == 0xffe) {
			msleep(50);
			continue;
		}
		phy_store_tx_power_by_rate(rtwdev, p->band, p->rf_path,
					   p->tx_num, p->addr, p->bitmask,
					   p->data);
	}
}

void rtw_parse_tbl_txpwr_lmt(struct rtw_dev *rtwdev,
			     const struct rtw_table *tbl)
{
	const struct txpwr_lmt_cfg_pair *p = tbl->data;
	const struct txpwr_lmt_cfg_pair *end = p + tbl->size / 6;

	BUILD_BUG_ON(sizeof(struct txpwr_lmt_cfg_pair) != sizeof(u8) * 6);

	for (; p < end; p++) {
		phy_set_tx_power_limit(rtwdev, p->regd, p->band,
				       p->bw, p->rs,
				       p->ch, p->txpwr_lmt);
	}
}

void rtw_phy_cfg_mac(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
		     u32 addr, u32 data)
{
	rtw_write8(rtwdev, addr, data);
}

void rtw_phy_cfg_agc(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
		     u32 addr, u32 data)
{
	rtw_write32(rtwdev, addr, data);
}

void rtw_phy_cfg_bb(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
		    u32 addr, u32 data)
{
	if (addr == 0xfe)
		msleep(50);
	else if (addr == 0xfd)
		mdelay(5);
	else if (addr == 0xfc)
		mdelay(1);
	else if (addr == 0xfb)
		usleep_range(50, 60);
	else if (addr == 0xfa)
		udelay(5);
	else if (addr == 0xf9)
		udelay(1);
	else
		rtw_write32(rtwdev, addr, data);
}

void rtw_phy_cfg_rf(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
		    u32 addr, u32 data)
{
	if (addr == 0xffe) {
		msleep(50);
	} else if (addr == 0xfe) {
		usleep_range(100, 110);
	} else {
		rtw_write_rf(rtwdev, tbl->rf_path, addr, RFREG_MASK, data);
		udelay(1);
	}
}

static void rtw_load_rfk_table(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;

	if (!chip->rfk_init_tbl)
		return;

	rtw_load_table(rtwdev, chip->rfk_init_tbl);
}

void rtw_phy_load_tables(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	u8 rf_path;

	rtw_load_table(rtwdev, chip->mac_tbl);
	rtw_load_table(rtwdev, chip->bb_tbl);
	rtw_load_table(rtwdev, chip->agc_tbl);
	rtw_load_rfk_table(rtwdev);

	for (rf_path = 0; rf_path < rtwdev->hal.rf_path_num; rf_path++) {
		const struct rtw_table *tbl;

		tbl = chip->rf_tbl[rf_path];
		rtw_load_table(rtwdev, tbl);
	}
}

/* extract byte i of a 32-bit power-by-rate word and convert it from BCD */
#define bcd_to_dec_pwr_by_rate(val, i) bcd2bin(val >> (i * 8))

#define RTW_MAX_POWER_INDEX		0x3F
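
/* TX power is stored and programmed per rate section (CCK, OFDM, HT 1SS/2SS,
 * VHT 1SS/2SS); the arrays below list the DESC_RATE_* members of each
 * section so the power code can walk a whole section at once.
 */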
u8 rtw_cck_rates[] = { DESC_RATE1M, DESC_RATE2M, DESC_RATE5_5M, DESC_RATE11M };
u8 rtw_ofdm_rates[] = {
	DESC_RATE6M,  DESC_RATE9M,  DESC_RATE12M,
	DESC_RATE18M, DESC_RATE24M, DESC_RATE36M,
	DESC_RATE48M, DESC_RATE54M
};
u8 rtw_ht_1s_rates[] = {
	DESC_RATEMCS0, DESC_RATEMCS1, DESC_RATEMCS2,
	DESC_RATEMCS3, DESC_RATEMCS4, DESC_RATEMCS5,
	DESC_RATEMCS6, DESC_RATEMCS7
};
u8 rtw_ht_2s_rates[] = {
	DESC_RATEMCS8,  DESC_RATEMCS9,  DESC_RATEMCS10,
	DESC_RATEMCS11, DESC_RATEMCS12, DESC_RATEMCS13,
	DESC_RATEMCS14, DESC_RATEMCS15
};
u8 rtw_vht_1s_rates[] = {
	DESC_RATEVHT1SS_MCS0, DESC_RATEVHT1SS_MCS1,
	DESC_RATEVHT1SS_MCS2, DESC_RATEVHT1SS_MCS3,
	DESC_RATEVHT1SS_MCS4, DESC_RATEVHT1SS_MCS5,
	DESC_RATEVHT1SS_MCS6, DESC_RATEVHT1SS_MCS7,
	DESC_RATEVHT1SS_MCS8, DESC_RATEVHT1SS_MCS9
};
u8 rtw_vht_2s_rates[] = {
	DESC_RATEVHT2SS_MCS0, DESC_RATEVHT2SS_MCS1,
	DESC_RATEVHT2SS_MCS2, DESC_RATEVHT2SS_MCS3,
	DESC_RATEVHT2SS_MCS4, DESC_RATEVHT2SS_MCS5,
	DESC_RATEVHT2SS_MCS6, DESC_RATEVHT2SS_MCS7,
	DESC_RATEVHT2SS_MCS8, DESC_RATEVHT2SS_MCS9
};

static u8 rtw_cck_size = ARRAY_SIZE(rtw_cck_rates);
static u8 rtw_ofdm_size = ARRAY_SIZE(rtw_ofdm_rates);
static u8 rtw_ht_1s_size = ARRAY_SIZE(rtw_ht_1s_rates);
static u8 rtw_ht_2s_size = ARRAY_SIZE(rtw_ht_2s_rates);
static u8 rtw_vht_1s_size = ARRAY_SIZE(rtw_vht_1s_rates);
static u8 rtw_vht_2s_size = ARRAY_SIZE(rtw_vht_2s_rates);
u8 *rtw_rate_section[RTW_RATE_SECTION_MAX] = {
	rtw_cck_rates, rtw_ofdm_rates,
	rtw_ht_1s_rates, rtw_ht_2s_rates,
	rtw_vht_1s_rates, rtw_vht_2s_rates
};
u8 rtw_rate_size[RTW_RATE_SECTION_MAX] = {
	ARRAY_SIZE(rtw_cck_rates),
	ARRAY_SIZE(rtw_ofdm_rates),
	ARRAY_SIZE(rtw_ht_1s_rates),
	ARRAY_SIZE(rtw_ht_2s_rates),
	ARRAY_SIZE(rtw_vht_1s_rates),
	ARRAY_SIZE(rtw_vht_2s_rates)
};

static const u8 rtw_channel_idx_5g[RTW_MAX_CHANNEL_NUM_5G] = {
	36,  38,  40,  42,  44,  46,  48,	/* Band 1 */
	52,  54,  56,  58,  60,  62,  64,	/* Band 2 */
	100, 102, 104, 106, 108, 110, 112,	/* Band 3 */
	116, 118, 120, 122, 124, 126, 128,	/* Band 3 */
	132, 134, 136, 138, 140, 142, 144,	/* Band 3 */
	149, 151, 153, 155, 157, 159, 161,	/* Band 4 */
	165, 167, 169, 171, 173, 175, 177};	/* Band 4 */

static int rtw_channel_to_idx(u8 band, u8 channel)
{
	int ch_idx;
	u8 n_channel;

	if (band == PHY_BAND_2G) {
		ch_idx = channel - 1;
		n_channel = RTW_MAX_CHANNEL_NUM_2G;
	} else if (band == PHY_BAND_5G) {
		n_channel = RTW_MAX_CHANNEL_NUM_5G;
		for (ch_idx = 0; ch_idx < n_channel; ch_idx++)
			if (rtw_channel_idx_5g[ch_idx] == channel)
				break;
	} else {
		return -1;
	}

	if (ch_idx >= n_channel)
		return -1;

	return ch_idx;
}
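
/* The efuse stores one base power index per group of adjacent channels
 * (cck_base[]/bw40_base[] are indexed by group), so map a channel number
 * to its group here.
 */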
static u8 rtw_get_channel_group(u8 channel)
{
	switch (channel) {
	default:
		WARN_ON(1);
		/* fall through */
	case 1:
	case 2:
	case 36:
	case 38:
	case 40:
	case 42:
		return 0;
	case 3:
	case 4:
	case 5:
	case 44:
	case 46:
	case 48:
	case 50:
		return 1;
	case 6:
	case 7:
	case 8:
	case 52:
	case 54:
	case 56:
	case 58:
		return 2;
	case 9:
	case 10:
	case 11:
	case 60:
	case 62:
	case 64:
		return 3;
	case 12:
	case 13:
	case 100:
	case 102:
	case 104:
	case 106:
		return 4;
	case 14:
	case 108:
	case 110:
	case 112:
	case 114:
		return 5;
	case 116:
	case 118:
	case 120:
	case 122:
		return 6;
	case 124:
	case 126:
	case 128:
	case 130:
		return 7;
	case 132:
	case 134:
	case 136:
	case 138:
		return 8;
	case 140:
	case 142:
	case 144:
		return 9;
	case 149:
	case 151:
	case 153:
	case 155:
		return 10;
	case 157:
	case 159:
	case 161:
		return 11;
	case 165:
	case 167:
	case 169:
	case 171:
		return 12;
	case 173:
	case 175:
	case 177:
		return 13;
	}
}

static u8 phy_get_2g_tx_power_index(struct rtw_dev *rtwdev,
				    struct rtw_2g_txpwr_idx *pwr_idx_2g,
				    enum rtw_bandwidth bandwidth,
				    u8 rate, u8 group)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	u8 tx_power;
	bool mcs_rate;
	bool above_2ss;
	u8 factor = chip->txgi_factor;

	if (rate <= DESC_RATE11M)
		tx_power = pwr_idx_2g->cck_base[group];
	else
		tx_power = pwr_idx_2g->bw40_base[group];

	if (rate >= DESC_RATE6M && rate <= DESC_RATE54M)
		tx_power += pwr_idx_2g->ht_1s_diff.ofdm * factor;

	mcs_rate = (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS15) ||
		   (rate >= DESC_RATEVHT1SS_MCS0 &&
		    rate <= DESC_RATEVHT2SS_MCS9);
	above_2ss = (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15) ||
		    (rate >= DESC_RATEVHT2SS_MCS0);

	if (!mcs_rate)
		return tx_power;

	switch (bandwidth) {
	default:
		WARN_ON(1);
		/* fall through */
	case RTW_CHANNEL_WIDTH_20:
		tx_power += pwr_idx_2g->ht_1s_diff.bw20 * factor;
		if (above_2ss)
			tx_power += pwr_idx_2g->ht_2s_diff.bw20 * factor;
		break;
	case RTW_CHANNEL_WIDTH_40:
		/* bw40 is the base power */
		if (above_2ss)
			tx_power += pwr_idx_2g->ht_2s_diff.bw40 * factor;
		break;
	}

	return tx_power;
}

static u8 phy_get_5g_tx_power_index(struct rtw_dev *rtwdev,
				    struct rtw_5g_txpwr_idx *pwr_idx_5g,
				    enum rtw_bandwidth bandwidth,
				    u8 rate, u8 group)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	u8 tx_power;
	u8 upper, lower;
	bool mcs_rate;
	bool above_2ss;
	u8 factor = chip->txgi_factor;

	tx_power = pwr_idx_5g->bw40_base[group];

	mcs_rate = (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS15) ||
		   (rate >= DESC_RATEVHT1SS_MCS0 &&
		    rate <= DESC_RATEVHT2SS_MCS9);
	above_2ss = (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15) ||
		    (rate >= DESC_RATEVHT2SS_MCS0);

	if (!mcs_rate) {
		tx_power += pwr_idx_5g->ht_1s_diff.ofdm * factor;
		return tx_power;
	}

	switch (bandwidth) {
	default:
		WARN_ON(1);
		/* fall through */
	case RTW_CHANNEL_WIDTH_20:
		tx_power += pwr_idx_5g->ht_1s_diff.bw20 * factor;
		if (above_2ss)
			tx_power += pwr_idx_5g->ht_2s_diff.bw20 * factor;
		break;
	case RTW_CHANNEL_WIDTH_40:
		/* bw40 is the base power */
		if (above_2ss)
			tx_power += pwr_idx_5g->ht_2s_diff.bw40 * factor;
		break;
	case RTW_CHANNEL_WIDTH_80:
		/* the base idx of bw80 is the average of bw40+/bw40- */
		lower = pwr_idx_5g->bw40_base[group];
		upper = pwr_idx_5g->bw40_base[group + 1];

		tx_power = (lower + upper) / 2;
		tx_power += pwr_idx_5g->vht_1s_diff.bw80 * factor;
		if (above_2ss)
			tx_power += pwr_idx_5g->vht_2s_diff.bw80 * factor;
		break;
	}

	return tx_power;
}

/* Set the tx power level by path for each rate; note that the order of the
 * rates is *very* important, because 8822B/8821C combines every four bytes
 * of tx power index into a four-byte power index register, and calls
 * set_tx_agc to write these values into hardware.
 */
static
void phy_set_tx_power_level_by_path(struct rtw_dev *rtwdev, u8 ch, u8 path)
{
	struct rtw_hal *hal = &rtwdev->hal;
	u8 rs;

	/* do not need cck rates if we are not in 2.4G */
	if (hal->current_band_type == RTW_BAND_2G)
		rs = RTW_RATE_SECTION_CCK;
	else
		rs = RTW_RATE_SECTION_OFDM;

	for (; rs < RTW_RATE_SECTION_MAX; rs++)
		phy_set_tx_power_index_by_rs(rtwdev, ch, path, rs);
}

void rtw_phy_set_tx_power_level(struct rtw_dev *rtwdev, u8 channel)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_hal *hal = &rtwdev->hal;
	u8 path;

	mutex_lock(&hal->tx_power_mutex);

	for (path = 0; path < hal->rf_path_num; path++)
		phy_set_tx_power_level_by_path(rtwdev, channel, path);

	chip->ops->set_tx_power_index(rtwdev);
	mutex_unlock(&hal->tx_power_mutex);
}

s8 phy_get_tx_power_limit(struct rtw_dev *rtwdev, u8 band,
			  enum rtw_bandwidth bandwidth, u8 rf_path,
			  u8 rate, u8 channel, u8 regd);

static
u8 phy_get_tx_power_index(void *adapter, u8 rf_path, u8 rate,
			  enum rtw_bandwidth bandwidth, u8 channel, u8 regd)
{
	struct rtw_dev *rtwdev = adapter;
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_txpwr_idx *pwr_idx;
	u8 tx_power;
	u8 group;
	u8 band;
	s8 offset, limit;

	pwr_idx = &rtwdev->efuse.txpwr_idx_table[rf_path];
	group = rtw_get_channel_group(channel);

	/* base power index for 2.4G/5G */
	if (channel <= 14) {
		band = PHY_BAND_2G;
		tx_power = phy_get_2g_tx_power_index(rtwdev,
						     &pwr_idx->pwr_idx_2g,
						     bandwidth, rate, group);
		offset = hal->tx_pwr_by_rate_offset_2g[rf_path][rate];
	} else {
		band = PHY_BAND_5G;
		tx_power = phy_get_5g_tx_power_index(rtwdev,
						     &pwr_idx->pwr_idx_5g,
						     bandwidth, rate, group);
		offset = hal->tx_pwr_by_rate_offset_5g[rf_path][rate];
	}

	limit = phy_get_tx_power_limit(rtwdev, band, bandwidth, rf_path,
				       rate, channel, regd);

	/* both offset and limit are relative to the per-section base, so the
	 * regulatory limit simply caps the by-rate offset here
	 */
	if (offset > limit)
		offset = limit;

	tx_power += offset;

	if (tx_power > rtwdev->chip->max_power_index)
		tx_power = rtwdev->chip->max_power_index;

	return tx_power;
}

void phy_set_tx_power_index_by_rs(void *adapter, u8 ch, u8 path, u8 rs)
{
	struct rtw_dev *rtwdev = adapter;
	struct rtw_hal *hal = &rtwdev->hal;
	u8 regd = rtwdev->regd.txpwr_regd;
	u8 *rates;
	u8 size;
	u8 rate;
	u8 pwr_idx;
	u8 bw;
	int i;

	if (rs >= RTW_RATE_SECTION_MAX)
		return;

	rates = rtw_rate_section[rs];
	size = rtw_rate_size[rs];
	bw = hal->current_band_width;
	for (i = 0; i < size; i++) {
		rate = rates[i];
		pwr_idx = phy_get_tx_power_index(adapter, path, rate, bw, ch,
						 regd);
		hal->tx_pwr_tbl[path][rate] = pwr_idx;
	}
}

static u8 tbl_to_dec_pwr_by_rate(struct rtw_dev *rtwdev, u32 hex, u8 i)
{
	if (rtwdev->chip->is_pwr_by_rate_dec)
		return bcd_to_dec_pwr_by_rate(hex, i);
	else
		return (hex >> (i * 8)) & 0xFF;
}
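
/* Each BB tx power-by-rate register packs the values for (up to) four rates,
 * one byte per rate; given a register address (and mask), recover which
 * DESC_RATE_* entries those bytes describe so they can be unpacked.
 */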
static void phy_get_rate_values_of_txpwr_by_rate(struct rtw_dev *rtwdev,
						 u32 addr, u32 mask,
						 u32 val, u8 *rate,
						 u8 *pwr_by_rate, u8 *rate_num)
{
	int i;

	switch (addr) {
	case 0xE00:
	case 0x830:
		rate[0] = DESC_RATE6M;
		rate[1] = DESC_RATE9M;
		rate[2] = DESC_RATE12M;
		rate[3] = DESC_RATE18M;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xE04:
	case 0x834:
		rate[0] = DESC_RATE24M;
		rate[1] = DESC_RATE36M;
		rate[2] = DESC_RATE48M;
		rate[3] = DESC_RATE54M;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xE08:
		rate[0] = DESC_RATE1M;
		pwr_by_rate[0] = bcd_to_dec_pwr_by_rate(val, 1);
		*rate_num = 1;
		break;
	case 0x86C:
		if (mask == 0xffffff00) {
			rate[0] = DESC_RATE2M;
			rate[1] = DESC_RATE5_5M;
			rate[2] = DESC_RATE11M;
			for (i = 1; i < 4; ++i)
				pwr_by_rate[i - 1] =
					tbl_to_dec_pwr_by_rate(rtwdev, val, i);
			*rate_num = 3;
		} else if (mask == 0x000000ff) {
			rate[0] = DESC_RATE11M;
			pwr_by_rate[0] = bcd_to_dec_pwr_by_rate(val, 0);
			*rate_num = 1;
		}
		break;
	case 0xE10:
	case 0x83C:
		rate[0] = DESC_RATEMCS0;
		rate[1] = DESC_RATEMCS1;
		rate[2] = DESC_RATEMCS2;
		rate[3] = DESC_RATEMCS3;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xE14:
	case 0x848:
		rate[0] = DESC_RATEMCS4;
		rate[1] = DESC_RATEMCS5;
		rate[2] = DESC_RATEMCS6;
		rate[3] = DESC_RATEMCS7;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xE18:
	case 0x84C:
		rate[0] = DESC_RATEMCS8;
		rate[1] = DESC_RATEMCS9;
		rate[2] = DESC_RATEMCS10;
		rate[3] = DESC_RATEMCS11;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xE1C:
	case 0x868:
		rate[0] = DESC_RATEMCS12;
		rate[1] = DESC_RATEMCS13;
		rate[2] = DESC_RATEMCS14;
		rate[3] = DESC_RATEMCS15;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0x838:
		rate[0] = DESC_RATE1M;
		rate[1] = DESC_RATE2M;
		rate[2] = DESC_RATE5_5M;
		for (i = 1; i < 4; ++i)
			pwr_by_rate[i - 1] = tbl_to_dec_pwr_by_rate(rtwdev,
								    val, i);
		*rate_num = 3;
		break;
	case 0xC20:
	case 0xE20:
	case 0x1820:
	case 0x1A20:
		rate[0] = DESC_RATE1M;
		rate[1] = DESC_RATE2M;
		rate[2] = DESC_RATE5_5M;
		rate[3] = DESC_RATE11M;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC24:
	case 0xE24:
	case 0x1824:
	case 0x1A24:
		rate[0] = DESC_RATE6M;
		rate[1] = DESC_RATE9M;
		rate[2] = DESC_RATE12M;
		rate[3] = DESC_RATE18M;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC28:
	case 0xE28:
	case 0x1828:
	case 0x1A28:
		rate[0] = DESC_RATE24M;
		rate[1] = DESC_RATE36M;
		rate[2] = DESC_RATE48M;
		rate[3] = DESC_RATE54M;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC2C:
	case 0xE2C:
	case 0x182C:
	case 0x1A2C:
		rate[0] = DESC_RATEMCS0;
		rate[1] = DESC_RATEMCS1;
		rate[2] = DESC_RATEMCS2;
		rate[3] = DESC_RATEMCS3;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC30:
	case 0xE30:
	case 0x1830:
	case 0x1A30:
		rate[0] = DESC_RATEMCS4;
		rate[1] = DESC_RATEMCS5;
		rate[2] = DESC_RATEMCS6;
		rate[3] = DESC_RATEMCS7;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC34:
	case 0xE34:
	case 0x1834:
	case 0x1A34:
		rate[0] = DESC_RATEMCS8;
		rate[1] = DESC_RATEMCS9;
		rate[2] = DESC_RATEMCS10;
		rate[3] = DESC_RATEMCS11;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC38:
	case 0xE38:
	case 0x1838:
	case 0x1A38:
		rate[0] = DESC_RATEMCS12;
		rate[1] = DESC_RATEMCS13;
		rate[2] = DESC_RATEMCS14;
		rate[3] = DESC_RATEMCS15;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC3C:
	case 0xE3C:
	case 0x183C:
	case 0x1A3C:
		rate[0] = DESC_RATEVHT1SS_MCS0;
		rate[1] = DESC_RATEVHT1SS_MCS1;
		rate[2] = DESC_RATEVHT1SS_MCS2;
		rate[3] = DESC_RATEVHT1SS_MCS3;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC40:
	case 0xE40:
	case 0x1840:
	case 0x1A40:
		rate[0] = DESC_RATEVHT1SS_MCS4;
		rate[1] = DESC_RATEVHT1SS_MCS5;
		rate[2] = DESC_RATEVHT1SS_MCS6;
		rate[3] = DESC_RATEVHT1SS_MCS7;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC44:
	case 0xE44:
	case 0x1844:
	case 0x1A44:
		rate[0] = DESC_RATEVHT1SS_MCS8;
		rate[1] = DESC_RATEVHT1SS_MCS9;
		rate[2] = DESC_RATEVHT2SS_MCS0;
		rate[3] = DESC_RATEVHT2SS_MCS1;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC48:
	case 0xE48:
	case 0x1848:
	case 0x1A48:
		rate[0] = DESC_RATEVHT2SS_MCS2;
		rate[1] = DESC_RATEVHT2SS_MCS3;
		rate[2] = DESC_RATEVHT2SS_MCS4;
		rate[3] = DESC_RATEVHT2SS_MCS5;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC4C:
	case 0xE4C:
	case 0x184C:
	case 0x1A4C:
		rate[0] = DESC_RATEVHT2SS_MCS6;
		rate[1] = DESC_RATEVHT2SS_MCS7;
		rate[2] = DESC_RATEVHT2SS_MCS8;
		rate[3] = DESC_RATEVHT2SS_MCS9;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xCD8:
	case 0xED8:
	case 0x18D8:
	case 0x1AD8:
		rate[0] = DESC_RATEMCS16;
		rate[1] = DESC_RATEMCS17;
		rate[2] = DESC_RATEMCS18;
		rate[3] = DESC_RATEMCS19;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xCDC:
	case 0xEDC:
	case 0x18DC:
	case 0x1ADC:
		rate[0] = DESC_RATEMCS20;
		rate[1] = DESC_RATEMCS21;
		rate[2] = DESC_RATEMCS22;
		rate[3] = DESC_RATEMCS23;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xCE0:
	case 0xEE0:
	case 0x18E0:
	case 0x1AE0:
		rate[0] = DESC_RATEVHT3SS_MCS0;
		rate[1] = DESC_RATEVHT3SS_MCS1;
		rate[2] = DESC_RATEVHT3SS_MCS2;
		rate[3] = DESC_RATEVHT3SS_MCS3;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xCE4:
	case 0xEE4:
	case 0x18E4:
	case 0x1AE4:
		rate[0] = DESC_RATEVHT3SS_MCS4;
		rate[1] = DESC_RATEVHT3SS_MCS5;
		rate[2] = DESC_RATEVHT3SS_MCS6;
		rate[3] = DESC_RATEVHT3SS_MCS7;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xCE8:
	case 0xEE8:
	case 0x18E8:
	case 0x1AE8:
		rate[0] = DESC_RATEVHT3SS_MCS8;
		rate[1] = DESC_RATEVHT3SS_MCS9;
		for (i = 0; i < 2; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 2;
		break;
	default:
		rtw_warn(rtwdev, "invalid tx power index addr 0x%08x\n", addr);
		break;
	}
}

void phy_store_tx_power_by_rate(void *adapter, u32 band, u32 rfpath, u32 txnum,
				u32 regaddr, u32 bitmask, u32 data)
{
	struct rtw_dev *rtwdev = adapter;
	struct rtw_hal *hal = &rtwdev->hal;
	u8 rate_num = 0;
	u8 rate;
	u8 rates[RTW_RF_PATH_MAX] = {0};
	s8 offset;
	s8 pwr_by_rate[RTW_RF_PATH_MAX] = {0};
	int i;

	phy_get_rate_values_of_txpwr_by_rate(rtwdev, regaddr, bitmask, data,
					     rates, pwr_by_rate, &rate_num);

	if (WARN_ON(rfpath >= RTW_RF_PATH_MAX ||
		    (band != PHY_BAND_2G && band != PHY_BAND_5G) ||
		    rate_num > RTW_RF_PATH_MAX))
		return;

	for (i = 0; i < rate_num; i++) {
		offset = pwr_by_rate[i];
		rate = rates[i];
		if (band == PHY_BAND_2G)
			hal->tx_pwr_by_rate_offset_2g[rfpath][rate] = offset;
		else if (band == PHY_BAND_5G)
			hal->tx_pwr_by_rate_offset_5g[rfpath][rate] = offset;
		else
			continue;
	}
}

static
void phy_tx_power_by_rate_config_by_path(struct rtw_hal *hal, u8 path,
					 u8 rs, u8 size, u8 *rates)
{
	u8 rate;
	u8 base_idx, rate_idx;
	s8 base_2g, base_5g;

	/* the base rate of a VHT section is MCS7, i.e. the third rate from
	 * the end; other sections use their last (highest) rate as the base
	 */
	if (rs >= RTW_RATE_SECTION_VHT_1S)
		base_idx = rates[size - 3];
	else
		base_idx = rates[size - 1];
	base_2g = hal->tx_pwr_by_rate_offset_2g[path][base_idx];
	base_5g = hal->tx_pwr_by_rate_offset_5g[path][base_idx];
	hal->tx_pwr_by_rate_base_2g[path][rs] = base_2g;
	hal->tx_pwr_by_rate_base_5g[path][rs] = base_5g;
	for (rate = 0; rate < size; rate++) {
		rate_idx = rates[rate];
		hal->tx_pwr_by_rate_offset_2g[path][rate_idx] -= base_2g;
		hal->tx_pwr_by_rate_offset_5g[path][rate_idx] -= base_5g;
	}
}

void rtw_phy_tx_power_by_rate_config(struct rtw_hal *hal)
{
	u8 path;

	for (path = 0; path < RTW_RF_PATH_MAX; path++) {
		phy_tx_power_by_rate_config_by_path(hal, path,
						    RTW_RATE_SECTION_CCK,
						    rtw_cck_size, rtw_cck_rates);
		phy_tx_power_by_rate_config_by_path(hal, path,
						    RTW_RATE_SECTION_OFDM,
						    rtw_ofdm_size, rtw_ofdm_rates);
		phy_tx_power_by_rate_config_by_path(hal, path,
						    RTW_RATE_SECTION_HT_1S,
						    rtw_ht_1s_size, rtw_ht_1s_rates);
		phy_tx_power_by_rate_config_by_path(hal, path,
						    RTW_RATE_SECTION_HT_2S,
						    rtw_ht_2s_size, rtw_ht_2s_rates);
		phy_tx_power_by_rate_config_by_path(hal, path,
						    RTW_RATE_SECTION_VHT_1S,
						    rtw_vht_1s_size, rtw_vht_1s_rates);
		phy_tx_power_by_rate_config_by_path(hal, path,
						    RTW_RATE_SECTION_VHT_2S,
						    rtw_vht_2s_size, rtw_vht_2s_rates);
	}
}
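
/* Rebase the parsed regulatory limits from absolute values to values
 * relative to the per-section by-rate base, so they can be compared
 * directly against the (also rebased) by-rate offsets in
 * phy_get_tx_power_index().
 */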
static void
phy_tx_power_limit_config(struct rtw_hal *hal, u8 regd, u8 bw, u8 rs)
{
	s8 base, orig;
	u8 ch;

	for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_2G; ch++) {
		base = hal->tx_pwr_by_rate_base_2g[0][rs];
		orig = hal->tx_pwr_limit_2g[regd][bw][rs][ch];
		hal->tx_pwr_limit_2g[regd][bw][rs][ch] -= base;
	}

	for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_5G; ch++) {
		base = hal->tx_pwr_by_rate_base_5g[0][rs];
		hal->tx_pwr_limit_5g[regd][bw][rs][ch] -= base;
	}
}

void rtw_phy_tx_power_limit_config(struct rtw_hal *hal)
{
	u8 regd, bw, rs;

	for (regd = 0; regd < RTW_REGD_MAX; regd++)
		for (bw = 0; bw < RTW_CHANNEL_WIDTH_MAX; bw++)
			for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++)
				phy_tx_power_limit_config(hal, regd, bw, rs);
}

static s8 get_tx_power_limit(struct rtw_hal *hal, u8 band, u8 bw, u8 rs,
			     u8 ch, u8 regd)
{
	if (regd > RTW_REGD_WW)
		return RTW_MAX_POWER_INDEX;

	if (band == PHY_BAND_2G)
		return hal->tx_pwr_limit_2g[regd][bw][rs][ch];

	return hal->tx_pwr_limit_5g[regd][bw][rs][ch];
}

s8 phy_get_tx_power_limit(struct rtw_dev *rtwdev, u8 band,
			  enum rtw_bandwidth bw, u8 rf_path,
			  u8 rate, u8 channel, u8 regd)
{
	struct rtw_hal *hal = &rtwdev->hal;
	s8 power_limit;
	u8 rs;
	int ch_idx;

	if (rate >= DESC_RATE1M && rate <= DESC_RATE11M)
		rs = RTW_RATE_SECTION_CCK;
	else if (rate >= DESC_RATE6M && rate <= DESC_RATE54M)
		rs = RTW_RATE_SECTION_OFDM;
	else if (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS7)
		rs = RTW_RATE_SECTION_HT_1S;
	else if (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15)
		rs = RTW_RATE_SECTION_HT_2S;
	else if (rate >= DESC_RATEVHT1SS_MCS0 && rate <= DESC_RATEVHT1SS_MCS9)
		rs = RTW_RATE_SECTION_VHT_1S;
	else if (rate >= DESC_RATEVHT2SS_MCS0 && rate <= DESC_RATEVHT2SS_MCS9)
		rs = RTW_RATE_SECTION_VHT_2S;
	else
		goto err;

	ch_idx = rtw_channel_to_idx(band, channel);
	if (ch_idx < 0)
		goto err;

	power_limit = get_tx_power_limit(hal, band, bw, rs, ch_idx, regd);

	return power_limit;

err:
	WARN(1, "invalid arguments, band=%d, bw=%d, path=%d, rate=%d, ch=%d\n",
	     band, bw, rf_path, rate, channel);
	return RTW_MAX_POWER_INDEX;
}

void phy_set_tx_power_limit(struct rtw_dev *rtwdev, u8 regd, u8 band,
			    u8 bw, u8 rs, u8 ch, s8 pwr_limit)
{
	struct rtw_hal *hal = &rtwdev->hal;
	int ch_idx;

	pwr_limit = clamp_t(s8, pwr_limit,
			    -RTW_MAX_POWER_INDEX, RTW_MAX_POWER_INDEX);
	ch_idx = rtw_channel_to_idx(band, ch);

	if (regd >= RTW_REGD_MAX || bw >= RTW_CHANNEL_WIDTH_MAX ||
	    rs >= RTW_RATE_SECTION_MAX || ch_idx < 0) {
		WARN(1,
		     "wrong txpwr_lmt regd=%u, band=%u bw=%u, rs=%u, ch_idx=%u, pwr_limit=%d\n",
		     regd, band, bw, rs, ch_idx, pwr_limit);
		return;
	}

	if (band == PHY_BAND_2G)
		hal->tx_pwr_limit_2g[regd][bw][rs][ch_idx] = pwr_limit;
	else if (band == PHY_BAND_5G)
		hal->tx_pwr_limit_5g[regd][bw][rs][ch_idx] = pwr_limit;
}

static
void rtw_hw_tx_power_limit_init(struct rtw_hal *hal, u8 regd, u8 bw, u8 rs)
{
	u8 ch;

	/* 2.4G channels */
	for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_2G; ch++)
		hal->tx_pwr_limit_2g[regd][bw][rs][ch] = RTW_MAX_POWER_INDEX;

	/* 5G channels */
	for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_5G; ch++)
		hal->tx_pwr_limit_5g[regd][bw][rs][ch] = RTW_MAX_POWER_INDEX;
}
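
/* At init every limit is RTW_MAX_POWER_INDEX (effectively no limit) and
 * every by-rate offset is 0; the parsed BB_PG and txpwr_lmt tables overwrite
 * them with the chip's real values later.
 */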
void rtw_hw_init_tx_power(struct rtw_hal *hal)
{
	u8 regd, path, rate, rs, bw;

	/* init tx power by rate offset */
	for (path = 0; path < RTW_RF_PATH_MAX; path++) {
		for (rate = 0; rate < DESC_RATE_MAX; rate++) {
			hal->tx_pwr_by_rate_offset_2g[path][rate] = 0;
			hal->tx_pwr_by_rate_offset_5g[path][rate] = 0;
		}
	}

	/* init tx power limit */
	for (regd = 0; regd < RTW_REGD_MAX; regd++)
		for (bw = 0; bw < RTW_CHANNEL_WIDTH_MAX; bw++)
			for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++)
				rtw_hw_tx_power_limit_init(hal, regd, bw, rs);
}