// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019 Realtek Corporation
 */

#include <linux/bcd.h>

#include "main.h"
#include "reg.h"
#include "fw.h"
#include "phy.h"
#include "debug.h"

struct phy_cfg_pair {
	u32 addr;
	u32 data;
};

union phy_table_tile {
	struct rtw_phy_cond cond;
	struct phy_cfg_pair cfg;
};

struct phy_pg_cfg_pair {
	u32 band;
	u32 rf_path;
	u32 tx_num;
	u32 addr;
	u32 bitmask;
	u32 data;
};

struct txpwr_lmt_cfg_pair {
	u8 regd;
	u8 band;
	u8 bw;
	u8 rs;
	u8 ch;
	s8 txpwr_lmt;
};

static const u32 db_invert_table[12][8] = {
	{10,		13,		16,		20,
	 25,		32,		40,		50},
	{64,		80,		101,		128,
	 160,		201,		256,		318},
	{401,		505,		635,		800,
	 1007,		1268,		1596,		2010},
	{316,		398,		501,		631,
	 794,		1000,		1259,		1585},
	{1995,		2512,		3162,		3981,
	 5012,		6310,		7943,		10000},
	{12589,		15849,		19953,		25119,
	 31623,		39811,		50119,		63098},
	{79433,		100000,		125893,		158489,
	 199526,	251189,		316228,		398107},
	{501187,	630957,		794328,		1000000,
	 1258925,	1584893,	1995262,	2511886},
	{3162278,	3981072,	5011872,	6309573,
	 7943282,	10000000,	12589254,	15848932},
	{19952623,	25118864,	31622777,	39810717,
	 50118723,	63095734,	79432823,	100000000},
	{125892541,	158489319,	199526232,	251188643,
	 316227766,	398107171,	501187234,	630957345},
	{794328235,	1000000000,	1258925412,	1584893192,
	 1995262315,	2511886432U,	3162277660U,	3981071706U}
};

enum rtw_phy_band_type {
	PHY_BAND_2G	= 0,
	PHY_BAND_5G	= 1,
};

void rtw_phy_init(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
	u32 addr, mask;

	dm_info->fa_history[3] = 0;
	dm_info->fa_history[2] = 0;
	dm_info->fa_history[1] = 0;
	dm_info->fa_history[0] = 0;
	dm_info->igi_bitmap = 0;
	dm_info->igi_history[3] = 0;
	dm_info->igi_history[2] = 0;
	dm_info->igi_history[1] = 0;

	addr = chip->dig[0].addr;
	mask = chip->dig[0].mask;
	dm_info->igi_history[0] = rtw_read32_mask(rtwdev, addr, mask);
}

void rtw_phy_dig_write(struct rtw_dev *rtwdev, u8 igi)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_hal *hal = &rtwdev->hal;
	u32 addr, mask;
	u8 path;

	for (path = 0; path < hal->rf_path_num; path++) {
		addr = chip->dig[path].addr;
		mask = chip->dig[path].mask;
		rtw_write32_mask(rtwdev, addr, mask, igi);
	}
}

static void rtw_phy_stat_false_alarm(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;

	chip->ops->false_alarm_statistics(rtwdev);
}

#define RA_FLOOR_TABLE_SIZE	7
#define RA_FLOOR_UP_GAP		3

static u8 rtw_phy_get_rssi_level(u8 old_level, u8 rssi)
{
	u8 table[RA_FLOOR_TABLE_SIZE] = {20, 34, 38, 42, 46, 50, 100};
	u8 new_level = 0;
	int i;

	for (i = 0; i < RA_FLOOR_TABLE_SIZE; i++)
		if (i >= old_level)
			table[i] += RA_FLOOR_UP_GAP;

	for (i = 0; i < RA_FLOOR_TABLE_SIZE; i++) {
		if (rssi < table[i]) {
			new_level = i;
			break;
		}
	}

	return new_level;
}

struct rtw_phy_stat_iter_data {
	struct rtw_dev *rtwdev;
	u8 min_rssi;
};

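/* Per-station RSSI bookkeeping: read the averaged RSSI, refresh the RA floor
 * level, report it to firmware, and track the weakest connected peer so the
 * DIG algorithm can bound its gain index accordingly.
 */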
static void rtw_phy_stat_rssi_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw_phy_stat_iter_data *iter_data = data;
	struct rtw_dev *rtwdev = iter_data->rtwdev;
	struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
	u8 rssi;

	rssi = ewma_rssi_read(&si->avg_rssi);
	si->rssi_level = rtw_phy_get_rssi_level(si->rssi_level, rssi);

	rtw_fw_send_rssi_info(rtwdev, si);

	iter_data->min_rssi = min_t(u8, rssi, iter_data->min_rssi);
}

static void rtw_phy_stat_rssi(struct rtw_dev *rtwdev)
{
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
	struct rtw_phy_stat_iter_data data = {};

	data.rtwdev = rtwdev;
	data.min_rssi = U8_MAX;
	rtw_iterate_stas_atomic(rtwdev, rtw_phy_stat_rssi_iter, &data);

	dm_info->pre_min_rssi = dm_info->min_rssi;
	dm_info->min_rssi = data.min_rssi;
}

static void rtw_phy_statistics(struct rtw_dev *rtwdev)
{
	rtw_phy_stat_rssi(rtwdev);
	rtw_phy_stat_false_alarm(rtwdev);
}

#define DIG_PERF_FA_TH_LOW		250
#define DIG_PERF_FA_TH_HIGH		500
#define DIG_PERF_FA_TH_EXTRA_HIGH	750
#define DIG_PERF_MAX			0x5a
#define DIG_PERF_MID			0x40
#define DIG_CVRG_FA_TH_LOW		2000
#define DIG_CVRG_FA_TH_HIGH		4000
#define DIG_CVRG_FA_TH_EXTRA_HIGH	5000
#define DIG_CVRG_MAX			0x2a
#define DIG_CVRG_MID			0x26
#define DIG_CVRG_MIN			0x1c
#define DIG_RSSI_GAIN_OFFSET		15

static bool
rtw_phy_dig_check_damping(struct rtw_dm_info *dm_info)
{
	u16 fa_lo = DIG_PERF_FA_TH_LOW;
	u16 fa_hi = DIG_PERF_FA_TH_HIGH;
	u16 *fa_history;
	u8 *igi_history;
	u8 damping_rssi;
	u8 min_rssi;
	u8 diff;
	u8 igi_bitmap;
	bool damping = false;

	min_rssi = dm_info->min_rssi;
	if (dm_info->damping) {
		damping_rssi = dm_info->damping_rssi;
		diff = min_rssi > damping_rssi ? min_rssi - damping_rssi :
						 damping_rssi - min_rssi;
		if (diff > 3 || dm_info->damping_cnt++ > 20) {
			dm_info->damping = false;
			return false;
		}

		return true;
	}

	igi_history = dm_info->igi_history;
	fa_history = dm_info->fa_history;
	igi_bitmap = dm_info->igi_bitmap & 0xf;
	switch (igi_bitmap) {
	case 5:
		/* down -> up -> down -> up */
		if (igi_history[0] > igi_history[1] &&
		    igi_history[2] > igi_history[3] &&
		    igi_history[0] - igi_history[1] >= 2 &&
		    igi_history[2] - igi_history[3] >= 2 &&
		    fa_history[0] > fa_hi && fa_history[1] < fa_lo &&
		    fa_history[2] > fa_hi && fa_history[3] < fa_lo)
			damping = true;
		break;
	case 9:
		/* up -> down -> down -> up */
		if (igi_history[0] > igi_history[1] &&
		    igi_history[3] > igi_history[2] &&
		    igi_history[0] - igi_history[1] >= 4 &&
		    igi_history[3] - igi_history[2] >= 2 &&
		    fa_history[0] > fa_hi && fa_history[1] < fa_lo &&
		    fa_history[2] < fa_lo && fa_history[3] > fa_hi)
			damping = true;
		break;
	default:
		return false;
	}

	if (damping) {
		dm_info->damping = true;
		dm_info->damping_cnt = 0;
		dm_info->damping_rssi = min_rssi;
	}

	return damping;
}

static void rtw_phy_dig_get_boundary(struct rtw_dm_info *dm_info,
				     u8 *upper, u8 *lower, bool linked)
{
	u8 dig_max, dig_min, dig_mid;
	u8 min_rssi;

	if (linked) {
		dig_max = DIG_PERF_MAX;
		dig_mid = DIG_PERF_MID;
		/* 22B=0x1c, 22C=0x20 */
		dig_min = 0x1c;
		min_rssi = max_t(u8, dm_info->min_rssi, dig_min);
	} else {
		dig_max = DIG_CVRG_MAX;
		dig_mid = DIG_CVRG_MID;
		dig_min = DIG_CVRG_MIN;
		min_rssi = dig_min;
	}

	/* DIG MAX should be bounded by minimum RSSI with offset +15 */
	dig_max = min_t(u8, dig_max, min_rssi + DIG_RSSI_GAIN_OFFSET);

	*lower = clamp_t(u8, min_rssi, dig_min, dig_mid);
	*upper = clamp_t(u8, *lower + DIG_RSSI_GAIN_OFFSET, dig_min, dig_max);
}

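/* Pick the false-alarm thresholds and IGI step sizes for this run: the
 * "performance" thresholds are used while associated, the more tolerant
 * "coverage" thresholds otherwise, and larger steps are chosen when the
 * weakest peer just got weaker.
 */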
static void rtw_phy_dig_get_threshold(struct rtw_dm_info *dm_info,
				      u16 *fa_th, u8 *step, bool linked)
{
	u8 min_rssi, pre_min_rssi;

	min_rssi = dm_info->min_rssi;
	pre_min_rssi = dm_info->pre_min_rssi;
	step[0] = 4;
	step[1] = 3;
	step[2] = 2;

	if (linked) {
		fa_th[0] = DIG_PERF_FA_TH_EXTRA_HIGH;
		fa_th[1] = DIG_PERF_FA_TH_HIGH;
		fa_th[2] = DIG_PERF_FA_TH_LOW;
		if (pre_min_rssi > min_rssi) {
			step[0] = 6;
			step[1] = 4;
			step[2] = 2;
		}
	} else {
		fa_th[0] = DIG_CVRG_FA_TH_EXTRA_HIGH;
		fa_th[1] = DIG_CVRG_FA_TH_HIGH;
		fa_th[2] = DIG_CVRG_FA_TH_LOW;
	}
}

static void rtw_phy_dig_recorder(struct rtw_dm_info *dm_info, u8 igi, u16 fa)
{
	u8 *igi_history;
	u16 *fa_history;
	u8 igi_bitmap;
	bool up;

	igi_bitmap = dm_info->igi_bitmap << 1 & 0xfe;
	igi_history = dm_info->igi_history;
	fa_history = dm_info->fa_history;

	up = igi > igi_history[0];
	igi_bitmap |= up;

	igi_history[3] = igi_history[2];
	igi_history[2] = igi_history[1];
	igi_history[1] = igi_history[0];
	igi_history[0] = igi;

	fa_history[3] = fa_history[2];
	fa_history[2] = fa_history[1];
	fa_history[1] = fa_history[0];
	fa_history[0] = fa;

	dm_info->igi_bitmap = igi_bitmap;
}

static void rtw_phy_dig(struct rtw_dev *rtwdev)
{
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
	u8 upper_bound, lower_bound;
	u8 pre_igi, cur_igi;
	u16 fa_th[3], fa_cnt;
	u8 level;
	u8 step[3];
	bool linked;

	if (rtw_flag_check(rtwdev, RTW_FLAG_DIG_DISABLE))
		return;

	if (rtw_phy_dig_check_damping(dm_info))
		return;

	linked = !!rtwdev->sta_cnt;

	fa_cnt = dm_info->total_fa_cnt;
	pre_igi = dm_info->igi_history[0];

	rtw_phy_dig_get_threshold(dm_info, fa_th, step, linked);

	/* test the false alarm count from the highest threshold level first,
	 * and increase the igi by the corresponding step size
	 *
	 * note that the step size is offset by -2, compensate for it afterwards
	 */
	cur_igi = pre_igi;
	for (level = 0; level < 3; level++) {
		if (fa_cnt > fa_th[level]) {
			cur_igi += step[level];
			break;
		}
	}
	cur_igi -= 2;

	/* calculate the upper/lower bound by the minimum rssi we have among
	 * the peers connected with us, meanwhile make sure the igi value does
	 * not go beyond the hardware limitation
	 */
	rtw_phy_dig_get_boundary(dm_info, &upper_bound, &lower_bound, linked);
	cur_igi = clamp_t(u8, cur_igi, lower_bound, upper_bound);

	/* record current igi value and false alarm statistics for further
	 * damping checks, and record the trend of igi values
	 */
	rtw_phy_dig_recorder(dm_info, cur_igi, fa_cnt);

	if (cur_igi != pre_igi)
		rtw_phy_dig_write(rtwdev, cur_igi);
}

static void rtw_phy_ra_info_update_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw_dev *rtwdev = data;
	struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;

	rtw_update_sta_info(rtwdev, si);
}

static void rtw_phy_ra_info_update(struct rtw_dev *rtwdev)
{
	if (rtwdev->watch_dog_cnt & 0x3)
		return;

	rtw_iterate_stas_atomic(rtwdev, rtw_phy_ra_info_update_iter, rtwdev);
}

void rtw_phy_dynamic_mechanism(struct rtw_dev *rtwdev)
{
	/* for further calculation */
	rtw_phy_statistics(rtwdev);
	rtw_phy_dig(rtwdev);
	rtw_phy_ra_info_update(rtwdev);
}

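/* The helpers below combine per-path RF power readings (in dBm) into one
 * RSSI value: each path's power is mapped to dB above a -100 dBm floor
 * (clamped to [0, 100]), converted to a linear value carrying FRAC_BITS (3)
 * fractional bits via db_invert_table (round(10^(dB/10)), with the first
 * three rows pre-scaled by 8), summed, rounded, averaged over the paths, and
 * converted back to dB.
 *
 * For example, -53 dBm maps to power_db = 47, which indexes
 * db_invert_table[5][6] = 50119 (~10^4.7).
 */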
#define FRAC_BITS 3

static u8 rtw_phy_power_2_db(s8 power)
{
	if (power <= -100 || power >= 20)
		return 0;
	else if (power >= 0)
		return 100;
	else
		return 100 + power;
}

static u64 rtw_phy_db_2_linear(u8 power_db)
{
	u8 i, j;
	u64 linear;

	/* 1dB ~ 96dB */
	i = (power_db - 1) >> 3;
	j = (power_db - 1) - (i << 3);

	linear = db_invert_table[i][j];
	linear = i > 2 ? linear << FRAC_BITS : linear;

	return linear;
}

static u8 rtw_phy_linear_2_db(u64 linear)
{
	u8 i;
	u8 j;
	u32 dB;

	if (linear >= db_invert_table[11][7])
		return 96; /* maximum 96 dB */

	for (i = 0; i < 12; i++) {
		if (i <= 2 && (linear << FRAC_BITS) <= db_invert_table[i][7])
			break;
		else if (i > 2 && linear <= db_invert_table[i][7])
			break;
	}

	for (j = 0; j < 8; j++) {
		if (i <= 2 && (linear << FRAC_BITS) <= db_invert_table[i][j])
			break;
		else if (i > 2 && linear <= db_invert_table[i][j])
			break;
	}

	if (j == 0 && i == 0)
		goto end;

	if (j == 0) {
		if (i != 3) {
			if (db_invert_table[i][0] - linear >
			    linear - db_invert_table[i - 1][7]) {
				i = i - 1;
				j = 7;
			}
		} else {
			if (db_invert_table[3][0] - linear >
			    linear - db_invert_table[2][7]) {
				i = 2;
				j = 7;
			}
		}
	} else {
		if (db_invert_table[i][j] - linear >
		    linear - db_invert_table[i][j - 1]) {
			j = j - 1;
		}
	}
end:
	dB = (i << 3) + j + 1;

	return dB;
}

u8 rtw_phy_rf_power_2_rssi(s8 *rf_power, u8 path_num)
{
	s8 power;
	u8 power_db;
	u64 linear;
	u64 sum = 0;
	u8 path;

	for (path = 0; path < path_num; path++) {
		power = rf_power[path];
		power_db = rtw_phy_power_2_db(power);
		linear = rtw_phy_db_2_linear(power_db);
		sum += linear;
	}

	sum = (sum + (1 << (FRAC_BITS - 1))) >> FRAC_BITS;
	switch (path_num) {
	case 2:
		sum >>= 1;
		break;
	case 3:
		sum = ((sum) + ((sum) << 1) + ((sum) << 3)) >> 5;
		break;
	case 4:
		sum >>= 2;
		break;
	default:
		break;
	}

	return rtw_phy_linear_2_db(sum);
}

u32 rtw_phy_read_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
		    u32 addr, u32 mask)
{
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_chip_info *chip = rtwdev->chip;
	const u32 *base_addr = chip->rf_base_addr;
	u32 val, direct_addr;

	if (rf_path >= hal->rf_path_num) {
		rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return INV_RF_DATA;
	}

	addr &= 0xff;
	direct_addr = base_addr[rf_path] + (addr << 2);
	mask &= RFREG_MASK;

	val = rtw_read32_mask(rtwdev, direct_addr, mask);

	return val;
}

bool rtw_phy_write_rf_reg_sipi(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
			       u32 addr, u32 mask, u32 data)
{
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_chip_info *chip = rtwdev->chip;
	u32 *sipi_addr = chip->rf_sipi_addr;
	u32 data_and_addr;
	u32 old_data = 0;
	u32 shift;

	if (rf_path >= hal->rf_path_num) {
		rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return false;
	}

	addr &= 0xff;
	mask &= RFREG_MASK;

	if (mask != RFREG_MASK) {
		old_data = rtw_phy_read_rf(rtwdev, rf_path, addr, RFREG_MASK);

		if (old_data == INV_RF_DATA) {
			rtw_err(rtwdev, "Write fail, rf is disabled\n");
			return false;
		}

		shift = __ffs(mask);
		data = ((old_data) & (~mask)) | (data << shift);
	}

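	/* SIPI (serial programming interface) frame: the RF register address
	 * sits in bits [27:20] and the (possibly merged) 20-bit data in
	 * bits [19:0] of the value written to the per-path SIPI register.
	 */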
	data_and_addr = ((addr << 20) | (data & 0x000fffff)) & 0x0fffffff;

	rtw_write32(rtwdev, sipi_addr[rf_path], data_and_addr);

	udelay(13);

	return true;
}

bool rtw_phy_write_rf_reg(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
			  u32 addr, u32 mask, u32 data)
{
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_chip_info *chip = rtwdev->chip;
	const u32 *base_addr = chip->rf_base_addr;
	u32 direct_addr;

	if (rf_path >= hal->rf_path_num) {
		rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return false;
	}

	addr &= 0xff;
	direct_addr = base_addr[rf_path] + (addr << 2);
	mask &= RFREG_MASK;

	rtw_write32_mask(rtwdev, REG_RSV_CTRL, BITS_RFC_DIRECT, DISABLE_PI);
	rtw_write32_mask(rtwdev, REG_WLRF1, BITS_RFC_DIRECT, DISABLE_PI);
	rtw_write32_mask(rtwdev, direct_addr, mask, data);

	udelay(1);

	rtw_write32_mask(rtwdev, REG_RSV_CTRL, BITS_RFC_DIRECT, ENABLE_PI);
	rtw_write32_mask(rtwdev, REG_WLRF1, BITS_RFC_DIRECT, ENABLE_PI);

	return true;
}

bool rtw_phy_write_rf_reg_mix(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
			      u32 addr, u32 mask, u32 data)
{
	if (addr != 0x00)
		return rtw_phy_write_rf_reg(rtwdev, rf_path, addr, mask, data);

	return rtw_phy_write_rf_reg_sipi(rtwdev, rf_path, addr, mask, data);
}

void rtw_phy_setup_phy_cond(struct rtw_dev *rtwdev, u32 pkg)
{
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_efuse *efuse = &rtwdev->efuse;
	struct rtw_phy_cond cond = {0};

	cond.cut = hal->cut_version ? hal->cut_version : 15;
	cond.pkg = pkg ? pkg : 15;
	cond.plat = 0x04;
	cond.rfe = efuse->rfe_option;

	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_USB:
		cond.intf = INTF_USB;
		break;
	case RTW_HCI_TYPE_SDIO:
		cond.intf = INTF_SDIO;
		break;
	case RTW_HCI_TYPE_PCIE:
	default:
		cond.intf = INTF_PCIE;
		break;
	}

	hal->phy_cond = cond;

	rtw_dbg(rtwdev, RTW_DBG_PHY, "phy cond=0x%08x\n", *((u32 *)&hal->phy_cond));
}

static bool check_positive(struct rtw_dev *rtwdev, struct rtw_phy_cond cond)
{
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_phy_cond drv_cond = hal->phy_cond;

	if (cond.cut && cond.cut != drv_cond.cut)
		return false;

	if (cond.pkg && cond.pkg != drv_cond.pkg)
		return false;

	if (cond.intf && cond.intf != drv_cond.intf)
		return false;

	if (cond.rfe != drv_cond.rfe)
		return false;

	return true;
}

void rtw_parse_tbl_phy_cond(struct rtw_dev *rtwdev, const struct rtw_table *tbl)
{
	const union phy_table_tile *p = tbl->data;
	const union phy_table_tile *end = p + tbl->size / 2;
	struct rtw_phy_cond pos_cond = {0};
	bool is_matched = true, is_skipped = false;

	BUILD_BUG_ON(sizeof(union phy_table_tile) != sizeof(struct phy_cfg_pair));

	for (; p < end; p++) {
		if (p->cond.pos) {
			switch (p->cond.branch) {
			case BRANCH_ENDIF:
				is_matched = true;
				is_skipped = false;
				break;
			case BRANCH_ELSE:
				is_matched = is_skipped ? false : true;
				break;
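			/* IF/ELIF entries only record the condition here; it
			 * is evaluated against the driver's phy_cond when the
			 * paired "neg" marker that follows is processed.
			 */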
			case BRANCH_IF:
			case BRANCH_ELIF:
			default:
				pos_cond = p->cond;
				break;
			}
		} else if (p->cond.neg) {
			if (!is_skipped) {
				if (check_positive(rtwdev, pos_cond)) {
					is_matched = true;
					is_skipped = true;
				} else {
					is_matched = false;
					is_skipped = false;
				}
			} else {
				is_matched = false;
			}
		} else if (is_matched) {
			(*tbl->do_cfg)(rtwdev, tbl, p->cfg.addr, p->cfg.data);
		}
	}
}

void rtw_parse_tbl_bb_pg(struct rtw_dev *rtwdev, const struct rtw_table *tbl)
{
	const struct phy_pg_cfg_pair *p = tbl->data;
	const struct phy_pg_cfg_pair *end = p + tbl->size / 6;

	BUILD_BUG_ON(sizeof(struct phy_pg_cfg_pair) != sizeof(u32) * 6);

	for (; p < end; p++) {
		if (p->addr == 0xfe || p->addr == 0xffe) {
			msleep(50);
			continue;
		}
		phy_store_tx_power_by_rate(rtwdev, p->band, p->rf_path,
					   p->tx_num, p->addr, p->bitmask,
					   p->data);
	}
}

void rtw_parse_tbl_txpwr_lmt(struct rtw_dev *rtwdev,
			     const struct rtw_table *tbl)
{
	const struct txpwr_lmt_cfg_pair *p = tbl->data;
	const struct txpwr_lmt_cfg_pair *end = p + tbl->size / 6;

	BUILD_BUG_ON(sizeof(struct txpwr_lmt_cfg_pair) != sizeof(u8) * 6);

	for (; p < end; p++) {
		phy_set_tx_power_limit(rtwdev, p->regd, p->band,
				       p->bw, p->rs,
				       p->ch, p->txpwr_lmt);
	}
}

void rtw_phy_cfg_mac(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
		     u32 addr, u32 data)
{
	rtw_write8(rtwdev, addr, data);
}

void rtw_phy_cfg_agc(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
		     u32 addr, u32 data)
{
	rtw_write32(rtwdev, addr, data);
}

void rtw_phy_cfg_bb(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
		    u32 addr, u32 data)
{
	if (addr == 0xfe)
		msleep(50);
	else if (addr == 0xfd)
		mdelay(5);
	else if (addr == 0xfc)
		mdelay(1);
	else if (addr == 0xfb)
		usleep_range(50, 60);
	else if (addr == 0xfa)
		udelay(5);
	else if (addr == 0xf9)
		udelay(1);
	else
		rtw_write32(rtwdev, addr, data);
}

void rtw_phy_cfg_rf(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
		    u32 addr, u32 data)
{
	if (addr == 0xffe) {
		msleep(50);
	} else if (addr == 0xfe) {
		usleep_range(100, 110);
	} else {
		rtw_write_rf(rtwdev, tbl->rf_path, addr, RFREG_MASK, data);
		udelay(1);
	}
}

static void rtw_load_rfk_table(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;

	if (!chip->rfk_init_tbl)
		return;

	rtw_load_table(rtwdev, chip->rfk_init_tbl);
}

void rtw_phy_load_tables(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	u8 rf_path;

	rtw_load_table(rtwdev, chip->mac_tbl);
	rtw_load_table(rtwdev, chip->bb_tbl);
	rtw_load_table(rtwdev, chip->agc_tbl);
	rtw_load_rfk_table(rtwdev);

	for (rf_path = 0; rf_path < rtwdev->hal.rf_path_num; rf_path++) {
		const struct rtw_table *tbl;

		tbl = chip->rf_tbl[rf_path];
		rtw_load_table(rtwdev, tbl);
	}
}

#define bcd_to_dec_pwr_by_rate(val, i) bcd2bin(val >> (i * 8))

#define RTW_MAX_POWER_INDEX	0x3F

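/* DESC_RATE* values grouped into the rate sections (CCK, OFDM, HT 1SS/2SS,
 * VHT 1SS/2SS) that tx power is programmed by; rtw_rate_section[] and
 * rtw_rate_size[] let callers walk a whole section at once.
 */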
u8 rtw_cck_rates[] = { DESC_RATE1M, DESC_RATE2M, DESC_RATE5_5M, DESC_RATE11M };
u8 rtw_ofdm_rates[] = {
	DESC_RATE6M,  DESC_RATE9M,  DESC_RATE12M,
	DESC_RATE18M, DESC_RATE24M, DESC_RATE36M,
	DESC_RATE48M, DESC_RATE54M
};
u8 rtw_ht_1s_rates[] = {
	DESC_RATEMCS0, DESC_RATEMCS1, DESC_RATEMCS2,
	DESC_RATEMCS3, DESC_RATEMCS4, DESC_RATEMCS5,
	DESC_RATEMCS6, DESC_RATEMCS7
};
u8 rtw_ht_2s_rates[] = {
	DESC_RATEMCS8,  DESC_RATEMCS9,  DESC_RATEMCS10,
	DESC_RATEMCS11, DESC_RATEMCS12, DESC_RATEMCS13,
	DESC_RATEMCS14, DESC_RATEMCS15
};
u8 rtw_vht_1s_rates[] = {
	DESC_RATEVHT1SS_MCS0, DESC_RATEVHT1SS_MCS1,
	DESC_RATEVHT1SS_MCS2, DESC_RATEVHT1SS_MCS3,
	DESC_RATEVHT1SS_MCS4, DESC_RATEVHT1SS_MCS5,
	DESC_RATEVHT1SS_MCS6, DESC_RATEVHT1SS_MCS7,
	DESC_RATEVHT1SS_MCS8, DESC_RATEVHT1SS_MCS9
};
u8 rtw_vht_2s_rates[] = {
	DESC_RATEVHT2SS_MCS0, DESC_RATEVHT2SS_MCS1,
	DESC_RATEVHT2SS_MCS2, DESC_RATEVHT2SS_MCS3,
	DESC_RATEVHT2SS_MCS4, DESC_RATEVHT2SS_MCS5,
	DESC_RATEVHT2SS_MCS6, DESC_RATEVHT2SS_MCS7,
	DESC_RATEVHT2SS_MCS8, DESC_RATEVHT2SS_MCS9
};
u8 rtw_cck_size = ARRAY_SIZE(rtw_cck_rates);
u8 rtw_ofdm_size = ARRAY_SIZE(rtw_ofdm_rates);
u8 rtw_ht_1s_size = ARRAY_SIZE(rtw_ht_1s_rates);
u8 rtw_ht_2s_size = ARRAY_SIZE(rtw_ht_2s_rates);
u8 rtw_vht_1s_size = ARRAY_SIZE(rtw_vht_1s_rates);
u8 rtw_vht_2s_size = ARRAY_SIZE(rtw_vht_2s_rates);
u8 *rtw_rate_section[RTW_RATE_SECTION_MAX] = {
	rtw_cck_rates, rtw_ofdm_rates,
	rtw_ht_1s_rates, rtw_ht_2s_rates,
	rtw_vht_1s_rates, rtw_vht_2s_rates
};
u8 rtw_rate_size[RTW_RATE_SECTION_MAX] = {
	ARRAY_SIZE(rtw_cck_rates),
	ARRAY_SIZE(rtw_ofdm_rates),
	ARRAY_SIZE(rtw_ht_1s_rates),
	ARRAY_SIZE(rtw_ht_2s_rates),
	ARRAY_SIZE(rtw_vht_1s_rates),
	ARRAY_SIZE(rtw_vht_2s_rates)
};

static const u8 rtw_channel_idx_5g[RTW_MAX_CHANNEL_NUM_5G] = {
	36,  38,  40,  42,  44,  46,  48,	/* Band 1 */
	52,  54,  56,  58,  60,  62,  64,	/* Band 2 */
	100, 102, 104, 106, 108, 110, 112,	/* Band 3 */
	116, 118, 120, 122, 124, 126, 128,	/* Band 3 */
	132, 134, 136, 138, 140, 142, 144,	/* Band 3 */
	149, 151, 153, 155, 157, 159, 161,	/* Band 4 */
	165, 167, 169, 171, 173, 175, 177};	/* Band 4 */

static int rtw_channel_to_idx(u8 band, u8 channel)
{
	int ch_idx;
	u8 n_channel;

	if (band == PHY_BAND_2G) {
		ch_idx = channel - 1;
		n_channel = RTW_MAX_CHANNEL_NUM_2G;
	} else if (band == PHY_BAND_5G) {
		n_channel = RTW_MAX_CHANNEL_NUM_5G;
		for (ch_idx = 0; ch_idx < n_channel; ch_idx++)
			if (rtw_channel_idx_5g[ch_idx] == channel)
				break;
	} else {
		return -1;
	}

	if (ch_idx >= n_channel)
		return -1;

	return ch_idx;
}

static u8 rtw_get_channel_group(u8 channel)
{
	switch (channel) {
	default:
		WARN_ON(1);
		/* fall through */
	case 1:
	case 2:
	case 36:
	case 38:
	case 40:
	case 42:
		return 0;
	case 3:
	case 4:
	case 5:
	case 44:
	case 46:
	case 48:
	case 50:
		return 1;
	case 6:
	case 7:
	case 8:
	case 52:
	case 54:
	case 56:
	case 58:
		return 2;
	case 9:
	case 10:
	case 11:
	case 60:
	case 62:
	case 64:
		return 3;
	case 12:
	case 13:
	case 100:
	case 102:
	case 104:
	case 106:
		return 4;
	case 14:
	case 108:
	case 110:
	case 112:
	case 114:
		return 5;
	case 116:
	case 118:
	case 120:
	case 122:
		return 6;
	case 124:
	case 126:
	case 128:
	case 130:
		return 7;
	case 132:
	case 134:
	case 136:
	case 138:
		return 8;
	case 140:
	case 142:
	case 144:
		return 9;
	case 149:
	case 151:
	case 153:
	case 155:
		return 10;
	case 157:
	case 159:
	case 161:
		return 11;
	case 165:
	case 167:
	case 169:
	case 171:
		return 12;
	case 173:
	case 175:
	case 177:
		return 13;
	}
}

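/* 2.4G power index: CCK rates start from cck_base[], all other rates from
 * bw40_base[]; OFDM rates add the ofdm diff, and HT/VHT rates add the
 * per-bandwidth 1SS diff plus the 2SS diff when two spatial streams are
 * used.  All diffs are scaled by the chip's txgi_factor.
 */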
static u8 phy_get_2g_tx_power_index(struct rtw_dev *rtwdev,
				    struct rtw_2g_txpwr_idx *pwr_idx_2g,
				    enum rtw_bandwidth bandwidth,
				    u8 rate, u8 group)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	u8 tx_power;
	bool mcs_rate;
	bool above_2ss;
	u8 factor = chip->txgi_factor;

	if (rate <= DESC_RATE11M)
		tx_power = pwr_idx_2g->cck_base[group];
	else
		tx_power = pwr_idx_2g->bw40_base[group];

	if (rate >= DESC_RATE6M && rate <= DESC_RATE54M)
		tx_power += pwr_idx_2g->ht_1s_diff.ofdm * factor;

	mcs_rate = (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS15) ||
		   (rate >= DESC_RATEVHT1SS_MCS0 &&
		    rate <= DESC_RATEVHT2SS_MCS9);
	above_2ss = (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15) ||
		    (rate >= DESC_RATEVHT2SS_MCS0);

	if (!mcs_rate)
		return tx_power;

	switch (bandwidth) {
	default:
		WARN_ON(1);
		/* fall through */
	case RTW_CHANNEL_WIDTH_20:
		tx_power += pwr_idx_2g->ht_1s_diff.bw20 * factor;
		if (above_2ss)
			tx_power += pwr_idx_2g->ht_2s_diff.bw20 * factor;
		break;
	case RTW_CHANNEL_WIDTH_40:
		/* bw40 is the base power */
		if (above_2ss)
			tx_power += pwr_idx_2g->ht_2s_diff.bw40 * factor;
		break;
	}

	return tx_power;
}

static u8 phy_get_5g_tx_power_index(struct rtw_dev *rtwdev,
				    struct rtw_5g_txpwr_idx *pwr_idx_5g,
				    enum rtw_bandwidth bandwidth,
				    u8 rate, u8 group)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	u8 tx_power;
	u8 upper, lower;
	bool mcs_rate;
	bool above_2ss;
	u8 factor = chip->txgi_factor;

	tx_power = pwr_idx_5g->bw40_base[group];

	mcs_rate = (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS15) ||
		   (rate >= DESC_RATEVHT1SS_MCS0 &&
		    rate <= DESC_RATEVHT2SS_MCS9);
	above_2ss = (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15) ||
		    (rate >= DESC_RATEVHT2SS_MCS0);

	if (!mcs_rate) {
		tx_power += pwr_idx_5g->ht_1s_diff.ofdm * factor;
		return tx_power;
	}

	switch (bandwidth) {
	default:
		WARN_ON(1);
		/* fall through */
	case RTW_CHANNEL_WIDTH_20:
		tx_power += pwr_idx_5g->ht_1s_diff.bw20 * factor;
		if (above_2ss)
			tx_power += pwr_idx_5g->ht_2s_diff.bw20 * factor;
		break;
	case RTW_CHANNEL_WIDTH_40:
		/* bw40 is the base power */
		if (above_2ss)
			tx_power += pwr_idx_5g->ht_2s_diff.bw40 * factor;
		break;
	case RTW_CHANNEL_WIDTH_80:
		/* the base idx of bw80 is the average of bw40+/bw40- */
		lower = pwr_idx_5g->bw40_base[group];
		upper = pwr_idx_5g->bw40_base[group + 1];

		tx_power = (lower + upper) / 2;
		tx_power += pwr_idx_5g->vht_1s_diff.bw80 * factor;
		if (above_2ss)
			tx_power += pwr_idx_5g->vht_2s_diff.bw80 * factor;
		break;
	}

	return tx_power;
}

/* set tx power level by path for each rate, note that the order of the rates
 * is *very* important, because 8822B/8821C combines every four bytes of tx
 * power index into a four-byte power index register, and calls set_tx_agc to
 * write these values into hardware
 */
static
void phy_set_tx_power_level_by_path(struct rtw_dev *rtwdev, u8 ch, u8 path)
{
	struct rtw_hal *hal = &rtwdev->hal;
	u8 rs;

	/* do not need cck rates if we are not in 2.4G */
	if (hal->current_band_type == RTW_BAND_2G)
		rs = RTW_RATE_SECTION_CCK;
	else
		rs = RTW_RATE_SECTION_OFDM;

	for (; rs < RTW_RATE_SECTION_MAX; rs++)
		phy_set_tx_power_index_by_rs(rtwdev, ch, path, rs);
}

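/* Recompute the per-rate power index table for every RF path on the given
 * channel and let the chip ops flush it to the TX AGC registers, all under
 * tx_power_mutex so concurrent updates cannot interleave.
 */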
void rtw_phy_set_tx_power_level(struct rtw_dev *rtwdev, u8 channel)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_hal *hal = &rtwdev->hal;
	u8 path;

	mutex_lock(&hal->tx_power_mutex);

	for (path = 0; path < hal->rf_path_num; path++)
		phy_set_tx_power_level_by_path(rtwdev, channel, path);

	chip->ops->set_tx_power_index(rtwdev);
	mutex_unlock(&hal->tx_power_mutex);
}

s8 phy_get_tx_power_limit(struct rtw_dev *rtwdev, u8 band,
			  enum rtw_bandwidth bandwidth, u8 rf_path,
			  u8 rate, u8 channel, u8 regd);

static
u8 phy_get_tx_power_index(void *adapter, u8 rf_path, u8 rate,
			  enum rtw_bandwidth bandwidth, u8 channel, u8 regd)
{
	struct rtw_dev *rtwdev = adapter;
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_txpwr_idx *pwr_idx;
	u8 tx_power;
	u8 group;
	u8 band;
	s8 offset, limit;

	pwr_idx = &rtwdev->efuse.txpwr_idx_table[rf_path];
	group = rtw_get_channel_group(channel);

	/* base power index for 2.4G/5G */
	if (channel <= 14) {
		band = PHY_BAND_2G;
		tx_power = phy_get_2g_tx_power_index(rtwdev,
						     &pwr_idx->pwr_idx_2g,
						     bandwidth, rate, group);
		offset = hal->tx_pwr_by_rate_offset_2g[rf_path][rate];
	} else {
		band = PHY_BAND_5G;
		tx_power = phy_get_5g_tx_power_index(rtwdev,
						     &pwr_idx->pwr_idx_5g,
						     bandwidth, rate, group);
		offset = hal->tx_pwr_by_rate_offset_5g[rf_path][rate];
	}

	limit = phy_get_tx_power_limit(rtwdev, band, bandwidth, rf_path,
				       rate, channel, regd);

	if (offset > limit)
		offset = limit;

	tx_power += offset;

	if (tx_power > rtwdev->chip->max_power_index)
		tx_power = rtwdev->chip->max_power_index;

	return tx_power;
}

void phy_set_tx_power_index_by_rs(void *adapter, u8 ch, u8 path, u8 rs)
{
	struct rtw_dev *rtwdev = adapter;
	struct rtw_hal *hal = &rtwdev->hal;
	u8 regd = rtwdev->regd.txpwr_regd;
	u8 *rates;
	u8 size;
	u8 rate;
	u8 pwr_idx;
	u8 bw;
	int i;

	if (rs >= RTW_RATE_SECTION_MAX)
		return;

	rates = rtw_rate_section[rs];
	size = rtw_rate_size[rs];
	bw = hal->current_band_width;
	for (i = 0; i < size; i++) {
		rate = rates[i];
		pwr_idx = phy_get_tx_power_index(adapter, path, rate, bw, ch,
						 regd);
		hal->tx_pwr_tbl[path][rate] = pwr_idx;
	}
}

static u8 tbl_to_dec_pwr_by_rate(struct rtw_dev *rtwdev, u32 hex, u8 i)
{
	if (rtwdev->chip->is_pwr_by_rate_dec)
		return bcd_to_dec_pwr_by_rate(hex, i);
	else
		return (hex >> (i * 8)) & 0xFF;
}

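/* Map a BB power-by-rate register (the legacy 0x830-0x86C/0xE00-0xE1C range
 * or the per-path 0xC20/0xE20/0x1820/0x1A20 blocks) and its bitmask to the
 * group of DESC_RATE* values it carries; each 32-bit register packs up to
 * four one-byte power values, decoded via tbl_to_dec_pwr_by_rate().
 */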
static void phy_get_rate_values_of_txpwr_by_rate(struct rtw_dev *rtwdev,
						 u32 addr, u32 mask,
						 u32 val, u8 *rate,
						 s8 *pwr_by_rate, u8 *rate_num)
{
	int i;

	switch (addr) {
	case 0xE00:
	case 0x830:
		rate[0] = DESC_RATE6M;
		rate[1] = DESC_RATE9M;
		rate[2] = DESC_RATE12M;
		rate[3] = DESC_RATE18M;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xE04:
	case 0x834:
		rate[0] = DESC_RATE24M;
		rate[1] = DESC_RATE36M;
		rate[2] = DESC_RATE48M;
		rate[3] = DESC_RATE54M;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xE08:
		rate[0] = DESC_RATE1M;
		pwr_by_rate[0] = bcd_to_dec_pwr_by_rate(val, 1);
		*rate_num = 1;
		break;
	case 0x86C:
		if (mask == 0xffffff00) {
			rate[0] = DESC_RATE2M;
			rate[1] = DESC_RATE5_5M;
			rate[2] = DESC_RATE11M;
			for (i = 1; i < 4; ++i)
				pwr_by_rate[i - 1] =
					tbl_to_dec_pwr_by_rate(rtwdev, val, i);
			*rate_num = 3;
		} else if (mask == 0x000000ff) {
			rate[0] = DESC_RATE11M;
			pwr_by_rate[0] = bcd_to_dec_pwr_by_rate(val, 0);
			*rate_num = 1;
		}
		break;
	case 0xE10:
	case 0x83C:
		rate[0] = DESC_RATEMCS0;
		rate[1] = DESC_RATEMCS1;
		rate[2] = DESC_RATEMCS2;
		rate[3] = DESC_RATEMCS3;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xE14:
	case 0x848:
		rate[0] = DESC_RATEMCS4;
		rate[1] = DESC_RATEMCS5;
		rate[2] = DESC_RATEMCS6;
		rate[3] = DESC_RATEMCS7;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xE18:
	case 0x84C:
		rate[0] = DESC_RATEMCS8;
		rate[1] = DESC_RATEMCS9;
		rate[2] = DESC_RATEMCS10;
		rate[3] = DESC_RATEMCS11;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xE1C:
	case 0x868:
		rate[0] = DESC_RATEMCS12;
		rate[1] = DESC_RATEMCS13;
		rate[2] = DESC_RATEMCS14;
		rate[3] = DESC_RATEMCS15;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0x838:
		rate[0] = DESC_RATE1M;
		rate[1] = DESC_RATE2M;
		rate[2] = DESC_RATE5_5M;
		for (i = 1; i < 4; ++i)
			pwr_by_rate[i - 1] = tbl_to_dec_pwr_by_rate(rtwdev,
								    val, i);
		*rate_num = 3;
		break;
	case 0xC20:
	case 0xE20:
	case 0x1820:
	case 0x1A20:
		rate[0] = DESC_RATE1M;
		rate[1] = DESC_RATE2M;
		rate[2] = DESC_RATE5_5M;
		rate[3] = DESC_RATE11M;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC24:
	case 0xE24:
	case 0x1824:
	case 0x1A24:
		rate[0] = DESC_RATE6M;
		rate[1] = DESC_RATE9M;
		rate[2] = DESC_RATE12M;
		rate[3] = DESC_RATE18M;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC28:
	case 0xE28:
	case 0x1828:
	case 0x1A28:
		rate[0] = DESC_RATE24M;
		rate[1] = DESC_RATE36M;
		rate[2] = DESC_RATE48M;
		rate[3] = DESC_RATE54M;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC2C:
	case 0xE2C:
	case 0x182C:
	case 0x1A2C:
		rate[0] = DESC_RATEMCS0;
		rate[1] = DESC_RATEMCS1;
		rate[2] = DESC_RATEMCS2;
		rate[3] = DESC_RATEMCS3;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC30:
	case 0xE30:
	case 0x1830:
	case 0x1A30:
		rate[0] = DESC_RATEMCS4;
		rate[1] = DESC_RATEMCS5;
		rate[2] = DESC_RATEMCS6;
		rate[3] = DESC_RATEMCS7;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC34:
	case 0xE34:
	case 0x1834:
	case 0x1A34:
		rate[0] = DESC_RATEMCS8;
		rate[1] = DESC_RATEMCS9;
		rate[2] = DESC_RATEMCS10;
		rate[3] = DESC_RATEMCS11;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC38:
	case 0xE38:
	case 0x1838:
	case 0x1A38:
		rate[0] = DESC_RATEMCS12;
		rate[1] = DESC_RATEMCS13;
		rate[2] = DESC_RATEMCS14;
		rate[3] = DESC_RATEMCS15;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC3C:
	case 0xE3C:
	case 0x183C:
	case 0x1A3C:
		rate[0] = DESC_RATEVHT1SS_MCS0;
		rate[1] = DESC_RATEVHT1SS_MCS1;
		rate[2] = DESC_RATEVHT1SS_MCS2;
		rate[3] = DESC_RATEVHT1SS_MCS3;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC40:
	case 0xE40:
	case 0x1840:
	case 0x1A40:
		rate[0] = DESC_RATEVHT1SS_MCS4;
		rate[1] = DESC_RATEVHT1SS_MCS5;
		rate[2] = DESC_RATEVHT1SS_MCS6;
		rate[3] = DESC_RATEVHT1SS_MCS7;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC44:
	case 0xE44:
	case 0x1844:
	case 0x1A44:
		rate[0] = DESC_RATEVHT1SS_MCS8;
		rate[1] = DESC_RATEVHT1SS_MCS9;
		rate[2] = DESC_RATEVHT2SS_MCS0;
		rate[3] = DESC_RATEVHT2SS_MCS1;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC48:
	case 0xE48:
	case 0x1848:
	case 0x1A48:
		rate[0] = DESC_RATEVHT2SS_MCS2;
		rate[1] = DESC_RATEVHT2SS_MCS3;
		rate[2] = DESC_RATEVHT2SS_MCS4;
		rate[3] = DESC_RATEVHT2SS_MCS5;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC4C:
	case 0xE4C:
	case 0x184C:
	case 0x1A4C:
		rate[0] = DESC_RATEVHT2SS_MCS6;
		rate[1] = DESC_RATEVHT2SS_MCS7;
		rate[2] = DESC_RATEVHT2SS_MCS8;
		rate[3] = DESC_RATEVHT2SS_MCS9;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xCD8:
	case 0xED8:
	case 0x18D8:
	case 0x1AD8:
		rate[0] = DESC_RATEMCS16;
		rate[1] = DESC_RATEMCS17;
		rate[2] = DESC_RATEMCS18;
		rate[3] = DESC_RATEMCS19;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xCDC:
	case 0xEDC:
	case 0x18DC:
	case 0x1ADC:
		rate[0] = DESC_RATEMCS20;
		rate[1] = DESC_RATEMCS21;
		rate[2] = DESC_RATEMCS22;
		rate[3] = DESC_RATEMCS23;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xCE0:
	case 0xEE0:
	case 0x18E0:
	case 0x1AE0:
		rate[0] = DESC_RATEVHT3SS_MCS0;
		rate[1] = DESC_RATEVHT3SS_MCS1;
		rate[2] = DESC_RATEVHT3SS_MCS2;
		rate[3] = DESC_RATEVHT3SS_MCS3;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xCE4:
	case 0xEE4:
	case 0x18E4:
	case 0x1AE4:
		rate[0] = DESC_RATEVHT3SS_MCS4;
		rate[1] = DESC_RATEVHT3SS_MCS5;
		rate[2] = DESC_RATEVHT3SS_MCS6;
		rate[3] = DESC_RATEVHT3SS_MCS7;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xCE8:
	case 0xEE8:
	case 0x18E8:
	case 0x1AE8:
		rate[0] = DESC_RATEVHT3SS_MCS8;
		rate[1] = DESC_RATEVHT3SS_MCS9;
		for (i = 0; i < 2; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 2;
		break;
	default:
		rtw_warn(rtwdev, "invalid tx power index addr 0x%08x\n", addr);
		break;
	}
}

void phy_store_tx_power_by_rate(void *adapter, u32 band, u32 rfpath, u32 txnum,
				u32 regaddr, u32 bitmask, u32 data)
{
	struct rtw_dev *rtwdev = adapter;
	struct rtw_hal *hal = &rtwdev->hal;
	u8 rate_num = 0;
	u8 rate;
	u8 rates[RTW_RF_PATH_MAX] = {0};
	s8 offset;
	s8 pwr_by_rate[RTW_RF_PATH_MAX] = {0};
	int i;

	phy_get_rate_values_of_txpwr_by_rate(rtwdev, regaddr, bitmask, data,
					     rates, pwr_by_rate, &rate_num);

	if (WARN_ON(rfpath >= RTW_RF_PATH_MAX ||
		    (band != PHY_BAND_2G && band != PHY_BAND_5G) ||
		    rate_num > RTW_RF_PATH_MAX))
		return;

	for (i = 0; i < rate_num; i++) {
		offset = pwr_by_rate[i];
		rate = rates[i];
		if (band == PHY_BAND_2G)
			hal->tx_pwr_by_rate_offset_2g[rfpath][rate] = offset;
		else if (band == PHY_BAND_5G)
			hal->tx_pwr_by_rate_offset_5g[rfpath][rate] = offset;
		else
			continue;
	}
}

static
void phy_tx_power_by_rate_config_by_path(struct rtw_hal *hal, u8 path,
					 u8 rs, u8 size, u8 *rates)
{
	u8 rate;
	u8 base_idx, rate_idx;
	s8 base_2g, base_5g;

	if (rs >= RTW_RATE_SECTION_VHT_1S)
		base_idx = rates[size - 3];
	else
		base_idx = rates[size - 1];
	base_2g = hal->tx_pwr_by_rate_offset_2g[path][base_idx];
	base_5g = hal->tx_pwr_by_rate_offset_5g[path][base_idx];
	hal->tx_pwr_by_rate_base_2g[path][rs] = base_2g;
	hal->tx_pwr_by_rate_base_5g[path][rs] = base_5g;
	for (rate = 0; rate < size; rate++) {
		rate_idx = rates[rate];
		hal->tx_pwr_by_rate_offset_2g[path][rate_idx] -= base_2g;
		hal->tx_pwr_by_rate_offset_5g[path][rate_idx] -= base_5g;
	}
}

void rtw_phy_tx_power_by_rate_config(struct rtw_hal *hal)
{
	u8 path;

	for (path = 0; path < RTW_RF_PATH_MAX; path++) {
		phy_tx_power_by_rate_config_by_path(hal, path,
						    RTW_RATE_SECTION_CCK,
						    rtw_cck_size, rtw_cck_rates);
		phy_tx_power_by_rate_config_by_path(hal, path,
						    RTW_RATE_SECTION_OFDM,
						    rtw_ofdm_size, rtw_ofdm_rates);
		phy_tx_power_by_rate_config_by_path(hal, path,
						    RTW_RATE_SECTION_HT_1S,
						    rtw_ht_1s_size, rtw_ht_1s_rates);
		phy_tx_power_by_rate_config_by_path(hal, path,
						    RTW_RATE_SECTION_HT_2S,
						    rtw_ht_2s_size, rtw_ht_2s_rates);
		phy_tx_power_by_rate_config_by_path(hal, path,
						    RTW_RATE_SECTION_VHT_1S,
						    rtw_vht_1s_size, rtw_vht_1s_rates);
		phy_tx_power_by_rate_config_by_path(hal, path,
						    RTW_RATE_SECTION_VHT_2S,
						    rtw_vht_2s_size, rtw_vht_2s_rates);
	}
}

static void
phy_tx_power_limit_config(struct rtw_hal *hal, u8 regd, u8 bw, u8 rs)
{
	s8 base;
	u8 ch;

	for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_2G; ch++) {
		base = hal->tx_pwr_by_rate_base_2g[0][rs];
		hal->tx_pwr_limit_2g[regd][bw][rs][ch] -= base;
	}

	for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_5G; ch++) {
		base = hal->tx_pwr_by_rate_base_5g[0][rs];
		hal->tx_pwr_limit_5g[regd][bw][rs][ch] -= base;
	}
}

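/* After the limit tables have been parsed, rebase every limit against the
 * per-section by-rate base power of path A so limits and by-rate offsets are
 * expressed on the same scale and can be compared directly in
 * phy_get_tx_power_index().
 */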
void rtw_phy_tx_power_limit_config(struct rtw_hal *hal)
{
	u8 regd, bw, rs;

	for (regd = 0; regd < RTW_REGD_MAX; regd++)
		for (bw = 0; bw < RTW_CHANNEL_WIDTH_MAX; bw++)
			for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++)
				phy_tx_power_limit_config(hal, regd, bw, rs);
}

static s8 get_tx_power_limit(struct rtw_hal *hal, u8 band, u8 bw, u8 rs,
			     u8 ch, u8 regd)
{
	if (regd > RTW_REGD_WW)
		return RTW_MAX_POWER_INDEX;

	if (band == PHY_BAND_2G)
		return hal->tx_pwr_limit_2g[regd][bw][rs][ch];

	return hal->tx_pwr_limit_5g[regd][bw][rs][ch];
}

s8 phy_get_tx_power_limit(struct rtw_dev *rtwdev, u8 band,
			  enum rtw_bandwidth bw, u8 rf_path,
			  u8 rate, u8 channel, u8 regd)
{
	struct rtw_hal *hal = &rtwdev->hal;
	s8 power_limit;
	u8 rs;
	int ch_idx;

	if (rate >= DESC_RATE1M && rate <= DESC_RATE11M)
		rs = RTW_RATE_SECTION_CCK;
	else if (rate >= DESC_RATE6M && rate <= DESC_RATE54M)
		rs = RTW_RATE_SECTION_OFDM;
	else if (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS7)
		rs = RTW_RATE_SECTION_HT_1S;
	else if (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15)
		rs = RTW_RATE_SECTION_HT_2S;
	else if (rate >= DESC_RATEVHT1SS_MCS0 && rate <= DESC_RATEVHT1SS_MCS9)
		rs = RTW_RATE_SECTION_VHT_1S;
	else if (rate >= DESC_RATEVHT2SS_MCS0 && rate <= DESC_RATEVHT2SS_MCS9)
		rs = RTW_RATE_SECTION_VHT_2S;
	else
		goto err;

	ch_idx = rtw_channel_to_idx(band, channel);
	if (ch_idx < 0)
		goto err;

	power_limit = get_tx_power_limit(hal, band, bw, rs, ch_idx, regd);

	return power_limit;

err:
	WARN(1, "invalid arguments, band=%d, bw=%d, path=%d, rate=%d, ch=%d\n",
	     band, bw, rf_path, rate, channel);
	return RTW_MAX_POWER_INDEX;
}

void phy_set_tx_power_limit(struct rtw_dev *rtwdev, u8 regd, u8 band,
			    u8 bw, u8 rs, u8 ch, s8 pwr_limit)
{
	struct rtw_hal *hal = &rtwdev->hal;
	int ch_idx;

	pwr_limit = clamp_t(s8, pwr_limit,
			    -RTW_MAX_POWER_INDEX, RTW_MAX_POWER_INDEX);
	ch_idx = rtw_channel_to_idx(band, ch);

	if (regd >= RTW_REGD_MAX || bw >= RTW_CHANNEL_WIDTH_MAX ||
	    rs >= RTW_RATE_SECTION_MAX || ch_idx < 0) {
		WARN(1,
		     "wrong txpwr_lmt regd=%u, band=%u bw=%u, rs=%u, ch_idx=%u, pwr_limit=%d\n",
		     regd, band, bw, rs, ch_idx, pwr_limit);
		return;
	}

	if (band == PHY_BAND_2G)
		hal->tx_pwr_limit_2g[regd][bw][rs][ch_idx] = pwr_limit;
	else if (band == PHY_BAND_5G)
		hal->tx_pwr_limit_5g[regd][bw][rs][ch_idx] = pwr_limit;
}

static
void rtw_hw_tx_power_limit_init(struct rtw_hal *hal, u8 regd, u8 bw, u8 rs)
{
	u8 ch;

	/* 2.4G channels */
	for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_2G; ch++)
		hal->tx_pwr_limit_2g[regd][bw][rs][ch] = RTW_MAX_POWER_INDEX;

	/* 5G channels */
	for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_5G; ch++)
		hal->tx_pwr_limit_5g[regd][bw][rs][ch] = RTW_MAX_POWER_INDEX;
}

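/* Reset the power tables to their neutral state before any table is parsed:
 * by-rate offsets start at 0 and every regulatory limit starts at
 * RTW_MAX_POWER_INDEX, i.e. effectively unrestricted until a txpwr_lmt table
 * lowers it.
 */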
void rtw_hw_init_tx_power(struct rtw_hal *hal)
{
	u8 regd, path, rate, rs, bw;

	/* init tx power by rate offset */
	for (path = 0; path < RTW_RF_PATH_MAX; path++) {
		for (rate = 0; rate < DESC_RATE_MAX; rate++) {
			hal->tx_pwr_by_rate_offset_2g[path][rate] = 0;
			hal->tx_pwr_by_rate_offset_5g[path][rate] = 0;
		}
	}

	/* init tx power limit */
	for (regd = 0; regd < RTW_REGD_MAX; regd++)
		for (bw = 0; bw < RTW_CHANNEL_WIDTH_MAX; bw++)
			for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++)
				rtw_hw_tx_power_limit_init(hal, regd, bw, rs);
}