// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */
#include <linux/sched.h>
#include <linux/of.h>
#include "mt76.h"

#define CHAN2G(_idx, _freq) {			\
	.band = NL80211_BAND_2GHZ,		\
	.center_freq = (_freq),			\
	.hw_value = (_idx),			\
	.max_power = 30,			\
}

#define CHAN5G(_idx, _freq) {			\
	.band = NL80211_BAND_5GHZ,		\
	.center_freq = (_freq),			\
	.hw_value = (_idx),			\
	.max_power = 30,			\
}

#define CHAN6G(_idx, _freq) {			\
	.band = NL80211_BAND_6GHZ,		\
	.center_freq = (_freq),			\
	.hw_value = (_idx),			\
	.max_power = 30,			\
}

static const struct ieee80211_channel mt76_channels_2ghz[] = {
	CHAN2G(1, 2412),
	CHAN2G(2, 2417),
	CHAN2G(3, 2422),
	CHAN2G(4, 2427),
	CHAN2G(5, 2432),
	CHAN2G(6, 2437),
	CHAN2G(7, 2442),
	CHAN2G(8, 2447),
	CHAN2G(9, 2452),
	CHAN2G(10, 2457),
	CHAN2G(11, 2462),
	CHAN2G(12, 2467),
	CHAN2G(13, 2472),
	CHAN2G(14, 2484),
};

static const struct ieee80211_channel mt76_channels_5ghz[] = {
	CHAN5G(36, 5180),
	CHAN5G(40, 5200),
	CHAN5G(44, 5220),
	CHAN5G(48, 5240),

	CHAN5G(52, 5260),
	CHAN5G(56, 5280),
	CHAN5G(60, 5300),
	CHAN5G(64, 5320),

	CHAN5G(100, 5500),
	CHAN5G(104, 5520),
	CHAN5G(108, 5540),
	CHAN5G(112, 5560),
	CHAN5G(116, 5580),
	CHAN5G(120, 5600),
	CHAN5G(124, 5620),
	CHAN5G(128, 5640),
	CHAN5G(132, 5660),
	CHAN5G(136, 5680),
	CHAN5G(140, 5700),
	CHAN5G(144, 5720),

	CHAN5G(149, 5745),
	CHAN5G(153, 5765),
	CHAN5G(157, 5785),
	CHAN5G(161, 5805),
	CHAN5G(165, 5825),
	CHAN5G(169, 5845),
	CHAN5G(173, 5865),
};

static const struct ieee80211_channel mt76_channels_6ghz[] = {
	/* UNII-5 */
	CHAN6G(1, 5955),
	CHAN6G(5, 5975),
	CHAN6G(9, 5995),
	CHAN6G(13, 6015),
	CHAN6G(17, 6035),
	CHAN6G(21, 6055),
	CHAN6G(25, 6075),
	CHAN6G(29, 6095),
	CHAN6G(33, 6115),
	CHAN6G(37, 6135),
	CHAN6G(41, 6155),
	CHAN6G(45, 6175),
	CHAN6G(49, 6195),
	CHAN6G(53, 6215),
	CHAN6G(57, 6235),
	CHAN6G(61, 6255),
	CHAN6G(65, 6275),
	CHAN6G(69, 6295),
	CHAN6G(73, 6315),
	CHAN6G(77, 6335),
	CHAN6G(81, 6355),
	CHAN6G(85, 6375),
	CHAN6G(89, 6395),
	CHAN6G(93, 6415),
	/* UNII-6 */
	CHAN6G(97, 6435),
	CHAN6G(101, 6455),
	CHAN6G(105, 6475),
	CHAN6G(109, 6495),
	CHAN6G(113, 6515),
	CHAN6G(117, 6535),
	/* UNII-7 */
	CHAN6G(121, 6555),
	CHAN6G(125, 6575),
	CHAN6G(129, 6595),
	CHAN6G(133, 6615),
	CHAN6G(137, 6635),
	CHAN6G(141, 6655),
	CHAN6G(145, 6675),
	CHAN6G(149, 6695),
	CHAN6G(153, 6715),
	CHAN6G(157, 6735),
	CHAN6G(161, 6755),
	CHAN6G(165, 6775),
	CHAN6G(169, 6795),
	CHAN6G(173, 6815),
	CHAN6G(177, 6835),
	CHAN6G(181, 6855),
	CHAN6G(185, 6875),
	/* UNII-8 */
	CHAN6G(189, 6895),
	CHAN6G(193, 6915),
	CHAN6G(197, 6935),
	CHAN6G(201, 6955),
	CHAN6G(205, 6975),
	CHAN6G(209, 6995),
	CHAN6G(213, 7015),
	CHAN6G(217, 7035),
	CHAN6G(221, 7055),
	CHAN6G(225, 7075),
	CHAN6G(229, 7095),
	CHAN6G(233, 7115),
};

static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
	{ .throughput = 0 * 1024, .blink_time = 334 },
	{ .throughput = 1 * 1024, .blink_time = 260 },
	{ .throughput = 5 * 1024, .blink_time = 220 },
	{ .throughput = 10 * 1024, .blink_time = 190 },
	{ .throughput = 20 * 1024, .blink_time = 170 },
	{ .throughput = 50 * 1024, .blink_time = 150 },
	{ .throughput = 70 * 1024, .blink_time = 130 },
	{ .throughput = 100 * 1024, .blink_time = 110 },
	{ .throughput = 200 * 1024, .blink_time = 80 },
	{ .throughput = 300 * 1024, .blink_time = 50 },
};

struct ieee80211_rate mt76_rates[] = {
	CCK_RATE(0, 10),
	CCK_RATE(1, 20),
	CCK_RATE(2, 55),
	CCK_RATE(3, 110),
	OFDM_RATE(11, 60),
	OFDM_RATE(15, 90),
	OFDM_RATE(10, 120),
	OFDM_RATE(14, 180),
	OFDM_RATE(9, 240),
	OFDM_RATE(13, 360),
	OFDM_RATE(8, 480),
	OFDM_RATE(12, 540),
};
EXPORT_SYMBOL_GPL(mt76_rates);

static const struct cfg80211_sar_freq_ranges mt76_sar_freq_ranges[] = {
	{ .start_freq = 2402, .end_freq = 2494, },
	{ .start_freq = 5150, .end_freq = 5350, },
	{ .start_freq = 5350, .end_freq = 5470, },
	{ .start_freq = 5470, .end_freq = 5725, },
	{ .start_freq = 5725, .end_freq = 5950, },
	{ .start_freq = 5945, .end_freq = 6165, },
	{ .start_freq = 6165, .end_freq = 6405, },
	{ .start_freq = 6405, .end_freq = 6525, },
	{ .start_freq = 6525, .end_freq = 6705, },
	{ .start_freq = 6705, .end_freq = 6865, },
	{ .start_freq = 6865, .end_freq = 7125, },
};

static const struct cfg80211_sar_capa mt76_sar_capa = {
	.type = NL80211_SAR_TYPE_POWER,
	.num_freq_ranges = ARRAY_SIZE(mt76_sar_freq_ranges),
	.freq_ranges = &mt76_sar_freq_ranges[0],
};

static int mt76_led_init(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_hw *hw = phy->hw;

	if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
		return 0;

	snprintf(phy->leds.name, sizeof(phy->leds.name), "mt76-%s",
		 wiphy_name(hw->wiphy));

	phy->leds.cdev.name = phy->leds.name;
	phy->leds.cdev.default_trigger =
		ieee80211_create_tpt_led_trigger(hw,
						 IEEE80211_TPT_LEDTRIG_FL_RADIO,
						 mt76_tpt_blink,
						 ARRAY_SIZE(mt76_tpt_blink));

	if (phy == &dev->phy) {
		struct device_node *np = dev->dev->of_node;

		np = of_get_child_by_name(np, "led");
		if (np) {
			int led_pin;

			if (!of_property_read_u32(np, "led-sources", &led_pin))
				phy->leds.pin = led_pin;
			phy->leds.al = of_property_read_bool(np,
							     "led-active-low");
			of_node_put(np);
		}
	}

	return led_classdev_register(dev->dev, &phy->leds.cdev);
}

static void mt76_led_cleanup(struct mt76_phy *phy)
{
	if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
		return;

	led_classdev_unregister(&phy->leds.cdev);
}

static void mt76_init_stream_cap(struct mt76_phy *phy,
				 struct ieee80211_supported_band *sband,
				 bool vht)
{
	struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
	int i, nstream = hweight8(phy->antenna_mask);
	struct ieee80211_sta_vht_cap *vht_cap;
	u16 mcs_map = 0;

	if (nstream > 1)
		ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC;
	else
		ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC;

	for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
		ht_cap->mcs.rx_mask[i] = i < nstream ? 0xff : 0;

	if (!vht)
		return;

	vht_cap = &sband->vht_cap;
	if (nstream > 1)
		vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
	else
		vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC;
	vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
			IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN;

	for (i = 0; i < 8; i++) {
		if (i < nstream)
			mcs_map |= (IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2));
		else
			mcs_map |=
				(IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2));
	}
	vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
	vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
	if (ieee80211_hw_check(phy->hw, SUPPORTS_VHT_EXT_NSS_BW))
		vht_cap->vht_mcs.tx_highest |=
			cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
}

void mt76_set_stream_caps(struct mt76_phy *phy, bool vht)
{
	if (phy->cap.has_2ghz)
		mt76_init_stream_cap(phy, &phy->sband_2g.sband, false);
	if (phy->cap.has_5ghz)
		mt76_init_stream_cap(phy, &phy->sband_5g.sband, vht);
	if (phy->cap.has_6ghz)
		mt76_init_stream_cap(phy, &phy->sband_6g.sband, vht);
}
EXPORT_SYMBOL_GPL(mt76_set_stream_caps);

static int
mt76_init_sband(struct mt76_phy *phy, struct mt76_sband *msband,
		const struct ieee80211_channel *chan, int n_chan,
		struct ieee80211_rate *rates, int n_rates,
		bool ht, bool vht)
{
	struct ieee80211_supported_band *sband = &msband->sband;
	struct ieee80211_sta_vht_cap *vht_cap;
	struct ieee80211_sta_ht_cap *ht_cap;
	struct mt76_dev *dev = phy->dev;
	void *chanlist;
	int size;

	size = n_chan * sizeof(*chan);
	chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
	if (!chanlist)
		return -ENOMEM;

	msband->chan = devm_kcalloc(dev->dev, n_chan, sizeof(*msband->chan),
				    GFP_KERNEL);
	if (!msband->chan)
		return -ENOMEM;

	sband->channels = chanlist;
	sband->n_channels = n_chan;
	sband->bitrates = rates;
	sband->n_bitrates = n_rates;

	if (!ht)
		return 0;

	ht_cap = &sband->ht_cap;
	ht_cap->ht_supported = true;
	ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
		       IEEE80211_HT_CAP_GRN_FLD |
		       IEEE80211_HT_CAP_SGI_20 |
		       IEEE80211_HT_CAP_SGI_40 |
		       (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);

	ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
	ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;

	mt76_init_stream_cap(phy, sband, vht);

	if (!vht)
		return 0;

	vht_cap = &sband->vht_cap;
	vht_cap->vht_supported = true;
	vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
			IEEE80211_VHT_CAP_RXSTBC_1 |
			IEEE80211_VHT_CAP_SHORT_GI_80 |
			(3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);

	return 0;
}

static int
mt76_init_sband_2g(struct mt76_phy *phy, struct ieee80211_rate *rates,
		   int n_rates)
{
	phy->hw->wiphy->bands[NL80211_BAND_2GHZ] = &phy->sband_2g.sband;

	return mt76_init_sband(phy, &phy->sband_2g, mt76_channels_2ghz,
			       ARRAY_SIZE(mt76_channels_2ghz), rates,
			       n_rates, true, false);
}

static int
mt76_init_sband_5g(struct mt76_phy *phy, struct ieee80211_rate *rates,
		   int n_rates, bool vht)
{
	phy->hw->wiphy->bands[NL80211_BAND_5GHZ] = &phy->sband_5g.sband;

	return mt76_init_sband(phy, &phy->sband_5g, mt76_channels_5ghz,
			       ARRAY_SIZE(mt76_channels_5ghz), rates,
			       n_rates, true, vht);
}

static int
mt76_init_sband_6g(struct mt76_phy *phy, struct ieee80211_rate *rates,
		   int n_rates)
{
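	/* 6 GHz does not advertise HT/VHT here; only the channel list and
	 * the legacy (OFDM) rates passed in by the caller are registered.
	 */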
	phy->hw->wiphy->bands[NL80211_BAND_6GHZ] = &phy->sband_6g.sband;

	return mt76_init_sband(phy, &phy->sband_6g, mt76_channels_6ghz,
			       ARRAY_SIZE(mt76_channels_6ghz), rates,
			       n_rates, false, false);
}

static void
mt76_check_sband(struct mt76_phy *phy, struct mt76_sband *msband,
		 enum nl80211_band band)
{
	struct ieee80211_supported_band *sband = &msband->sband;
	bool found = false;
	int i;

	if (!sband)
		return;

	for (i = 0; i < sband->n_channels; i++) {
		if (sband->channels[i].flags & IEEE80211_CHAN_DISABLED)
			continue;

		found = true;
		break;
	}

	if (found) {
		phy->chandef.chan = &sband->channels[0];
		phy->chan_state = &msband->chan[0];
		return;
	}

	sband->n_channels = 0;
	phy->hw->wiphy->bands[band] = NULL;
}

static int
mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
{
	struct mt76_dev *dev = phy->dev;
	struct wiphy *wiphy = hw->wiphy;

	SET_IEEE80211_DEV(hw, dev->dev);
	SET_IEEE80211_PERM_ADDR(hw, phy->macaddr);

	wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
	wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH |
			WIPHY_FLAG_SUPPORTS_TDLS |
			WIPHY_FLAG_AP_UAPSD;

	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AQL);

	wiphy->available_antennas_tx = phy->antenna_mask;
	wiphy->available_antennas_rx = phy->antenna_mask;

	wiphy->sar_capa = &mt76_sar_capa;
	phy->frp = devm_kcalloc(dev->dev, wiphy->sar_capa->num_freq_ranges,
				sizeof(struct mt76_freq_range_power),
				GFP_KERNEL);
	if (!phy->frp)
		return -ENOMEM;

	hw->txq_data_size = sizeof(struct mt76_txq);
	hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;

	if (!hw->max_tx_fragments)
		hw->max_tx_fragments = 16;

	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
	ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);

	if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD)) {
		ieee80211_hw_set(hw, TX_AMSDU);
		ieee80211_hw_set(hw, TX_FRAG_LIST);
	}

	ieee80211_hw_set(hw, MFP_CAPABLE);
	ieee80211_hw_set(hw, AP_LINK_PS);
	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);

	return 0;
}

struct mt76_phy *
mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
	       const struct ieee80211_ops *ops, u8 band_idx)
{
	struct ieee80211_hw *hw;
	unsigned int phy_size;
	struct mt76_phy *phy;

	phy_size = ALIGN(sizeof(*phy), 8);
	hw = ieee80211_alloc_hw(size + phy_size, ops);
	if (!hw)
		return NULL;

	phy = hw->priv;
	phy->dev = dev;
	phy->hw = hw;
	phy->priv = hw->priv + phy_size;
	phy->band_idx = band_idx;

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_ADHOC);

	return phy;
}
EXPORT_SYMBOL_GPL(mt76_alloc_phy);

int mt76_register_phy(struct mt76_phy *phy, bool vht,
		      struct ieee80211_rate *rates, int n_rates)
{
	int ret;

	ret = mt76_phy_init(phy, phy->hw);
	if (ret)
		return ret;

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	if (phy->cap.has_5ghz) {
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	if (phy->cap.has_6ghz) {
		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
		if (ret)
			return ret;
	}

	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
		ret = mt76_led_init(phy);
		if (ret)
			return ret;
	}

	wiphy_read_of_freq_limits(phy->hw->wiphy);
	mt76_check_sband(phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ);
	mt76_check_sband(phy, &phy->sband_6g, NL80211_BAND_6GHZ);

	ret = ieee80211_register_hw(phy->hw);
	if (ret)
		return ret;

	phy->dev->phys[phy->band_idx] = phy;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_phy);

void mt76_unregister_phy(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;

	if (IS_ENABLED(CONFIG_MT76_LEDS))
		mt76_led_cleanup(phy);
	mt76_tx_status_check(dev, true);
	ieee80211_unregister_hw(phy->hw);
	dev->phys[phy->band_idx] = NULL;
}
EXPORT_SYMBOL_GPL(mt76_unregister_phy);

struct mt76_dev *
mt76_alloc_device(struct device *pdev, unsigned int size,
		  const struct ieee80211_ops *ops,
		  const struct mt76_driver_ops *drv_ops)
{
	struct ieee80211_hw *hw;
	struct mt76_phy *phy;
	struct mt76_dev *dev;
	int i;

	hw = ieee80211_alloc_hw(size, ops);
	if (!hw)
		return NULL;

	dev = hw->priv;
	dev->hw = hw;
	dev->dev = pdev;
	dev->drv = drv_ops;
	dev->dma_dev = pdev;

	phy = &dev->phy;
	phy->dev = dev;
	phy->hw = hw;
	phy->band_idx = MT_BAND0;
	dev->phys[phy->band_idx] = phy;

	spin_lock_init(&dev->rx_lock);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->cc_lock);
	spin_lock_init(&dev->status_lock);
	spin_lock_init(&dev->wed_lock);
	mutex_init(&dev->mutex);
	init_waitqueue_head(&dev->tx_wait);

	skb_queue_head_init(&dev->mcu.res_q);
	init_waitqueue_head(&dev->mcu.wait);
	mutex_init(&dev->mcu.mutex);
	dev->tx_worker.fn = mt76_tx_worker;

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_ADHOC);

	spin_lock_init(&dev->token_lock);
	idr_init(&dev->token);

	spin_lock_init(&dev->rx_token_lock);
	idr_init(&dev->rx_token);

	INIT_LIST_HEAD(&dev->wcid_list);

	INIT_LIST_HEAD(&dev->txwi_cache);
	INIT_LIST_HEAD(&dev->rxwi_cache);
	dev->token_size = dev->drv->token_size;

	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
		skb_queue_head_init(&dev->rx_skb[i]);

	dev->wq = alloc_ordered_workqueue("mt76", 0);
	if (!dev->wq) {
		ieee80211_free_hw(hw);
		return NULL;
	}

	return dev;
}
EXPORT_SYMBOL_GPL(mt76_alloc_device);

int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates)
{
	struct ieee80211_hw *hw = dev->hw;
	struct mt76_phy *phy = &dev->phy;
	int ret;

	dev_set_drvdata(dev->dev, dev);
	ret = mt76_phy_init(phy, hw);
	if (ret)
		return ret;

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	if (phy->cap.has_5ghz) {
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	if (phy->cap.has_6ghz) {
		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
		if (ret)
			return ret;
	}

	wiphy_read_of_freq_limits(hw->wiphy);
	mt76_check_sband(&dev->phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(&dev->phy, &phy->sband_5g, NL80211_BAND_5GHZ);
	mt76_check_sband(&dev->phy, &phy->sband_6g, NL80211_BAND_6GHZ);

	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
		ret = mt76_led_init(phy);
		if (ret)
			return ret;
	}

	ret = ieee80211_register_hw(hw);
	if (ret)
		return ret;

	WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx"));
	sched_set_fifo_low(dev->tx_worker.task);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_device);

void mt76_unregister_device(struct mt76_dev *dev)
{
	struct ieee80211_hw *hw = dev->hw;

	if (IS_ENABLED(CONFIG_MT76_LEDS))
		mt76_led_cleanup(&dev->phy);
	mt76_tx_status_check(dev, true);
	ieee80211_unregister_hw(hw);
}
EXPORT_SYMBOL_GPL(mt76_unregister_device);

void mt76_free_device(struct mt76_dev *dev)
{
	mt76_worker_teardown(&dev->tx_worker);
	if (dev->wq) {
		destroy_workqueue(dev->wq);
		dev->wq = NULL;
	}
	ieee80211_free_hw(dev->hw);
}
EXPORT_SYMBOL_GPL(mt76_free_device);

static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
{
	struct sk_buff *skb = phy->rx_amsdu[q].head;
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_dev *dev = phy->dev;

	phy->rx_amsdu[q].head = NULL;
	phy->rx_amsdu[q].tail = NULL;

	/*
	 * Validate that the A-MSDU has a proper first subframe.
	 * A single MSDU can be parsed as A-MSDU when the unauthenticated A-MSDU
	 * flag of the QoS header gets flipped. In such cases, the first
	 * subframe has an LLC/SNAP header in the location of the destination
	 * address.
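	 * Frames that fail this check are dropped below instead of being
	 * passed up the stack.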
	 */
	if (skb_shinfo(skb)->frag_list) {
		int offset = 0;

		if (!(status->flag & RX_FLAG_8023)) {
			offset = ieee80211_get_hdrlen_from_skb(skb);

			if ((status->flag &
			     (RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED)) ==
			    RX_FLAG_DECRYPTED)
				offset += 8;
		}

		if (ether_addr_equal(skb->data + offset, rfc1042_header)) {
			dev_kfree_skb(skb);
			return;
		}
	}
	__skb_queue_tail(&dev->rx_skb[q], skb);
}

static void mt76_rx_release_burst(struct mt76_phy *phy, enum mt76_rxq_id q,
				  struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;

	if (phy->rx_amsdu[q].head &&
	    (!status->amsdu || status->first_amsdu ||
	     status->seqno != phy->rx_amsdu[q].seqno))
		mt76_rx_release_amsdu(phy, q);

	if (!phy->rx_amsdu[q].head) {
		phy->rx_amsdu[q].tail = &skb_shinfo(skb)->frag_list;
		phy->rx_amsdu[q].seqno = status->seqno;
		phy->rx_amsdu[q].head = skb;
	} else {
		*phy->rx_amsdu[q].tail = skb;
		phy->rx_amsdu[q].tail = &skb->next;
	}

	if (!status->amsdu || status->last_amsdu)
		mt76_rx_release_amsdu(phy, q);
}

void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *phy = mt76_dev_phy(dev, status->phy_idx);

	if (!test_bit(MT76_STATE_RUNNING, &phy->state)) {
		dev_kfree_skb(skb);
		return;
	}

#ifdef CONFIG_NL80211_TESTMODE
	if (phy->test.state == MT76_TM_STATE_RX_FRAMES) {
		phy->test.rx_stats.packets[q]++;
		if (status->flag & RX_FLAG_FAILED_FCS_CRC)
			phy->test.rx_stats.fcs_error[q]++;
	}
#endif

	mt76_rx_release_burst(phy, q, skb);
}
EXPORT_SYMBOL_GPL(mt76_rx);

bool mt76_has_tx_pending(struct mt76_phy *phy)
{
	struct mt76_queue *q;
	int i;

	for (i = 0; i < __MT_TXQ_MAX; i++) {
		q = phy->q_tx[i];
		if (q && q->queued)
			return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(mt76_has_tx_pending);

static struct mt76_channel_state *
mt76_channel_state(struct mt76_phy *phy, struct ieee80211_channel *c)
{
	struct mt76_sband *msband;
	int idx;

	if (c->band == NL80211_BAND_2GHZ)
		msband = &phy->sband_2g;
	else if (c->band == NL80211_BAND_6GHZ)
		msband = &phy->sband_6g;
	else
		msband = &phy->sband_5g;

	idx = c - &msband->sband.channels[0];
	return &msband->chan[idx];
}

void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
{
	struct mt76_channel_state *state = phy->chan_state;

	state->cc_active += ktime_to_us(ktime_sub(time,
						  phy->survey_time));
	phy->survey_time = time;
}
EXPORT_SYMBOL_GPL(mt76_update_survey_active_time);

void mt76_update_survey(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;
	ktime_t cur_time;

	if (dev->drv->update_survey)
		dev->drv->update_survey(phy);

	cur_time = ktime_get_boottime();
	mt76_update_survey_active_time(phy, cur_time);

	if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) {
		struct mt76_channel_state *state = phy->chan_state;

		spin_lock_bh(&dev->cc_lock);
		state->cc_bss_rx += dev->cur_cc_bss_rx;
		dev->cur_cc_bss_rx = 0;
		spin_unlock_bh(&dev->cc_lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_update_survey);

void mt76_set_channel(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_hw *hw = phy->hw;
	struct cfg80211_chan_def *chandef = &hw->conf.chandef;
	bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
	int timeout = HZ / 5;

	wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
	mt76_update_survey(phy);

	if (phy->chandef.chan->center_freq != chandef->chan->center_freq ||
	    phy->chandef.width != chandef->width)
		phy->dfs_state = MT_DFS_STATE_UNKNOWN;

	phy->chandef = *chandef;
	phy->chan_state = mt76_channel_state(phy, chandef->chan);

	if (!offchannel)
		phy->main_chan = chandef->chan;

	if (chandef->chan != phy->main_chan)
		memset(phy->chan_state, 0, sizeof(*phy->chan_state));
}
EXPORT_SYMBOL_GPL(mt76_set_channel);

int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_sband *sband;
	struct ieee80211_channel *chan;
	struct mt76_channel_state *state;
	int ret = 0;

	mutex_lock(&dev->mutex);
	if (idx == 0 && dev->drv->update_survey)
		mt76_update_survey(phy);

	if (idx >= phy->sband_2g.sband.n_channels +
		   phy->sband_5g.sband.n_channels) {
		idx -= (phy->sband_2g.sband.n_channels +
			phy->sband_5g.sband.n_channels);
		sband = &phy->sband_6g;
	} else if (idx >= phy->sband_2g.sband.n_channels) {
		idx -= phy->sband_2g.sband.n_channels;
		sband = &phy->sband_5g;
	} else {
		sband = &phy->sband_2g;
	}

	if (idx >= sband->sband.n_channels) {
		ret = -ENOENT;
		goto out;
	}

	chan = &sband->sband.channels[idx];
	state = mt76_channel_state(phy, chan);

	memset(survey, 0, sizeof(*survey));
	survey->channel = chan;
	survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
	survey->filled |= dev->drv->survey_flags;
	if (state->noise)
		survey->filled |= SURVEY_INFO_NOISE_DBM;

	if (chan == phy->main_chan) {
		survey->filled |= SURVEY_INFO_IN_USE;

		if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME)
			survey->filled |= SURVEY_INFO_TIME_BSS_RX;
	}

	survey->time_busy = div_u64(state->cc_busy, 1000);
	survey->time_rx = div_u64(state->cc_rx, 1000);
	survey->time = div_u64(state->cc_active, 1000);
	survey->noise = state->noise;

	spin_lock_bh(&dev->cc_lock);
	survey->time_bss_rx = div_u64(state->cc_bss_rx, 1000);
	survey->time_tx = div_u64(state->cc_tx, 1000);
	spin_unlock_bh(&dev->cc_lock);

out:
	mutex_unlock(&dev->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76_get_survey);

void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key)
{
	struct ieee80211_key_seq seq;
	int i;

	wcid->rx_check_pn = false;

	if (!key)
		return;

	if (key->cipher != WLAN_CIPHER_SUITE_CCMP)
		return;

	wcid->rx_check_pn = true;

	/* data frame */
	for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
		ieee80211_get_key_rx_seq(key, i, &seq);
		memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
	}

	/* robust management frame */
	ieee80211_get_key_rx_seq(key, -1, &seq);
	memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
}
EXPORT_SYMBOL(mt76_wcid_key_setup);

int mt76_rx_signal(u8 chain_mask, s8 *chain_signal)
{
	int signal = -128;
	u8 chains;

	for (chains = chain_mask; chains; chains >>= 1, chain_signal++) {
		int cur, diff;

		cur = *chain_signal;
		if (!(chains & BIT(0)) ||
		    cur > 0)
			continue;

		if (cur > signal)
			swap(cur, signal);

		diff = signal - cur;
		if (diff == 0)
			signal += 3;
		else if (diff <= 2)
			signal += 2;
		else if (diff <= 6)
			signal += 1;
	}

	return signal;
}
EXPORT_SYMBOL(mt76_rx_signal);

static void
mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
		struct ieee80211_hw **hw,
		struct ieee80211_sta **sta)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
	struct mt76_rx_status mstat;

	mstat = *((struct mt76_rx_status *)skb->cb);
	memset(status, 0, sizeof(*status));

	status->flag = mstat.flag;
	status->freq = mstat.freq;
	status->enc_flags = mstat.enc_flags;
	status->encoding = mstat.encoding;
	status->bw = mstat.bw;
	status->he_ru = mstat.he_ru;
	status->he_gi = mstat.he_gi;
	status->he_dcm = mstat.he_dcm;
	status->rate_idx = mstat.rate_idx;
	status->nss = mstat.nss;
	status->band = mstat.band;
	status->signal = mstat.signal;
	status->chains = mstat.chains;
	status->ampdu_reference = mstat.ampdu_ref;
	status->device_timestamp = mstat.timestamp;
	status->mactime = mstat.timestamp;
	status->signal = mt76_rx_signal(mstat.chains, mstat.chain_signal);
	if (status->signal <= -128)
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

	if (ieee80211_is_beacon(hdr->frame_control) ||
	    ieee80211_is_probe_resp(hdr->frame_control))
		status->boottime_ns = ktime_get_boottime_ns();

	BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
	BUILD_BUG_ON(sizeof(status->chain_signal) !=
		     sizeof(mstat.chain_signal));
	memcpy(status->chain_signal, mstat.chain_signal,
	       sizeof(mstat.chain_signal));

	*sta = wcid_to_sta(mstat.wcid);
	*hw = mt76_phy_hw(dev, mstat.phy_idx);
}

static void
mt76_check_ccmp_pn(struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_hdr *hdr;
	int security_idx;
	int ret;

	if (!(status->flag & RX_FLAG_DECRYPTED))
		return;

	if (status->flag & RX_FLAG_ONLY_MONITOR)
		return;

	if (!wcid || !wcid->rx_check_pn)
		return;

	security_idx = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	if (status->flag & RX_FLAG_8023)
		goto skip_hdr_check;

	hdr = mt76_skb_get_hdr(skb);
	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		/*
		 * Validate the first fragment both here and in mac80211.
		 * All further fragments will be validated by mac80211 only.
		 */
		if (ieee80211_is_frag(hdr) &&
		    !ieee80211_is_first_frag(hdr->frame_control))
			return;
	}

	/* IEEE 802.11-2020, 12.5.3.4.4 "PN and replay detection" c):
	 *
	 * the recipient shall maintain a single replay counter for received
	 * individually addressed robust Management frames that are received
	 * with the To DS subfield equal to 0, [...]
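	 * Such frames therefore use the dedicated replay counter slot
	 * (IEEE80211_NUM_TIDS) selected below.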
	 */
	if (ieee80211_is_mgmt(hdr->frame_control) &&
	    !ieee80211_has_tods(hdr->frame_control))
		security_idx = IEEE80211_NUM_TIDS;

skip_hdr_check:
	BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
	ret = memcmp(status->iv, wcid->rx_key_pn[security_idx],
		     sizeof(status->iv));
	if (ret <= 0) {
		status->flag |= RX_FLAG_ONLY_MONITOR;
		return;
	}

	memcpy(wcid->rx_key_pn[security_idx], status->iv, sizeof(status->iv));

	if (status->flag & RX_FLAG_IV_STRIPPED)
		status->flag |= RX_FLAG_PN_VALIDATED;
}

static void
mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status,
		    int len)
{
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_rx_status info = {
		.enc_flags = status->enc_flags,
		.rate_idx = status->rate_idx,
		.encoding = status->encoding,
		.band = status->band,
		.nss = status->nss,
		.bw = status->bw,
	};
	struct ieee80211_sta *sta;
	u32 airtime;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;

	airtime = ieee80211_calc_rx_airtime(dev->hw, &info, len);
	spin_lock(&dev->cc_lock);
	dev->cur_cc_bss_rx += airtime;
	spin_unlock(&dev->cc_lock);

	if (!wcid || !wcid->sta)
		return;

	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
	ieee80211_sta_register_airtime(sta, tidno, 0, airtime);
}

static void
mt76_airtime_flush_ampdu(struct mt76_dev *dev)
{
	struct mt76_wcid *wcid;
	int wcid_idx;

	if (!dev->rx_ampdu_len)
		return;

	wcid_idx = dev->rx_ampdu_status.wcid_idx;
	if (wcid_idx < ARRAY_SIZE(dev->wcid))
		wcid = rcu_dereference(dev->wcid[wcid_idx]);
	else
		wcid = NULL;
	dev->rx_ampdu_status.wcid = wcid;

	mt76_airtime_report(dev, &dev->rx_ampdu_status, dev->rx_ampdu_len);

	dev->rx_ampdu_len = 0;
	dev->rx_ampdu_ref = 0;
}

static void
mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;

	if (!(dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME))
		return;

	if (!wcid || !wcid->sta) {
		struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);

		if (status->flag & RX_FLAG_8023)
			return;

		if (!ether_addr_equal(hdr->addr1, dev->phy.macaddr))
			return;

		wcid = NULL;
	}

	if (!(status->flag & RX_FLAG_AMPDU_DETAILS) ||
	    status->ampdu_ref != dev->rx_ampdu_ref)
		mt76_airtime_flush_ampdu(dev);

	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		if (!dev->rx_ampdu_len ||
		    status->ampdu_ref != dev->rx_ampdu_ref) {
			dev->rx_ampdu_status = *status;
			dev->rx_ampdu_status.wcid_idx = wcid ? wcid->idx : 0xff;
			dev->rx_ampdu_ref = status->ampdu_ref;
		}

		dev->rx_ampdu_len += skb->len;
		return;
	}

	mt76_airtime_report(dev, status, skb->len);
}

static void
mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct mt76_wcid *wcid = status->wcid;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	bool ps;

	hw = mt76_phy_hw(dev, status->phy_idx);
	if (ieee80211_is_pspoll(hdr->frame_control) && !wcid &&
	    !(status->flag & RX_FLAG_8023)) {
		sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
		if (sta)
			wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv;
	}

	mt76_airtime_check(dev, skb);

	if (!wcid || !wcid->sta)
		return;

	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);

	if (status->signal <= 0)
		ewma_signal_add(&wcid->rssi, -status->signal);

	wcid->inactive_count = 0;

	if (status->flag & RX_FLAG_8023)
		return;

	if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
		return;

	if (ieee80211_is_pspoll(hdr->frame_control)) {
		ieee80211_sta_pspoll(sta);
		return;
	}

	if (ieee80211_has_morefrags(hdr->frame_control) ||
	    !(ieee80211_is_mgmt(hdr->frame_control) ||
	      ieee80211_is_data(hdr->frame_control)))
		return;

	ps = ieee80211_has_pm(hdr->frame_control);

	if (ps && (ieee80211_is_data_qos(hdr->frame_control) ||
		   ieee80211_is_qos_nullfunc(hdr->frame_control)))
		ieee80211_sta_uapsd_trigger(sta, tidno);

	if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps)
		return;

	if (ps)
		set_bit(MT_WCID_FLAG_PS, &wcid->flags);

	dev->drv->sta_ps(dev, sta, ps);

	if (!ps)
		clear_bit(MT_WCID_FLAG_PS, &wcid->flags);

	ieee80211_sta_ps_transition(sta, ps);
}

void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi)
{
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct sk_buff *skb, *tmp;
	LIST_HEAD(list);

	spin_lock(&dev->rx_lock);
	while ((skb = __skb_dequeue(frames)) != NULL) {
		struct sk_buff *nskb = skb_shinfo(skb)->frag_list;

		mt76_check_ccmp_pn(skb);
		skb_shinfo(skb)->frag_list = NULL;
		mt76_rx_convert(dev, skb, &hw, &sta);
		ieee80211_rx_list(hw, sta, skb, &list);

		/* subsequent amsdu frames */
		while (nskb) {
			skb = nskb;
			nskb = nskb->next;
			skb->next = NULL;

			mt76_rx_convert(dev, skb, &hw, &sta);
			ieee80211_rx_list(hw, sta, skb, &list);
		}
	}
	spin_unlock(&dev->rx_lock);

	if (!napi) {
		netif_receive_skb_list(&list);
		return;
	}

	list_for_each_entry_safe(skb, tmp, &list, list) {
		skb_list_del_init(skb);
		napi_gro_receive(napi, skb);
	}
}

void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
			   struct napi_struct *napi)
{
	struct sk_buff_head frames;
	struct sk_buff *skb;

	__skb_queue_head_init(&frames);

	while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
		mt76_check_sta(dev, skb);
		if (mtk_wed_device_active(&dev->mmio.wed))
			__skb_queue_tail(&frames, skb);
		else
			mt76_rx_aggr_reorder(skb, &frames);
	}

	mt76_rx_complete(dev, &frames, napi);
}
EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);

static int
mt76_sta_add(struct mt76_phy *phy, struct ieee80211_vif *vif,
	     struct ieee80211_sta *sta)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	struct mt76_dev *dev = phy->dev;
	int ret;
	int i;

	mutex_lock(&dev->mutex);

	ret = dev->drv->sta_add(dev, vif, sta);
	if (ret)
		goto out;

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct mt76_txq *mtxq;

		if (!sta->txq[i])
			continue;

		mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
		mtxq->wcid = wcid->idx;
	}

	ewma_signal_init(&wcid->rssi);
	if (phy->band_idx == MT_BAND1)
		mt76_wcid_mask_set(dev->wcid_phy_mask, wcid->idx);
	wcid->phy_idx = phy->band_idx;
	rcu_assign_pointer(dev->wcid[wcid->idx], wcid);

	mt76_packet_id_init(wcid);
out:
	mutex_unlock(&dev->mutex);

	return ret;
}

void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	int i, idx = wcid->idx;

	for (i = 0; i < ARRAY_SIZE(wcid->aggr); i++)
		mt76_rx_aggr_stop(dev, wcid, i);

	if (dev->drv->sta_remove)
		dev->drv->sta_remove(dev, vif, sta);

	mt76_packet_id_flush(dev, wcid);

	mt76_wcid_mask_clear(dev->wcid_mask, idx);
	mt76_wcid_mask_clear(dev->wcid_phy_mask, idx);
}
EXPORT_SYMBOL_GPL(__mt76_sta_remove);

static void
mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
		struct ieee80211_sta *sta)
{
	mutex_lock(&dev->mutex);
	__mt76_sta_remove(dev, vif, sta);
	mutex_unlock(&dev->mutex);
}

int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta,
		   enum ieee80211_sta_state old_state,
		   enum ieee80211_sta_state new_state)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;

	if (old_state == IEEE80211_STA_NOTEXIST &&
	    new_state == IEEE80211_STA_NONE)
		return mt76_sta_add(phy, vif, sta);

	if (old_state == IEEE80211_STA_AUTH &&
	    new_state == IEEE80211_STA_ASSOC &&
	    dev->drv->sta_assoc)
		dev->drv->sta_assoc(dev, vif, sta);

	if (old_state == IEEE80211_STA_NONE &&
	    new_state == IEEE80211_STA_NOTEXIST)
		mt76_sta_remove(dev, vif, sta);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_sta_state);

void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;

	mutex_lock(&dev->mutex);
	spin_lock_bh(&dev->status_lock);
	rcu_assign_pointer(dev->wcid[wcid->idx], NULL);
	spin_unlock_bh(&dev->status_lock);
	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);

int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		     int *dbm)
{
	struct mt76_phy *phy = hw->priv;
	int n_chains = hweight8(phy->antenna_mask);
	int delta = mt76_tx_power_nss_delta(n_chains);

	*dbm = DIV_ROUND_UP(phy->txpower_cur + delta, 2);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_txpower);

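/* Store the SAR power limits provided by cfg80211 in phy->frp so that
 * mt76_get_sar_power() can later clamp the per-channel TX power against them.
 */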
int mt76_init_sar_power(struct ieee80211_hw *hw,
			const struct cfg80211_sar_specs *sar)
{
	struct mt76_phy *phy = hw->priv;
	const struct cfg80211_sar_capa *capa = hw->wiphy->sar_capa;
	int i;

	if (sar->type != NL80211_SAR_TYPE_POWER || !sar->num_sub_specs)
		return -EINVAL;

	for (i = 0; i < sar->num_sub_specs; i++) {
		u32 index = sar->sub_specs[i].freq_range_index;
		/* SAR specifies the power limitation in 0.25 dBm units */
		s32 power = sar->sub_specs[i].power >> 1;

		if (power > 127 || power < -127)
			power = 127;

		phy->frp[index].range = &capa->freq_ranges[index];
		phy->frp[index].power = power;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_init_sar_power);

int mt76_get_sar_power(struct mt76_phy *phy,
		       struct ieee80211_channel *chan,
		       int power)
{
	const struct cfg80211_sar_capa *capa = phy->hw->wiphy->sar_capa;
	int freq, i;

	if (!capa || !phy->frp)
		return power;

	if (power > 127 || power < -127)
		power = 127;

	freq = ieee80211_channel_to_frequency(chan->hw_value, chan->band);
	for (i = 0; i < capa->num_freq_ranges; i++) {
		if (phy->frp[i].range &&
		    freq >= phy->frp[i].range->start_freq &&
		    freq < phy->frp[i].range->end_freq) {
			power = min_t(int, phy->frp[i].power, power);
			break;
		}
	}

	return power;
}
EXPORT_SYMBOL_GPL(mt76_get_sar_power);

static void
__mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	if (vif->bss_conf.csa_active && ieee80211_beacon_cntdwn_is_complete(vif))
		ieee80211_csa_finish(vif);
}

void mt76_csa_finish(struct mt76_dev *dev)
{
	if (!dev->csa_complete)
		return;

	ieee80211_iterate_active_interfaces_atomic(dev->hw,
		IEEE80211_IFACE_ITER_RESUME_ALL,
		__mt76_csa_finish, dev);

	dev->csa_complete = 0;
}
EXPORT_SYMBOL_GPL(mt76_csa_finish);

static void
__mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct mt76_dev *dev = priv;

	if (!vif->bss_conf.csa_active)
		return;

	dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif);
}

void mt76_csa_check(struct mt76_dev *dev)
{
	ieee80211_iterate_active_interfaces_atomic(dev->hw,
		IEEE80211_IFACE_ITER_RESUME_ALL,
		__mt76_csa_check, dev);
}
EXPORT_SYMBOL_GPL(mt76_csa_check);

int
mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
{
	return 0;
}
EXPORT_SYMBOL_GPL(mt76_set_tim);

void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
	u8 *hdr, *pn = status->iv;

	__skb_push(skb, 8);
	memmove(skb->data, skb->data + 8, hdr_len);
	hdr = skb->data + hdr_len;

	hdr[0] = pn[5];
	hdr[1] = pn[4];
	hdr[2] = 0;
	hdr[3] = 0x20 | (key_id << 6);
	hdr[4] = pn[3];
	hdr[5] = pn[2];
	hdr[6] = pn[1];
	hdr[7] = pn[0];

	status->flag &= ~RX_FLAG_IV_STRIPPED;
}
EXPORT_SYMBOL_GPL(mt76_insert_ccmp_hdr);

int mt76_get_rate(struct mt76_dev *dev,
		  struct ieee80211_supported_band *sband,
		  int idx, bool cck)
{
	int i, offset = 0, len = sband->n_bitrates;

	if (cck) {
		if (sband != &dev->phy.sband_2g.sband)
			return 0;

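		/* BIT(2) marks the short-preamble variant of a CCK rate; clear
		 * it so both preamble variants map to the same bitrate index.
		 */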
		idx &= ~BIT(2); /* short preamble */
	} else if (sband == &dev->phy.sband_2g.sband) {
		offset = 4;
	}

	for (i = offset; i < len; i++) {
		if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
			return i;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_rate);

void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  const u8 *mac)
{
	struct mt76_phy *phy = hw->priv;

	set_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan);

void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct mt76_phy *phy = hw->priv;

	clear_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan_complete);

int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;

	mutex_lock(&dev->mutex);
	*tx_ant = phy->antenna_mask;
	*rx_ant = phy->antenna_mask;
	mutex_unlock(&dev->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_antenna);

struct mt76_queue *
mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
		int ring_base, u32 flags)
{
	struct mt76_queue *hwq;
	int err;

	hwq = devm_kzalloc(dev->dev, sizeof(*hwq), GFP_KERNEL);
	if (!hwq)
		return ERR_PTR(-ENOMEM);

	hwq->flags = flags;

	err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);
	if (err < 0)
		return ERR_PTR(err);

	return hwq;
}
EXPORT_SYMBOL_GPL(mt76_init_queue);

u16 mt76_calculate_default_rate(struct mt76_phy *phy, int rateidx)
{
	int offset = 0;

	if (phy->chandef.chan->band != NL80211_BAND_2GHZ)
		offset = 4;

	/* pick the lowest rate for hidden nodes */
	if (rateidx < 0)
		rateidx = 0;

	rateidx += offset;
	if (rateidx >= ARRAY_SIZE(mt76_rates))
		rateidx = offset;

	return mt76_rates[rateidx].hw_value;
}
EXPORT_SYMBOL_GPL(mt76_calculate_default_rate);

void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
			 struct mt76_sta_stats *stats)
{
	int i, ei = wi->initial_stat_idx;
	u64 *data = wi->data;

	wi->sta_count++;

	data[ei++] += stats->tx_mode[MT_PHY_TYPE_CCK];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_OFDM];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT_GF];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_VHT];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_SU];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_EXT_SU];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_TB];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_MU];

	for (i = 0; i < ARRAY_SIZE(stats->tx_bw); i++)
		data[ei++] += stats->tx_bw[i];

	for (i = 0; i < 12; i++)
		data[ei++] += stats->tx_mcs[i];

	wi->worker_stat_count = ei - wi->initial_stat_idx;
}
EXPORT_SYMBOL_GPL(mt76_ethtool_worker);

enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
{
	struct ieee80211_hw *hw = phy->hw;
	struct mt76_dev *dev = phy->dev;

	if (dev->region == NL80211_DFS_UNSET ||
	    test_bit(MT76_SCANNING, &phy->state))
		return MT_DFS_STATE_DISABLED;

	if (!hw->conf.radar_enabled) {
		if ((hw->conf.flags & IEEE80211_CONF_MONITOR) &&
		    (phy->chandef.chan->flags & IEEE80211_CHAN_RADAR))
			return MT_DFS_STATE_ACTIVE;

		return MT_DFS_STATE_DISABLED;
	}

	if (!cfg80211_reg_can_beacon(hw->wiphy, &phy->chandef, NL80211_IFTYPE_AP))
		return MT_DFS_STATE_CAC;

	return MT_DFS_STATE_ACTIVE;
}
EXPORT_SYMBOL_GPL(mt76_phy_dfs_state);