// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */
#include <linux/sched.h>
#include <linux/of.h>
#include "mt76.h"

/* Static channel templates shared by all mt76 devices; per-device copies
 * are made in mt76_init_sband() so flags can be modified independently.
 */
#define CHAN2G(_idx, _freq) {		\
	.band = NL80211_BAND_2GHZ,	\
	.center_freq = (_freq),		\
	.hw_value = (_idx),		\
	.max_power = 30,		\
}

#define CHAN5G(_idx, _freq) {		\
	.band = NL80211_BAND_5GHZ,	\
	.center_freq = (_freq),		\
	.hw_value = (_idx),		\
	.max_power = 30,		\
}

static const struct ieee80211_channel mt76_channels_2ghz[] = {
	CHAN2G(1, 2412),
	CHAN2G(2, 2417),
	CHAN2G(3, 2422),
	CHAN2G(4, 2427),
	CHAN2G(5, 2432),
	CHAN2G(6, 2437),
	CHAN2G(7, 2442),
	CHAN2G(8, 2447),
	CHAN2G(9, 2452),
	CHAN2G(10, 2457),
	CHAN2G(11, 2462),
	CHAN2G(12, 2467),
	CHAN2G(13, 2472),
	CHAN2G(14, 2484),
};

static const struct ieee80211_channel mt76_channels_5ghz[] = {
	CHAN5G(36, 5180),
	CHAN5G(40, 5200),
	CHAN5G(44, 5220),
	CHAN5G(48, 5240),

	CHAN5G(52, 5260),
	CHAN5G(56, 5280),
	CHAN5G(60, 5300),
	CHAN5G(64, 5320),

	CHAN5G(100, 5500),
	CHAN5G(104, 5520),
	CHAN5G(108, 5540),
	CHAN5G(112, 5560),
	CHAN5G(116, 5580),
	CHAN5G(120, 5600),
	CHAN5G(124, 5620),
	CHAN5G(128, 5640),
	CHAN5G(132, 5660),
	CHAN5G(136, 5680),
	CHAN5G(140, 5700),
	CHAN5G(144, 5720),

	CHAN5G(149, 5745),
	CHAN5G(153, 5765),
	CHAN5G(157, 5785),
	CHAN5G(161, 5805),
	CHAN5G(165, 5825),
	CHAN5G(169, 5845),
	CHAN5G(173, 5865),
};

/* Throughput (kbit/s) -> LED blink interval (ms) table for the mac80211
 * throughput LED trigger; higher throughput blinks faster.
 */
static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
	{ .throughput = 0 * 1024, .blink_time = 334 },
	{ .throughput = 1 * 1024, .blink_time = 260 },
	{ .throughput = 5 * 1024, .blink_time = 220 },
	{ .throughput = 10 * 1024, .blink_time = 190 },
	{ .throughput = 20 * 1024, .blink_time = 170 },
	{ .throughput = 50 * 1024, .blink_time = 150 },
	{ .throughput = 70 * 1024, .blink_time = 130 },
	{ .throughput = 100 * 1024, .blink_time = 110 },
	{ .throughput = 200 * 1024, .blink_time = 80 },
	{ .throughput = 300 * 1024, .blink_time = 50 },
};

/* Register the device LED with the LED class, wiring it to the mac80211
 * throughput trigger.  Pin/polarity may be overridden by an optional "led"
 * child node in the device tree.  No-op success if the driver registered
 * neither a brightness_set nor a blink_set callback.
 */
static int mt76_led_init(struct mt76_dev *dev)
{
	struct device_node *np = dev->dev->of_node;
	struct ieee80211_hw *hw = dev->hw;
	int led_pin;

	if (!dev->led_cdev.brightness_set && !dev->led_cdev.blink_set)
		return 0;

	snprintf(dev->led_name, sizeof(dev->led_name),
		 "mt76-%s", wiphy_name(hw->wiphy));

	dev->led_cdev.name = dev->led_name;
	dev->led_cdev.default_trigger =
		ieee80211_create_tpt_led_trigger(hw,
					IEEE80211_TPT_LEDTRIG_FL_RADIO,
					mt76_tpt_blink,
					ARRAY_SIZE(mt76_tpt_blink));

	/* NOTE(review): of_get_child_by_name() takes a reference on the
	 * returned node which is never dropped with of_node_put() here —
	 * looks like a refcount leak; confirm against upstream.
	 */
	np = of_get_child_by_name(np, "led");
	if (np) {
		if (!of_property_read_u32(np, "led-sources", &led_pin))
			dev->led_pin = led_pin;
		dev->led_al = of_property_read_bool(np, "led-active-low");
	}

	return led_classdev_register(dev->dev, &dev->led_cdev);
}

/* Undo mt76_led_init(); guarded by the same callback check so it is safe
 * to call even when no LED was registered.
 */
static void mt76_led_cleanup(struct mt76_dev *dev)
{
	if (!dev->led_cdev.brightness_set && !dev->led_cdev.blink_set)
		return;

	led_classdev_unregister(&dev->led_cdev);
}

/* Derive HT (and optionally VHT) stream capabilities from the phy's
 * antenna mask: TX STBC is advertised only with >1 spatial stream, and the
 * HT/VHT RX MCS maps are populated for exactly hweight8(antenna_mask)
 * streams (MCS 0-9 per stream for VHT).
 */
static void mt76_init_stream_cap(struct mt76_phy *phy,
				 struct ieee80211_supported_band *sband,
				 bool vht)
{
	struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
	int i, nstream = hweight8(phy->antenna_mask);
	struct ieee80211_sta_vht_cap *vht_cap;
	u16 mcs_map = 0;

	if (nstream > 1)
		ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC;
	else
		ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC;

	/* one 0xff byte (MCS 0-7) per supported stream */
	for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
		ht_cap->mcs.rx_mask[i] = i < nstream ? 0xff : 0;

	if (!vht)
		return;

	vht_cap = &sband->vht_cap;
	if (nstream > 1)
		vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
	else
		vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC;

	/* 2 bits per stream in the VHT MCS map */
	for (i = 0; i < 8; i++) {
		if (i < nstream)
			mcs_map |= (IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2));
		else
			mcs_map |=
				(IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2));
	}
	vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
	vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
}

/* Refresh stream caps on both bands, e.g. after the antenna mask changed.
 * VHT only applies to the 5 GHz band.
 */
void mt76_set_stream_caps(struct mt76_phy *phy, bool vht)
{
	if (phy->cap.has_2ghz)
		mt76_init_stream_cap(phy, &phy->sband_2g.sband, false);
	if (phy->cap.has_5ghz)
		mt76_init_stream_cap(phy, &phy->sband_5g.sband, vht);
}
EXPORT_SYMBOL_GPL(mt76_set_stream_caps);

/* Populate one mt76_sband: duplicate the const channel template into
 * devm-managed memory (so per-channel flags are writable), allocate the
 * matching per-channel state array, and fill in default HT/VHT caps.
 * Returns 0 or -ENOMEM; allocations are device-managed, so no unwind is
 * needed on failure.
 */
static int
mt76_init_sband(struct mt76_phy *phy, struct mt76_sband *msband,
		const struct ieee80211_channel *chan, int n_chan,
		struct ieee80211_rate *rates, int n_rates, bool vht)
{
	struct ieee80211_supported_band *sband = &msband->sband;
	struct ieee80211_sta_vht_cap *vht_cap;
	struct ieee80211_sta_ht_cap *ht_cap;
	struct mt76_dev *dev = phy->dev;
	void *chanlist;
	int size;

	size = n_chan * sizeof(*chan);
	chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
	if (!chanlist)
		return -ENOMEM;

	msband->chan = devm_kcalloc(dev->dev, n_chan, sizeof(*msband->chan),
				    GFP_KERNEL);
	if (!msband->chan)
		return -ENOMEM;

	sband->channels = chanlist;
	sband->n_channels = n_chan;
	sband->bitrates = rates;
	sband->n_bitrates = n_rates;

	ht_cap = &sband->ht_cap;
	ht_cap->ht_supported = true;
	ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
		       IEEE80211_HT_CAP_GRN_FLD |
		       IEEE80211_HT_CAP_SGI_20 |
		       IEEE80211_HT_CAP_SGI_40 |
		       (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);

	ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
	ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;

	mt76_init_stream_cap(phy, sband, vht);

	if (!vht)
		return 0;

	vht_cap = &sband->vht_cap;
	vht_cap->vht_supported = true;
	vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
			IEEE80211_VHT_CAP_RXSTBC_1 |
			IEEE80211_VHT_CAP_SHORT_GI_80 |
			IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN |
			IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
			(3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);

	return 0;
}

/* Set up the 2.4 GHz band and hook it into the wiphy. */
static int
mt76_init_sband_2g(struct mt76_phy *phy, struct ieee80211_rate *rates,
		   int n_rates)
{
	phy->hw->wiphy->bands[NL80211_BAND_2GHZ] = &phy->sband_2g.sband;

	return mt76_init_sband(phy, &phy->sband_2g, mt76_channels_2ghz,
			       ARRAY_SIZE(mt76_channels_2ghz), rates,
			       n_rates, false);
}

/* Set up the 5 GHz band (optionally with VHT) and hook it into the wiphy. */
static int
mt76_init_sband_5g(struct mt76_phy *phy, struct ieee80211_rate *rates,
		   int n_rates, bool vht)
{
	phy->hw->wiphy->bands[NL80211_BAND_5GHZ] = &phy->sband_5g.sband;

	return mt76_init_sband(phy, &phy->sband_5g, mt76_channels_5ghz,
			       ARRAY_SIZE(mt76_channels_5ghz), rates,
			       n_rates, vht);
}

/* After regulatory/DT limits were applied: if the band still has at least
 * one enabled channel, point the default chandef/channel-state at its first
 * channel; otherwise remove the band from the wiphy entirely.
 */
static void
mt76_check_sband(struct mt76_phy *phy, struct mt76_sband *msband,
		 enum nl80211_band band)
{
	struct ieee80211_supported_band *sband = &msband->sband;
	bool found = false;
	int i;

	/* NOTE(review): sband is the address of a struct member and can
	 * never be NULL — this check is dead code.
	 */
	if (!sband)
		return;

	for (i = 0; i < sband->n_channels; i++) {
		if (sband->channels[i].flags & IEEE80211_CHAN_DISABLED)
			continue;

		found = true;
		break;
	}

	if (found) {
		phy->chandef.chan = &sband->channels[0];
		phy->chan_state = &msband->chan[0];
		return;
	}

	sband->n_channels = 0;
	phy->hw->wiphy->bands[band] = NULL;
}

/* Common mac80211 hw/wiphy setup shared by mt76_register_device() and
 * mt76_register_phy(): flags, features, interface modes and queue sizing.
 */
static void
mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
{
	struct mt76_dev *dev = phy->dev;
	struct wiphy *wiphy = hw->wiphy;

	SET_IEEE80211_DEV(hw, dev->dev);
	SET_IEEE80211_PERM_ADDR(hw, phy->macaddr);

	wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
	wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH |
			WIPHY_FLAG_SUPPORTS_TDLS |
			WIPHY_FLAG_AP_UAPSD;

	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AQL);

	/* NOTE(review): antenna mask is taken from the primary phy
	 * (dev->phy) even when initializing an extended phy — presumably
	 * intentional since antennas are shared; confirm.
	 */
	wiphy->available_antennas_tx = dev->phy.antenna_mask;
	wiphy->available_antennas_rx = dev->phy.antenna_mask;

	hw->txq_data_size = sizeof(struct mt76_txq);
	hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;

	if (!hw->max_tx_fragments)
		hw->max_tx_fragments = 16;

	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
	ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);

	/* mac80211-side A-MSDU building only when the hardware does not
	 * offload A-MSDU aggregation itself
	 */
	if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD)) {
		ieee80211_hw_set(hw, TX_AMSDU);
		ieee80211_hw_set(hw, TX_FRAG_LIST);
	}

	ieee80211_hw_set(hw, MFP_CAPABLE);
	ieee80211_hw_set(hw, AP_LINK_PS);
	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);

	wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_ADHOC);
}

/* Allocate an extended (secondary) phy: the ieee80211_hw priv area holds
 * the mt76_phy followed (8-byte aligned) by `size` bytes of driver private
 * data, exposed via phy->priv.  Returns NULL on allocation failure.
 */
struct mt76_phy *
mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
	       const struct ieee80211_ops *ops)
{
	struct ieee80211_hw *hw;
	unsigned int phy_size;
	struct mt76_phy *phy;

	phy_size = ALIGN(sizeof(*phy), 8);
	hw = ieee80211_alloc_hw(size + phy_size, ops);
	if (!hw)
		return NULL;

	phy = hw->priv;
	phy->dev = dev;
	phy->hw = hw;
	phy->priv = hw->priv + phy_size;

	return phy;
}
EXPORT_SYMBOL_GPL(mt76_alloc_phy);

/* Register a previously allocated extended phy with mac80211.  `rates` must
 * start with the 4 CCK entries; the 5 GHz band skips them (rates + 4).
 * On success the phy is published as dev->phy2.
 */
int mt76_register_phy(struct mt76_phy *phy, bool vht,
		      struct ieee80211_rate *rates, int n_rates)
{
	int ret;

	mt76_phy_init(phy, phy->hw);

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	if (phy->cap.has_5ghz) {
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	wiphy_read_of_freq_limits(phy->hw->wiphy);
	mt76_check_sband(phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ);

	ret = ieee80211_register_hw(phy->hw);
	if (ret)
		return ret;

	phy->dev->phy2 = phy;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_phy);

/* Tear down an extended phy: flush pending tx status, unregister from
 * mac80211 and unpublish dev->phy2.
 */
void mt76_unregister_phy(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;

	mt76_tx_status_check(dev, NULL, true);
	ieee80211_unregister_hw(phy->hw);
	dev->phy2 = NULL;
}
EXPORT_SYMBOL_GPL(mt76_unregister_phy);

/* Allocate the mt76_dev together with its primary phy inside the
 * ieee80211_hw priv area and initialize all core locks, queues and the
 * ordered workqueue.  Returns NULL on failure (hw is freed on the
 * workqueue-allocation error path).
 */
struct mt76_dev *
mt76_alloc_device(struct device *pdev, unsigned int size,
		  const struct ieee80211_ops *ops,
		  const struct mt76_driver_ops *drv_ops)
{
	struct ieee80211_hw *hw;
	struct mt76_phy *phy;
	struct mt76_dev *dev;
	int i;

	hw = ieee80211_alloc_hw(size, ops);
	if (!hw)
		return NULL;

	dev = hw->priv;
	dev->hw = hw;
	dev->dev = pdev;
	dev->drv = drv_ops;

	phy = &dev->phy;
	phy->dev = dev;
	phy->hw = hw;

	spin_lock_init(&dev->rx_lock);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->cc_lock);
	mutex_init(&dev->mutex);
	init_waitqueue_head(&dev->tx_wait);
	skb_queue_head_init(&dev->status_list);

	skb_queue_head_init(&dev->mcu.res_q);
	init_waitqueue_head(&dev->mcu.wait);
	mutex_init(&dev->mcu.mutex);
	dev->tx_worker.fn = mt76_tx_worker;

	spin_lock_init(&dev->token_lock);
	idr_init(&dev->token);

	INIT_LIST_HEAD(&dev->txwi_cache);

	/* NOTE(review): rx_skb[] is initialized using the size of q_rx —
	 * assumes both arrays have the same number of entries; confirm in
	 * mt76.h.
	 */
	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
		skb_queue_head_init(&dev->rx_skb[i]);

	dev->wq = alloc_ordered_workqueue("mt76", 0);
	if (!dev->wq) {
		ieee80211_free_hw(hw);
		return NULL;
	}

	return dev;
}
EXPORT_SYMBOL_GPL(mt76_alloc_device);

/* Register the primary phy with mac80211: band setup (same rates
 * convention as mt76_register_phy), optional LED, hw registration, then
 * start the tx worker at low FIFO priority.
 */
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates)
{
	struct ieee80211_hw *hw = dev->hw;
	struct mt76_phy *phy = &dev->phy;
	int ret;

	dev_set_drvdata(dev->dev, dev);
	mt76_phy_init(phy, hw);

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	if (phy->cap.has_5ghz) {
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	wiphy_read_of_freq_limits(hw->wiphy);
	mt76_check_sband(&dev->phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(&dev->phy, &phy->sband_5g, NL80211_BAND_5GHZ);

	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
		ret = mt76_led_init(dev);
		if (ret)
			return ret;
	}

	ret = ieee80211_register_hw(hw);
	if (ret)
		return ret;

	WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx"));
	sched_set_fifo_low(dev->tx_worker.task);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_device);

/* Reverse of mt76_register_device() up to (but not including) freeing —
 * see mt76_free_device() for the final teardown.
 */
void mt76_unregister_device(struct mt76_dev *dev)
{
	struct ieee80211_hw *hw = dev->hw;

	if (IS_ENABLED(CONFIG_MT76_LEDS))
		mt76_led_cleanup(dev);
	mt76_tx_status_check(dev, NULL, true);
	ieee80211_unregister_hw(hw);
}
EXPORT_SYMBOL_GPL(mt76_unregister_device);

/* Final teardown: stop the tx worker, destroy the workqueue and free the
 * ieee80211_hw (which also frees dev itself, allocated in its priv area).
 */
void mt76_free_device(struct mt76_dev *dev)
{
	mt76_worker_teardown(&dev->tx_worker);
	if (dev->wq) {
		destroy_workqueue(dev->wq);
		dev->wq = NULL;
	}
	ieee80211_free_hw(dev->hw);
}
EXPORT_SYMBOL_GPL(mt76_free_device);

/* Finish an accumulated A-MSDU burst for queue q: detach it from the phy
 * and either queue the head skb (with subframes chained on its frag_list)
 * for processing, or drop it if the first subframe fails validation.
 */
static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
{
	struct sk_buff *skb = phy->rx_amsdu[q].head;
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_dev *dev = phy->dev;

	phy->rx_amsdu[q].head = NULL;
	phy->rx_amsdu[q].tail = NULL;

	/*
	 * Validate if the amsdu has a proper first subframe.
	 * A single MSDU can be parsed as A-MSDU when the unauthenticated A-MSDU
	 * flag of the QoS header gets flipped. In such cases, the first
	 * subframe has a LLC/SNAP header in the location of the destination
	 * address.
	 */
	if (skb_shinfo(skb)->frag_list) {
		int offset = 0;

		if (!(status->flag & RX_FLAG_8023)) {
			offset = ieee80211_get_hdrlen_from_skb(skb);

			/* decrypted frames with the IV still present carry
			 * an extra 8-byte CCMP header before the payload
			 */
			if ((status->flag &
			     (RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED)) ==
			    RX_FLAG_DECRYPTED)
				offset += 8;
		}

		/* DA slot holding LLC/SNAP => spoofed A-MSDU, drop it */
		if (ether_addr_equal(skb->data + offset, rfc1042_header)) {
			dev_kfree_skb(skb);
			return;
		}
	}
	__skb_queue_tail(&dev->rx_skb[q], skb);
}

/* Accumulate consecutive A-MSDU subframes (same seqno) into a frag_list
 * chain on the first subframe.  Any non-A-MSDU frame, a new first
 * subframe, or a seqno change flushes the pending burst first; the last
 * subframe (or a standalone frame) releases immediately.
 */
static void mt76_rx_release_burst(struct mt76_phy *phy, enum mt76_rxq_id q,
				  struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;

	if (phy->rx_amsdu[q].head &&
	    (!status->amsdu || status->first_amsdu ||
	     status->seqno != phy->rx_amsdu[q].seqno))
		mt76_rx_release_amsdu(phy, q);

	if (!phy->rx_amsdu[q].head) {
		phy->rx_amsdu[q].tail = &skb_shinfo(skb)->frag_list;
		phy->rx_amsdu[q].seqno = status->seqno;
		phy->rx_amsdu[q].head = skb;
	} else {
		*phy->rx_amsdu[q].tail = skb;
		phy->rx_amsdu[q].tail = &skb->next;
	}

	if (!status->amsdu || status->last_amsdu)
		mt76_rx_release_amsdu(phy, q);
}

/* Driver RX entry point: drop frames while the phy is not running, count
 * testmode RX statistics, then feed the frame into the A-MSDU burst
 * machinery (which ultimately queues onto dev->rx_skb[q]).
 */
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *phy = mt76_dev_phy(dev, status->ext_phy);

	if (!test_bit(MT76_STATE_RUNNING, &phy->state)) {
		dev_kfree_skb(skb);
		return;
	}

#ifdef CONFIG_NL80211_TESTMODE
	if (phy->test.state == MT76_TM_STATE_RX_FRAMES) {
		phy->test.rx_stats.packets[q]++;
		if (status->flag & RX_FLAG_FAILED_FCS_CRC)
			phy->test.rx_stats.fcs_error[q]++;
	}
#endif

	mt76_rx_release_burst(phy, q, skb);
}
EXPORT_SYMBOL_GPL(mt76_rx);

/* True if any tx queue of this phy still has queued frames. */
bool mt76_has_tx_pending(struct mt76_phy *phy)
{
	struct mt76_queue *q;
	int i;

	for (i = 0; i < __MT_TXQ_MAX; i++) {
		q = phy->q_tx[i];
		if (q && q->queued)
			return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(mt76_has_tx_pending);

/* Map an ieee80211_channel back to its per-channel survey state by index
 * into the owning band's channel array.
 */
static struct mt76_channel_state *
mt76_channel_state(struct mt76_phy *phy, struct ieee80211_channel *c)
{
	struct mt76_sband *msband;
	int idx;

	if (c->band == NL80211_BAND_2GHZ)
		msband = &phy->sband_2g;
	else
		msband = &phy->sband_5g;

	idx = c - &msband->sband.channels[0];
	return &msband->chan[idx];
}

/* Credit the time elapsed since the last survey update to the current
 * channel's active time and restart the measurement window.
 */
void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
{
	struct mt76_channel_state *state = phy->chan_state;

	state->cc_active += ktime_to_us(ktime_sub(time,
						  phy->survey_time));
	phy->survey_time = time;
}
EXPORT_SYMBOL_GPL(mt76_update_survey_active_time);

/* Pull fresh survey counters from the driver, update active time on both
 * phys, and fold software-accumulated BSS RX airtime into the channel
 * state (under cc_lock, which also guards the RX-path accumulation).
 */
void mt76_update_survey(struct mt76_dev *dev)
{
	ktime_t cur_time;

	if (dev->drv->update_survey)
		dev->drv->update_survey(dev);

	cur_time = ktime_get_boottime();
	mt76_update_survey_active_time(&dev->phy, cur_time);
	if (dev->phy2)
		mt76_update_survey_active_time(dev->phy2, cur_time);

	if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) {
		struct mt76_channel_state *state = dev->phy.chan_state;

		spin_lock_bh(&dev->cc_lock);
		state->cc_bss_rx += dev->cur_cc_bss_rx;
		dev->cur_cc_bss_rx = 0;
		spin_unlock_bh(&dev->cc_lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_update_survey);

/* Switch the phy to the channel in hw->conf: wait (bounded, HZ/5) for tx
 * queues to drain, close out survey accounting on the old channel, then
 * update chandef/chan_state.  Survey counters are reset when tuning away
 * from the operating (main) channel, e.g. for off-channel scans.
 */
void mt76_set_channel(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_hw *hw = phy->hw;
	struct cfg80211_chan_def *chandef = &hw->conf.chandef;
	bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
	int timeout = HZ / 5;

	wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
	mt76_update_survey(dev);

	phy->chandef = *chandef;
	phy->chan_state = mt76_channel_state(phy, chandef->chan);

	if (!offchannel)
		phy->main_chan = chandef->chan;

	if (chandef->chan != phy->main_chan)
		memset(phy->chan_state, 0, sizeof(*phy->chan_state));
}
EXPORT_SYMBOL_GPL(mt76_set_channel);

/* mac80211 get_survey callback: report per-channel airtime statistics.
 * idx walks the 2.4 GHz channels first, then 5 GHz; returns -ENOENT past
 * the end.  Counters are stored in microseconds and reported in ms.
 */
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_sband *sband;
	struct ieee80211_channel *chan;
	struct mt76_channel_state *state;
	int ret = 0;

	mutex_lock(&dev->mutex);
	/* refresh counters once per dump, at the first index */
	if (idx == 0 && dev->drv->update_survey)
		mt76_update_survey(dev);

	sband = &phy->sband_2g;
	if (idx >= sband->sband.n_channels) {
		idx -= sband->sband.n_channels;
		sband = &phy->sband_5g;
	}

	if (idx >= sband->sband.n_channels) {
		ret = -ENOENT;
		goto out;
	}

	chan = &sband->sband.channels[idx];
	state = mt76_channel_state(phy, chan);

	memset(survey, 0, sizeof(*survey));
	survey->channel = chan;
	survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
	survey->filled |= dev->drv->survey_flags;
	if (state->noise)
		survey->filled |= SURVEY_INFO_NOISE_DBM;

	if (chan == phy->main_chan) {
		survey->filled |= SURVEY_INFO_IN_USE;

		if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME)
			survey->filled |= SURVEY_INFO_TIME_BSS_RX;
	}

	survey->time_busy = div_u64(state->cc_busy, 1000);
	survey->time_rx = div_u64(state->cc_rx, 1000);
	survey->time = div_u64(state->cc_active, 1000);
	survey->noise = state->noise;

	/* cc_bss_rx/cc_tx are updated from the RX path under cc_lock */
	spin_lock_bh(&dev->cc_lock);
	survey->time_bss_rx = div_u64(state->cc_bss_rx, 1000);
	survey->time_tx = div_u64(state->cc_tx, 1000);
	spin_unlock_bh(&dev->cc_lock);

out:
	mutex_unlock(&dev->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76_get_survey);

/* Seed per-TID CCMP PN replay state for a station key.  PN checking is
 * only enabled for CCMP; any other cipher (or key removal) disables it.
 */
void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key)
{
	struct ieee80211_key_seq seq;
	int i;

	wcid->rx_check_pn = false;

	if (!key)
		return;

	if (key->cipher != WLAN_CIPHER_SUITE_CCMP)
		return;

	wcid->rx_check_pn = true;
	for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
		ieee80211_get_key_rx_seq(key, i, &seq);
		memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
	}
}
EXPORT_SYMBOL(mt76_wcid_key_setup);

/* Convert the driver-private mt76_rx_status stored in skb->cb into the
 * mac80211 ieee80211_rx_status expected in the same cb area (copy out
 * first, since both occupy skb->cb), and resolve the destination hw/sta.
 */
static void
mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
		struct ieee80211_hw **hw,
		struct ieee80211_sta **sta)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct mt76_rx_status mstat;

	mstat = *((struct mt76_rx_status *)skb->cb);
	memset(status, 0, sizeof(*status));

	status->flag = mstat.flag;
	status->freq = mstat.freq;
	status->enc_flags = mstat.enc_flags;
	status->encoding = mstat.encoding;
	status->bw = mstat.bw;
	status->he_ru = mstat.he_ru;
	status->he_gi = mstat.he_gi;
	status->he_dcm = mstat.he_dcm;
	status->rate_idx = mstat.rate_idx;
	status->nss = mstat.nss;
	status->band = mstat.band;
	status->signal = mstat.signal;
	status->chains = mstat.chains;
	status->ampdu_reference = mstat.ampdu_ref;
	status->device_timestamp = mstat.timestamp;
	status->mactime = mstat.timestamp;

	BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
	BUILD_BUG_ON(sizeof(status->chain_signal) !=
		     sizeof(mstat.chain_signal));
	memcpy(status->chain_signal, mstat.chain_signal,
	       sizeof(mstat.chain_signal));

	*sta = wcid_to_sta(mstat.wcid);
	*hw = mt76_phy_hw(dev, mstat.ext_phy);
}

/* CCMP PN replay check for decrypted frames.  Returns 0 if the frame may
 * pass (including "not applicable" cases) and -EINVAL on replay; on
 * success the stored PN is advanced and, when the IV was stripped by
 * hardware, the frame is marked PN_VALIDATED for mac80211.
 */
static int
mt76_check_ccmp_pn(struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_hdr *hdr;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	int ret;

	if (!(status->flag & RX_FLAG_DECRYPTED))
		return 0;

	if (!wcid || !wcid->rx_check_pn)
		return 0;

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		/*
		 * Validate the first fragment both here and in mac80211
		 * All further fragments will be validated by mac80211 only.
		 */
		hdr = mt76_skb_get_hdr(skb);
		if (ieee80211_is_frag(hdr) &&
		    !ieee80211_is_first_frag(hdr->frame_control))
			return 0;
	}

	BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
	/* PN must be strictly increasing; equal counts as replay too */
	ret = memcmp(status->iv, wcid->rx_key_pn[tidno],
		     sizeof(status->iv));
	if (ret <= 0)
		return -EINVAL; /* replay */

	memcpy(wcid->rx_key_pn[tidno], status->iv, sizeof(status->iv));

	if (status->flag & RX_FLAG_IV_STRIPPED)
		status->flag |= RX_FLAG_PN_VALIDATED;

	return 0;
}

/* Software RX airtime accounting: estimate airtime for `len` bytes at the
 * frame's RX rate, add it to the per-device BSS RX counter (under
 * cc_lock), and report it to mac80211's airtime fairness for the station
 * when one is associated with the wcid.
 */
static void
mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status,
		    int len)
{
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_rx_status info = {
		.enc_flags = status->enc_flags,
		.rate_idx = status->rate_idx,
		.encoding = status->encoding,
		.band = status->band,
		.nss = status->nss,
		.bw = status->bw,
	};
	struct ieee80211_sta *sta;
	u32 airtime;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;

	airtime = ieee80211_calc_rx_airtime(dev->hw, &info, len);
	spin_lock(&dev->cc_lock);
	dev->cur_cc_bss_rx += airtime;
	spin_unlock(&dev->cc_lock);

	if (!wcid || !wcid->sta)
		return;

	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
	ieee80211_sta_register_airtime(sta, tidno, 0, airtime);
}

/* Flush the A-MPDU airtime accumulator: resolve the stored wcid index
 * under RCU (the station may have been removed meanwhile) and report the
 * accumulated byte count as a single airtime sample.
 */
static void
mt76_airtime_flush_ampdu(struct mt76_dev *dev)
{
	struct mt76_wcid *wcid;
	int wcid_idx;

	if (!dev->rx_ampdu_len)
		return;

	wcid_idx = dev->rx_ampdu_status.wcid_idx;
	if (wcid_idx < ARRAY_SIZE(dev->wcid))
		wcid = rcu_dereference(dev->wcid[wcid_idx]);
	else
		wcid = NULL;
	dev->rx_ampdu_status.wcid = wcid;

	mt76_airtime_report(dev, &dev->rx_ampdu_status, dev->rx_ampdu_len);

	dev->rx_ampdu_len = 0;
	dev->rx_ampdu_ref = 0;
}

/* Per-frame airtime bookkeeping (only with MT_DRV_SW_RX_AIRTIME).
 * Frames within one A-MPDU (same ampdu_ref) are accumulated by length and
 * reported together on flush; standalone frames report immediately.
 * Frames neither from a known station nor addressed to us are ignored.
 */
static void
mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;

	if (!(dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME))
		return;

	if (!wcid || !wcid->sta) {
		struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);

		if (status->flag & RX_FLAG_8023)
			return;

		if (!ether_addr_equal(hdr->addr1, dev->phy.macaddr))
			return;

		wcid = NULL;
	}

	/* new A-MPDU (or non-aggregated frame): close out the previous one */
	if (!(status->flag & RX_FLAG_AMPDU_DETAILS) ||
	    status->ampdu_ref != dev->rx_ampdu_ref)
		mt76_airtime_flush_ampdu(dev);

	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		if (!dev->rx_ampdu_len ||
		    status->ampdu_ref != dev->rx_ampdu_ref) {
			dev->rx_ampdu_status = *status;
			/* 0xff = "no station", checked against
			 * ARRAY_SIZE(dev->wcid) on flush
			 */
			dev->rx_ampdu_status.wcid_idx = wcid ? wcid->idx : 0xff;
			dev->rx_ampdu_ref = status->ampdu_ref;
		}

		dev->rx_ampdu_len += skb->len;
		return;
	}

	mt76_airtime_report(dev, status, skb->len);
}

/* Per-frame station bookkeeping on the RX path: resolve the sender of a
 * PS-Poll, update airtime and RSSI averages, and track powersave state
 * transitions / U-APSD triggers for stations with PS tracking enabled.
 */
static void
mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct mt76_wcid *wcid = status->wcid;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	bool ps;

	hw = mt76_phy_hw(dev, status->ext_phy);
	if (ieee80211_is_pspoll(hdr->frame_control) && !wcid &&
	    !(status->flag & RX_FLAG_8023)) {
		sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
		if (sta)
			wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv;
	}

	mt76_airtime_check(dev, skb);

	if (!wcid || !wcid->sta)
		return;

	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);

	/* ewma stores the positive magnitude of the (dBm, <= 0) signal */
	if (status->signal <= 0)
		ewma_signal_add(&wcid->rssi, -status->signal);

	wcid->inactive_count = 0;

	/* 802.3-decapped frames carry no 802.11 PM/QoS fields to inspect */
	if (status->flag & RX_FLAG_8023)
		return;

	if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
		return;

	if (ieee80211_is_pspoll(hdr->frame_control)) {
		ieee80211_sta_pspoll(sta);
		return;
	}

	/* only complete mgmt/data frames are reliable PM indicators */
	if (ieee80211_has_morefrags(hdr->frame_control) ||
	    !(ieee80211_is_mgmt(hdr->frame_control) ||
	      ieee80211_is_data(hdr->frame_control)))
		return;

	ps = ieee80211_has_pm(hdr->frame_control);

	if (ps && (ieee80211_is_data_qos(hdr->frame_control) ||
		   ieee80211_is_qos_nullfunc(hdr->frame_control)))
		ieee80211_sta_uapsd_trigger(sta, tidno);

	/* no transition: nothing further to do */
	if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps)
		return;

	if (ps)
		set_bit(MT_WCID_FLAG_PS, &wcid->flags);
	else
		clear_bit(MT_WCID_FLAG_PS, &wcid->flags);

	dev->drv->sta_ps(dev, sta, ps);
	ieee80211_sta_ps_transition(sta, ps);
}

/* Hand a batch of reordered frames to mac80211.  Frames failing the CCMP
 * PN check are dropped; A-MSDU subframes chained on frag_list are unlinked
 * and delivered individually.  Delivery goes through napi_gro_receive when
 * a napi context is given, otherwise netif_receive_skb_list.
 */
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi)
{
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct sk_buff *skb, *tmp;
	LIST_HEAD(list);

	spin_lock(&dev->rx_lock);
	while ((skb = __skb_dequeue(frames)) != NULL) {
		struct sk_buff *nskb = skb_shinfo(skb)->frag_list;

		if (mt76_check_ccmp_pn(skb)) {
			dev_kfree_skb(skb);
			continue;
		}

		skb_shinfo(skb)->frag_list = NULL;
		mt76_rx_convert(dev, skb, &hw, &sta);
		ieee80211_rx_list(hw, sta, skb, &list);

		/* subsequent amsdu frames */
		while (nskb) {
			skb = nskb;
			nskb = nskb->next;
			skb->next = NULL;

			mt76_rx_convert(dev, skb, &hw, &sta);
			ieee80211_rx_list(hw, sta, skb, &list);
		}
	}
	spin_unlock(&dev->rx_lock);

	if (!napi) {
		netif_receive_skb_list(&list);
		return;
	}

	list_for_each_entry_safe(skb, tmp, &list, list) {
		skb_list_del_init(skb);
		napi_gro_receive(napi, skb);
	}
}

/* NAPI poll completion for RX queue q: run station bookkeeping and the
 * RX aggregation reorder buffer over all pending frames, then deliver.
 */
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
			   struct napi_struct *napi)
{
	struct sk_buff_head frames;
	struct sk_buff *skb;

	__skb_queue_head_init(&frames);

	while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
		mt76_check_sta(dev, skb);
		mt76_rx_aggr_reorder(skb, &frames);
	}

	mt76_rx_complete(dev, &frames, napi);
}
EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);

/* Add a station: driver hook first, then wire up its txqs, init RSSI
 * averaging, record extended-phy membership and publish the wcid pointer
 * (RCU) last, so readers never see a half-initialized entry.
 */
static int
mt76_sta_add(struct mt76_dev *dev, struct ieee80211_vif *vif,
	     struct ieee80211_sta *sta, bool ext_phy)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	int ret;
	int i;

	mutex_lock(&dev->mutex);

	ret = dev->drv->sta_add(dev, vif, sta);
	if (ret)
		goto out;

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct mt76_txq *mtxq;

		if (!sta->txq[i])
			continue;

		mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
		mtxq->wcid = wcid;
	}

	ewma_signal_init(&wcid->rssi);
	if (ext_phy)
		mt76_wcid_mask_set(dev->wcid_phy_mask, wcid->idx);
	wcid->ext_phy = ext_phy;
	rcu_assign_pointer(dev->wcid[wcid->idx], wcid);

out:
	mutex_unlock(&dev->mutex);

	return ret;
}

/* Remove a station (caller holds dev->mutex): stop RX aggregation on all
 * TIDs, invoke the driver hook, flush pending tx status and release the
 * wcid index from both allocation masks.
 */
void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	int i, idx = wcid->idx;

	for (i = 0; i < ARRAY_SIZE(wcid->aggr); i++)
		mt76_rx_aggr_stop(dev, wcid, i);

	if (dev->drv->sta_remove)
		dev->drv->sta_remove(dev, vif, sta);

	mt76_tx_status_check(dev, wcid, true);
	mt76_wcid_mask_clear(dev->wcid_mask, idx);
	mt76_wcid_mask_clear(dev->wcid_phy_mask, idx);
}
EXPORT_SYMBOL_GPL(__mt76_sta_remove);

/* Locked wrapper around __mt76_sta_remove(). */
static void
mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
		struct ieee80211_sta *sta)
{
	mutex_lock(&dev->mutex);
	__mt76_sta_remove(dev, vif, sta);
	mutex_unlock(&dev->mutex);
}

/* mac80211 sta_state callback: map the NOTEXIST<->NONE transitions to
 * add/remove, and AUTH->ASSOC to the optional driver assoc hook.
 */
int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta,
		   enum ieee80211_sta_state old_state,
		   enum ieee80211_sta_state new_state)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;

	if (old_state == IEEE80211_STA_NOTEXIST &&
	    new_state == IEEE80211_STA_NONE)
		return mt76_sta_add(dev, vif, sta, ext_phy);

	if (old_state == IEEE80211_STA_AUTH &&
	    new_state == IEEE80211_STA_ASSOC &&
	    dev->drv->sta_assoc)
		dev->drv->sta_assoc(dev, vif, sta);

	if (old_state == IEEE80211_STA_NONE &&
	    new_state == IEEE80211_STA_NOTEXIST)
		mt76_sta_remove(dev, vif, sta);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_sta_state);

/* Unpublish the station's wcid pointer before mac80211 frees the station,
 * so RCU readers on the RX path stop seeing it.
 */
void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;

	mutex_lock(&dev->mutex);
	rcu_assign_pointer(dev->wcid[wcid->idx], NULL);
	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);

/* mac80211 get_txpower callback.  txpower_cur is kept in half-dB units
 * (hence the divide by 2); the per-chain delta compensates for multi-chain
 * combining gain.
 */
int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		     int *dbm)
{
	struct mt76_phy *phy = hw->priv;
	int n_chains = hweight8(phy->antenna_mask);
	int delta = mt76_tx_power_nss_delta(n_chains);

	*dbm = DIV_ROUND_UP(phy->txpower_cur + delta, 2);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_txpower);

/* Interface iterator: complete CSA on each vif whose countdown expired. */
static void
__mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	if (vif->csa_active && ieee80211_beacon_cntdwn_is_complete(vif))
		ieee80211_csa_finish(vif);
}

/* Finalize a pending channel-switch announcement (set by mt76_csa_check)
 * across all active interfaces.
 */
void mt76_csa_finish(struct mt76_dev *dev)
{
	if (!dev->csa_complete)
		return;

	ieee80211_iterate_active_interfaces_atomic(dev->hw,
			IEEE80211_IFACE_ITER_RESUME_ALL,
			__mt76_csa_finish, dev);

	dev->csa_complete = 0;
}
EXPORT_SYMBOL_GPL(mt76_csa_finish);

/* Interface iterator: latch csa_complete if any vif's countdown expired. */
static void
__mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct mt76_dev *dev = priv;

	if (!vif->csa_active)
		return;

	dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif);
}

/* Poll all active interfaces for an expired CSA countdown. */
void mt76_csa_check(struct mt76_dev *dev)
{
	ieee80211_iterate_active_interfaces_atomic(dev->hw,
			IEEE80211_IFACE_ITER_RESUME_ALL,
			__mt76_csa_check, dev);
}
EXPORT_SYMBOL_GPL(mt76_csa_check);

/* mac80211 set_tim callback: the TIM is rebuilt on every beacon update, so
 * there is nothing to do here — success is reported unconditionally.
 */
int
mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
{
	return 0;
}
EXPORT_SYMBOL_GPL(mt76_set_tim);

/* Re-insert an 8-byte CCMP header (rebuilt from status->iv) that the
 * hardware stripped, shifting the 802.11 header forward, and clear
 * IV_STRIPPED so mac80211 parses the restored header.
 */
void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
	u8 *hdr, *pn = status->iv;

	__skb_push(skb, 8);
	memmove(skb->data, skb->data + 8, hdr_len);
	hdr = skb->data + hdr_len;

	/* CCMP header layout: PN0 PN1 rsvd key_id|ExtIV PN2 PN3 PN4 PN5,
	 * with status->iv holding the PN most-significant-byte first
	 */
	hdr[0] = pn[5];
	hdr[1] = pn[4];
	hdr[2] = 0;
	hdr[3] = 0x20 | (key_id << 6);
	hdr[4] = pn[3];
	hdr[5] = pn[2];
	hdr[6] = pn[1];
	hdr[7] = pn[0];

	status->flag &= ~RX_FLAG_IV_STRIPPED;
}
EXPORT_SYMBOL_GPL(mt76_insert_ccmp_hdr);

/* Map a hardware rate index back to a bitrate-table index.  CCK never
 * matches on 5 GHz; on 2.4 GHz, non-CCK lookups skip the 4 leading CCK
 * entries.  Falls back to index 0 when nothing matches.
 */
int mt76_get_rate(struct mt76_dev *dev,
		  struct ieee80211_supported_band *sband,
		  int idx, bool cck)
{
	int i, offset = 0, len = sband->n_bitrates;

	if (cck) {
		if (sband == &dev->phy.sband_5g.sband)
			return 0;

		idx &= ~BIT(2); /* short preamble */
	} else if (sband == &dev->phy.sband_2g.sband) {
		offset = 4;
	}

	for (i = offset; i < len; i++) {
		if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
			return i;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_rate);

/* mac80211 sw_scan_start callback: flag the phy as scanning. */
void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  const u8 *mac)
{
	struct mt76_phy *phy = hw->priv;

	set_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan);

/* mac80211 sw_scan_complete callback: clear the scanning flag. */
void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct mt76_phy *phy = hw->priv;

	clear_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan_complete);

/* mac80211 get_antenna callback: both TX and RX report the phy's full
 * antenna mask.
 */
int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;

	mutex_lock(&dev->mutex);
	*tx_ant = phy->antenna_mask;
	*rx_ant = phy->antenna_mask;
	mutex_unlock(&dev->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_antenna);

/* Allocate a hardware queue descriptor (devm-managed) and let the bus
 * backend set up the ring.  Returns the queue or an ERR_PTR; the devm
 * allocation is intentionally left to device teardown on ring failure.
 */
struct mt76_queue *
mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
		int ring_base)
{
	struct mt76_queue *hwq;
	int err;

	hwq = devm_kzalloc(dev->dev, sizeof(*hwq), GFP_KERNEL);
	if (!hwq)
		return ERR_PTR(-ENOMEM);

	err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);
	if (err < 0)
		return ERR_PTR(err);

	return hwq;
}
EXPORT_SYMBOL_GPL(mt76_init_queue);